file_name large_stringlengths 4 140 | prefix large_stringlengths 0 39k | suffix large_stringlengths 0 36.1k | middle large_stringlengths 0 29.4k | fim_type large_stringclasses 4
values |
|---|---|---|---|---|
main_test.go | // {C} Copyright 2018 Pensando Systems Inc. All rights reserved.
package events
import (
"context"
"crypto/x509"
"encoding/json"
"fmt"
"os"
"sync"
"testing"
"time"
es "github.com/olivere/elastic"
uuid "github.com/satori/go.uuid"
"github.com/pensando/sw/api"
"github.com/pensando/sw/api/generated/apiclient"
evtsapi "github.com/pensando/sw/api/generated/events"
testutils "github.com/pensando/sw/test/utils"
"github.com/pensando/sw/venice/apiserver"
"github.com/pensando/sw/venice/citadel/query"
types "github.com/pensando/sw/venice/cmd/types/protos"
"github.com/pensando/sw/venice/ctrler/evtsmgr"
"github.com/pensando/sw/venice/globals"
"github.com/pensando/sw/venice/utils/certs"
"github.com/pensando/sw/venice/utils/elastic"
"github.com/pensando/sw/venice/utils/events"
"github.com/pensando/sw/venice/utils/events/recorder"
"github.com/pensando/sw/venice/utils/log"
mockresolver "github.com/pensando/sw/venice/utils/resolver/mock"
"github.com/pensando/sw/venice/utils/rpckit"
. "github.com/pensando/sw/venice/utils/testutils"
"github.com/pensando/sw/venice/utils/testutils/serviceutils"
)
// package-level configuration shared by every test in this package.
var (
testURL = "localhost:0" // ":0" lets the kernel pick a free port
sortByField = "" // elastic search: empty => no explicit sort field
sortAsc = true // elastic search: ascending sort order
// create mock events recorder
// (both return values are discarded; NOTE(review): presumably created
// only for its side effects — confirm against recorder.NewRecorder)
_, _ = recorder.NewRecorder(&recorder.Config{
Component: "events_integ_test",
BackupDir: "/tmp",
SkipEvtsProxy: true}, log.GetNewLogger(log.GetDefaultConfig("events_integ_test")))
)
// tInfo represents test info. It bundles every service handle, client and
// setting a single test needs; setup() populates it and teardown() releases
// everything it holds.
type tInfo struct {
logger log.Logger // test-scoped logger (tagged with the test name)
mockResolver *mockresolver.ResolverClient // resolver
esClient elastic.ESClient // elastic client to verify the results
elasticsearchAddr string // elastic address
elasticsearchName string // name of the elasticsearch server name; used to stop the server
elasticsearchDir string // name of the directory where Elastic credentials and logs are stored
apiServer apiserver.Server // venice API server
apiServerAddr string // API server address
evtsMgr *evtsmgr.EventsManager // events manager to write events to elastic
evtProxyServices *testutils.EvtProxyServices // events proxy to receive and distribute events
storeConfig *events.StoreConfig // events store config
dedupInterval time.Duration // events dedup interval
batchInterval time.Duration // events batch interval
mockCitadelQueryServer *query.Server // citadel query server with mock broker
signer certs.CSRSigner // function to sign CSRs for TLS
trustRoots []*x509.Certificate // trust roots to verify TLS certs
apiClient apiclient.Services // client to the venice API server
recorders *recorders // recorders created by the test; closed in teardown
testName string // name of the running test (tst.Name())
}
// list of recorders belonging to the test
// recorders is mutex-guarded so tests can register recorders concurrently
// and teardown can close them all in one place.
type recorders struct {
sync.Mutex // guards list
list []events.Recorder // recorders registered by the test
}
// setup helper function create evtsmgr, evtsproxy, etc. services
func (t *tInfo) setup(tst *testing.T) error {
var err error
logConfig := log.GetDefaultConfig("events_test")
logConfig.Format = log.JSONFmt
logConfig.Filter = log.AllowInfoFilter
t.logger = log.GetNewLogger(logConfig).WithContext("t_name", tst.Name())
t.logger.Infof("Starting test %s", tst.Name())
t.mockResolver = mockresolver.New()
t.testName = tst.Name()
// We need a fairly high limit because all clients are collapsed into a single process
// so they hit the same rate limiter
rpckit.SetDefaultListenerConnectionRateLimit(50)
// start certificate server
err = testutils.SetupIntegTLSProvider()
if err != nil {
log.Fatalf("Error setting up TLS provider: %v", err)
}
t.signer, _, t.trustRoots, err = testutils.GetCAKit()
if err != nil {
t.logger.Errorf("Error getting CA artifacts: %v", err)
return err
}
if t.dedupInterval == 0 {
t.dedupInterval = 10 * time.Second
}
if t.batchInterval == 0 |
if t.storeConfig == nil {
t.storeConfig = &events.StoreConfig{}
}
t.recorders = &recorders{}
// start elasticsearch
if err = t.startElasticsearch(); err != nil {
t.logger.Errorf("failed to start elasticsearch, err: %v", err)
return err
}
// create elasticsearch client
if err = t.createElasticClient(); err != nil {
t.logger.Errorf("failed to create elasticsearch client, err: %v", err)
return err
}
// start API server
if err = t.startAPIServer(tst); err != nil {
t.logger.Errorf("failed to start API server, err: %v", err)
return err
}
// start mock citadel query server
mockCitadelQueryServer, mockCitadelQueryServerURL, err := testutils.StartMockCitadelQueryServer(tst)
if err != nil {
t.logger.Errorf("failed to start mock citadel query server, err: %v", err)
return err
}
t.mockCitadelQueryServer = mockCitadelQueryServer
t.updateResolver(globals.Citadel, mockCitadelQueryServerURL)
// start events manager
evtsMgr, evtsMgrURL, err := testutils.StartEvtsMgr(testURL, t.mockResolver, t.logger, t.esClient, nil)
if err != nil {
t.logger.Errorf("failed to start events manager, err: %v", err)
return err
}
t.evtsMgr = evtsMgr
t.updateResolver(globals.EvtsMgr, evtsMgrURL)
// start events proxy
evtProxyServices, evtsProxyURL, storeConfig, err := testutils.StartEvtsProxy(tst.Name(), testURL, t.mockResolver, t.logger, t.dedupInterval, t.batchInterval, t.storeConfig)
if err != nil {
t.logger.Errorf("failed to start events proxy, err: %v", err)
return err
}
t.evtProxyServices = evtProxyServices
t.storeConfig = storeConfig
t.updateResolver(globals.EvtsProxy, evtsProxyURL)
return nil
}
// teardown stops all the services that were started during setup.
// It is safe to call after a partially failed setup: every service handle
// is nil-checked before being stopped (previously evtProxyServices was not,
// which panicked when setup failed before the events proxy started).
func (t *tInfo) teardown() {
	t.recorders.close()

	if t.apiClient != nil {
		// best-effort: remove the version object this test created
		t.apiClient.ClusterV1().Version().Delete(context.Background(), &api.ObjectMeta{Name: t.testName})
		t.apiClient.Close()
		t.apiClient = nil
	}

	if t.esClient != nil {
		t.esClient.Close()
	}
	testutils.StopElasticsearch(t.elasticsearchName, t.elasticsearchDir)

	if t.mockCitadelQueryServer != nil {
		t.mockCitadelQueryServer.Stop()
		t.mockCitadelQueryServer = nil
	}

	if t.evtsMgr != nil {
		t.evtsMgr.Stop()
		t.evtsMgr = nil
	}

	// nil check avoids a panic when setup failed before the proxy started
	if t.evtProxyServices != nil {
		t.evtProxyServices.Stop()
		t.evtProxyServices = nil
	}

	if t.apiServer != nil {
		t.apiServer.Stop()
		t.apiServer = nil
	}

	// stop certificate server
	testutils.CleanupIntegTLSProvider()

	if t.mockResolver != nil {
		t.mockResolver.Stop()
		t.mockResolver = nil
	}

	// remove the local persistent events store (guarded: storeConfig is only
	// guaranteed non-nil once setup ran far enough to populate it)
	if t.storeConfig != nil {
		t.logger.Infof("removing events store %s", t.storeConfig.Dir)
		os.RemoveAll(t.storeConfig.Dir)
	}
	t.logger.Infof("completed test")
}
// cleans up alerts, alert policies and destinations
// Returns the first List error encountered; Delete errors are ignored
// (best-effort cleanup). A nil apiClient is a no-op.
func (t *tInfo) cleanupPolicies() error {
	if t.apiClient == nil {
		return nil
	}

	// fresh options per call, scoped to the default tenant
	defaultTenantOpts := func() *api.ListWatchOptions {
		return &api.ListWatchOptions{ObjectMeta: api.ObjectMeta{Tenant: "default"}}
	}

	// delete all alerts
	alertList, err := t.apiClient.MonitoringV1().Alert().List(context.Background(), defaultTenantOpts())
	if err != nil {
		return err
	}
	for _, alert := range alertList {
		t.apiClient.MonitoringV1().Alert().Delete(context.Background(), &alert.ObjectMeta)
	}

	// delete all alert destinations
	destList, err := t.apiClient.MonitoringV1().AlertDestination().List(context.Background(), defaultTenantOpts())
	if err != nil {
		return err
	}
	for _, dest := range destList {
		t.apiClient.MonitoringV1().AlertDestination().Delete(context.Background(), &dest.ObjectMeta)
	}

	// delete all alert policies
	policyList, err := t.apiClient.MonitoringV1().AlertPolicy().List(context.Background(), defaultTenantOpts())
	if err != nil {
		return err
	}
	for _, policy := range policyList {
		t.apiClient.MonitoringV1().AlertPolicy().Delete(context.Background(), &policy.ObjectMeta)
	}
	return nil
}
// startAPIServer starts the venice API server, registers it with the mock
// resolver and (re)creates the gRPC API client against it, retrying until
// the server is reachable.
func (t *tInfo) startAPIServer(tst *testing.T) error {
	srv, addr, err := serviceutils.StartAPIServer(testURL, tst.Name(), t.logger, []string{})
	if err != nil {
		return err
	}
	t.apiServer = srv
	t.apiServerAddr = addr
	t.updateResolver(globals.APIServer, t.apiServerAddr)

	// close existing client if any
	if t.apiClient != nil {
		t.apiClient.Close()
	}

	// retry client creation until the server accepts connections
	AssertEventually(tst, func() (bool, interface{}) {
		var cErr error
		t.apiClient, cErr = apiclient.NewGrpcAPIClient("events_test", t.apiServerAddr, t.logger)
		if cErr != nil {
			return false, cErr
		}
		return true, nil
	}, "Failed to create api client", "15s", "3m")
	return nil
}
// stopAPIServer stops the API server, deregisters it from the mock resolver
// and closes the API client created against it (if any). Order matters:
// the server is stopped before its client is torn down.
func (t *tInfo) stopAPIServer() {
t.apiServer.Stop()
t.removeResolverEntry(globals.APIServer, t.apiServerAddr)
if t.apiClient != nil {
t.apiClient.Close()
t.apiClient = nil
}
}
// createElasticClient helper function to create elastic client
// The client is stored on tInfo and is used by the assert helpers to verify
// events written to elasticsearch.
func (t *tInfo) createElasticClient() error {
	client, err := testutils.CreateElasticClient(t.elasticsearchAddr, t.mockResolver, t.logger, t.signer, t.trustRoots)
	t.esClient = client
	return err
}
// startElasticsearch helper function to start elasticsearch
// A fresh UUID names the instance so it can be stopped later; the started
// instance is registered with the mock resolver.
func (t *tInfo) startElasticsearch() error {
	t.elasticsearchName = uuid.NewV4().String()

	addr, dir, err := testutils.StartElasticsearch(t.elasticsearchName, t.elasticsearchDir, t.signer, t.trustRoots)
	t.elasticsearchAddr, t.elasticsearchDir = addr, dir
	if err != nil {
		return fmt.Errorf("failed to start elasticsearch, err: %v", err)
	}

	// add mock elastic service to mock resolver
	t.updateResolver(globals.ElasticSearch, t.elasticsearchAddr)
	return nil
}
// updateResolver helper function to update mock resolver with the given service and URL
// The instance name and service both use serviceName.
func (t *tInfo) updateResolver(serviceName, url string) {
	instance := &types.ServiceInstance{
		TypeMeta:   api.TypeMeta{Kind: "ServiceInstance"},
		ObjectMeta: api.ObjectMeta{Name: serviceName},
		Service:    serviceName,
		URL:        url,
	}
	t.mockResolver.AddServiceInstance(instance)
}
// removeResolverEntry helper function to remove entry from mock resolver
// Mirrors updateResolver: the instance is keyed by serviceName and url.
func (t *tInfo) removeResolverEntry(serviceName, url string) {
	instance := &types.ServiceInstance{
		TypeMeta:   api.TypeMeta{Kind: "ServiceInstance"},
		ObjectMeta: api.ObjectMeta{Name: serviceName},
		Service:    serviceName,
		URL:        url,
	}
	t.mockResolver.DeleteServiceInstance(instance)
}
// assertElasticTotalEvents helper function to assert events received by elastic with the total events sent.
// exact == true; asserts totalEventsReceived == totalEventsSent
// exact == false; asserts totalEventsReceived >= totalEventsSent
// The total is the number of single events (count in (0,1]) plus the sum of
// the per-document counts of de-duped events (count > 1).
func (t *tInfo) assertElasticTotalEvents(te *testing.T, query es.Query, exact bool, totalEventsSent int, timeout string) {
	AssertEventually(te,
		func() (bool, interface{}) {
			received := 0

			// 1. query single events, count = 1; only the hit total is needed
			singleQuery := es.NewBoolQuery().Must(query, es.NewRangeQuery("count").Lte(1).Gt(0))
			resp, err := t.esClient.Search(context.Background(), elastic.GetIndex(globals.Events, globals.DefaultTenant), singleQuery, nil, 0, 10, sortByField, sortAsc)
			if err != nil {
				return false, err
			}
			received += int(resp.TotalHits())

			// 2. query de-duped events, count>1; add up each document's count
			dedupQuery := es.NewBoolQuery().Must(query, es.NewRangeQuery("count").Gt(1))
			resp, err = t.esClient.Search(context.Background(), elastic.GetIndex(globals.Events, globals.DefaultTenant), dedupQuery, nil, 0, 10000, sortByField, sortAsc)
			if err != nil {
				return false, err
			}
			var evt evtsapi.Event
			for _, hit := range resp.Hits.Hits {
				_ = json.Unmarshal(*hit.Source, &evt)
				received += int(evt.GetCount())
			}

			if exact && received != totalEventsSent {
				return false, fmt.Sprintf("expected: %d, got: %d", totalEventsSent, received)
			}
			if !exact && received < totalEventsSent {
				return false, fmt.Sprintf("expected: >=%d, got: %d", totalEventsSent, received)
			}
			return true, nil
		}, "couldn't get the expected number of total events", "100ms", timeout)
}
// assertElasticUniqueEvents helper function to assert events received by elastic with the total unique events sent.
// exact == true; asserts uniqueEventsReceived == uniqueEventsSent
// exact == false; asserts uniqueEventsReceived >= uniqueEventsSent
// Each hit is one unique (de-duplicated) event, so the hit count is compared
// directly against uniqueEventsSent.
func (t *tInfo) assertElasticUniqueEvents(te *testing.T, query es.Query, exact bool, uniqueEventsSent int, timeout string) {
	AssertEventually(te,
		func() (bool, interface{}) {
			resp, err := t.esClient.Search(context.Background(), elastic.GetIndex(globals.Events, globals.DefaultTenant), query, nil, 0, 10000, sortByField, sortAsc)
			if err != nil {
				return false, err
			}
			received := len(resp.Hits.Hits)

			if exact && received != uniqueEventsSent {
				return false, fmt.Sprintf("expected: %d, got: %d", uniqueEventsSent, received)
			}
			if !exact && received < uniqueEventsSent {
				return false, fmt.Sprintf("expected: >=%d, got: %d", uniqueEventsSent, received)
			}
			return true, nil
		}, "couldn't get the expected number of unique events", "60ms", timeout)
}
// close all the recorders
// Holds the lock for the whole sweep so no recorder can be added mid-close.
func (r *recorders) close() {
	r.Lock()
	defer r.Unlock()
	for i := range r.list {
		r.list[i].Close()
	}
}
| {
t.batchInterval = 100 * time.Millisecond
} | conditional_block |
main_test.go | // {C} Copyright 2018 Pensando Systems Inc. All rights reserved.
package events
import (
"context"
"crypto/x509"
"encoding/json"
"fmt"
"os"
"sync"
"testing"
"time"
es "github.com/olivere/elastic"
uuid "github.com/satori/go.uuid"
"github.com/pensando/sw/api"
"github.com/pensando/sw/api/generated/apiclient"
evtsapi "github.com/pensando/sw/api/generated/events"
testutils "github.com/pensando/sw/test/utils"
"github.com/pensando/sw/venice/apiserver"
"github.com/pensando/sw/venice/citadel/query"
types "github.com/pensando/sw/venice/cmd/types/protos"
"github.com/pensando/sw/venice/ctrler/evtsmgr"
"github.com/pensando/sw/venice/globals"
"github.com/pensando/sw/venice/utils/certs"
"github.com/pensando/sw/venice/utils/elastic"
"github.com/pensando/sw/venice/utils/events"
"github.com/pensando/sw/venice/utils/events/recorder"
"github.com/pensando/sw/venice/utils/log"
mockresolver "github.com/pensando/sw/venice/utils/resolver/mock"
"github.com/pensando/sw/venice/utils/rpckit"
. "github.com/pensando/sw/venice/utils/testutils"
"github.com/pensando/sw/venice/utils/testutils/serviceutils"
)
var (
testURL = "localhost:0"
sortByField = ""
sortAsc = true
// create mock events recorder
_, _ = recorder.NewRecorder(&recorder.Config{
Component: "events_integ_test",
BackupDir: "/tmp",
SkipEvtsProxy: true}, log.GetNewLogger(log.GetDefaultConfig("events_integ_test")))
)
// tInfo represents test info.
type tInfo struct {
logger log.Logger
mockResolver *mockresolver.ResolverClient // resolver
esClient elastic.ESClient // elastic client to verify the results
elasticsearchAddr string // elastic address
elasticsearchName string // name of the elasticsearch server name; used to stop the server
elasticsearchDir string // name of the directory where Elastic credentials and logs are stored
apiServer apiserver.Server // venice API server
apiServerAddr string // API server address
evtsMgr *evtsmgr.EventsManager // events manager to write events to elastic
evtProxyServices *testutils.EvtProxyServices // events proxy to receive and distribute events
storeConfig *events.StoreConfig // events store config
dedupInterval time.Duration // events dedup interval
batchInterval time.Duration // events batch interval
mockCitadelQueryServer *query.Server // citadel query server with mock broker
signer certs.CSRSigner // function to sign CSRs for TLS
trustRoots []*x509.Certificate // trust roots to verify TLS certs
apiClient apiclient.Services
recorders *recorders
testName string
}
// list of recorders belonging to the test
type recorders struct {
sync.Mutex
list []events.Recorder
}
// setup helper function create evtsmgr, evtsproxy, etc. services
// It brings up, in order: logger, mock resolver, TLS/cert provider, CA kit,
// elasticsearch (+client), API server, mock citadel query server, events
// manager and events proxy, registering each started service with the mock
// resolver. Defaults are applied for dedupInterval, batchInterval and
// storeConfig when the test did not set them. Errors are logged and returned
// (except the TLS provider failure, which is fatal); teardown() releases
// whatever was started.
func (t *tInfo) setup(tst *testing.T) error {
var err error
logConfig := log.GetDefaultConfig("events_test")
logConfig.Format = log.JSONFmt
logConfig.Filter = log.AllowInfoFilter
t.logger = log.GetNewLogger(logConfig).WithContext("t_name", tst.Name())
t.logger.Infof("Starting test %s", tst.Name())
t.mockResolver = mockresolver.New()
t.testName = tst.Name()
// We need a fairly high limit because all clients are collapsed into a single process
// so they hit the same rate limiter
rpckit.SetDefaultListenerConnectionRateLimit(50)
// start certificate server
err = testutils.SetupIntegTLSProvider()
if err != nil {
// NOTE: Fatalf aborts the process — unlike every other failure below,
// this one is not returned to the caller
log.Fatalf("Error setting up TLS provider: %v", err)
}
t.signer, _, t.trustRoots, err = testutils.GetCAKit()
if err != nil {
t.logger.Errorf("Error getting CA artifacts: %v", err)
return err
}
// apply defaults for settings the test did not configure
if t.dedupInterval == 0 {
t.dedupInterval = 10 * time.Second
}
if t.batchInterval == 0 {
t.batchInterval = 100 * time.Millisecond
}
if t.storeConfig == nil {
t.storeConfig = &events.StoreConfig{}
}
t.recorders = &recorders{}
// start elasticsearch
if err = t.startElasticsearch(); err != nil {
t.logger.Errorf("failed to start elasticsearch, err: %v", err)
return err
}
// create elasticsearch client
if err = t.createElasticClient(); err != nil {
t.logger.Errorf("failed to create elasticsearch client, err: %v", err)
return err
}
// start API server
if err = t.startAPIServer(tst); err != nil {
t.logger.Errorf("failed to start API server, err: %v", err)
return err
}
// start mock citadel query server
mockCitadelQueryServer, mockCitadelQueryServerURL, err := testutils.StartMockCitadelQueryServer(tst)
if err != nil {
t.logger.Errorf("failed to start mock citadel query server, err: %v", err)
return err
}
t.mockCitadelQueryServer = mockCitadelQueryServer
t.updateResolver(globals.Citadel, mockCitadelQueryServerURL)
// start events manager
evtsMgr, evtsMgrURL, err := testutils.StartEvtsMgr(testURL, t.mockResolver, t.logger, t.esClient, nil)
if err != nil {
t.logger.Errorf("failed to start events manager, err: %v", err)
return err
}
t.evtsMgr = evtsMgr
t.updateResolver(globals.EvtsMgr, evtsMgrURL)
// start events proxy
evtProxyServices, evtsProxyURL, storeConfig, err := testutils.StartEvtsProxy(tst.Name(), testURL, t.mockResolver, t.logger, t.dedupInterval, t.batchInterval, t.storeConfig)
if err != nil {
t.logger.Errorf("failed to start events proxy, err: %v", err)
return err
}
t.evtProxyServices = evtProxyServices
// keep the store config returned by the proxy; teardown removes storeConfig.Dir
t.storeConfig = storeConfig
t.updateResolver(globals.EvtsProxy, evtsProxyURL)
return nil
}
// teardown stops all the services that were started during setup
func (t *tInfo) teardown() |
// cleans up alerts, alert policies and destinations
func (t *tInfo) cleanupPolicies() error {
if t.apiClient != nil {
// delete all alerts
alerts, err := t.apiClient.MonitoringV1().Alert().List(context.Background(), &api.ListWatchOptions{ObjectMeta: api.ObjectMeta{Tenant: "default"}})
if err != nil {
return err
}
for _, a := range alerts {
t.apiClient.MonitoringV1().Alert().Delete(context.Background(), &a.ObjectMeta)
}
// delete all alert destinations
alertDestinations, err := t.apiClient.MonitoringV1().AlertDestination().List(context.Background(), &api.ListWatchOptions{ObjectMeta: api.ObjectMeta{Tenant: "default"}})
if err != nil {
return err
}
for _, ad := range alertDestinations {
t.apiClient.MonitoringV1().AlertDestination().Delete(context.Background(), &ad.ObjectMeta)
}
// delete all alert policies
alertPolicies, err := t.apiClient.MonitoringV1().AlertPolicy().List(context.Background(), &api.ListWatchOptions{ObjectMeta: api.ObjectMeta{Tenant: "default"}})
if err != nil {
return err
}
for _, ap := range alertPolicies {
t.apiClient.MonitoringV1().AlertPolicy().Delete(context.Background(), &ap.ObjectMeta)
}
}
return nil
}
func (t *tInfo) startAPIServer(tst *testing.T) error {
var err error
t.apiServer, t.apiServerAddr, err = serviceutils.StartAPIServer(testURL, tst.Name(), t.logger, []string{})
if err != nil {
return err
}
t.updateResolver(globals.APIServer, t.apiServerAddr)
if t.apiClient != nil { // close existing client if any
t.apiClient.Close()
}
AssertEventually(tst, func() (bool, interface{}) {
t.apiClient, err = apiclient.NewGrpcAPIClient("events_test", t.apiServerAddr, t.logger)
if err != nil {
return false, err
}
return true, nil
}, "Failed to create api client", "15s", "3m")
return nil
}
func (t *tInfo) stopAPIServer() {
t.apiServer.Stop()
t.removeResolverEntry(globals.APIServer, t.apiServerAddr)
if t.apiClient != nil {
t.apiClient.Close()
t.apiClient = nil
}
}
// createElasticClient helper function to create elastic client
func (t *tInfo) createElasticClient() error {
var err error
t.esClient, err = testutils.CreateElasticClient(t.elasticsearchAddr, t.mockResolver, t.logger, t.signer, t.trustRoots)
return err
}
// startElasticsearch helper function to start elasticsearch
func (t *tInfo) startElasticsearch() error {
var err error
t.elasticsearchName = uuid.NewV4().String()
t.elasticsearchAddr, t.elasticsearchDir, err = testutils.StartElasticsearch(t.elasticsearchName, t.elasticsearchDir, t.signer, t.trustRoots)
if err != nil {
return fmt.Errorf("failed to start elasticsearch, err: %v", err)
}
// add mock elastic service to mock resolver
t.updateResolver(globals.ElasticSearch, t.elasticsearchAddr)
return nil
}
// updateResolver helper function to update mock resolver with the given service and URL
func (t *tInfo) updateResolver(serviceName, url string) {
t.mockResolver.AddServiceInstance(&types.ServiceInstance{
TypeMeta: api.TypeMeta{
Kind: "ServiceInstance",
},
ObjectMeta: api.ObjectMeta{
Name: serviceName,
},
Service: serviceName,
URL: url,
})
}
// removeResolverEntry helper function to remove entry from mock resolver
func (t *tInfo) removeResolverEntry(serviceName, url string) {
t.mockResolver.DeleteServiceInstance(&types.ServiceInstance{
TypeMeta: api.TypeMeta{
Kind: "ServiceInstance",
},
ObjectMeta: api.ObjectMeta{
Name: serviceName,
},
Service: serviceName,
URL: url,
})
}
// assertElasticTotalEvents helper function to assert events received by elastic with the total events sent.
// exact == true; asserts totalEventsReceived == totalEventsSent
// exact == false; asserts totalEventsReceived >= totalEventsSent
func (t *tInfo) assertElasticTotalEvents(te *testing.T, query es.Query, exact bool, totalEventsSent int, timeout string) {
AssertEventually(te,
func() (bool, interface{}) {
var totalEventsReceived int
var evt evtsapi.Event
// total number of docs/events available (single events and de-duped events)
// 1. query single events, count = 1
singleEvents := es.NewBoolQuery()
singleEvents.Must(query, es.NewRangeQuery("count").Lte(1).Gt(0))
// count = 1
resp, err := t.esClient.Search(context.Background(), elastic.GetIndex(globals.Events, globals.DefaultTenant), singleEvents, nil, 0, 10, sortByField, sortAsc)
if err != nil {
return false, err
}
totalEventsReceived += int(resp.TotalHits())
// 2. query de-duped events, count>1
dedupedEvents := es.NewBoolQuery()
dedupedEvents.Must(query, es.NewRangeQuery("count").Gt(1))
resp, err = t.esClient.Search(context.Background(), elastic.GetIndex(globals.Events, globals.DefaultTenant), dedupedEvents, nil, 0, 10000, sortByField, sortAsc)
if err != nil {
return false, err
}
for _, hit := range resp.Hits.Hits {
_ = json.Unmarshal(*hit.Source, &evt)
totalEventsReceived += int(evt.GetCount())
}
if exact {
if !(totalEventsReceived == totalEventsSent) {
return false, fmt.Sprintf("expected: %d, got: %d", totalEventsSent, totalEventsReceived)
}
} else {
if !(totalEventsReceived >= totalEventsSent) {
return false, fmt.Sprintf("expected: >=%d, got: %d", totalEventsSent, totalEventsReceived)
}
}
return true, nil
}, "couldn't get the expected number of total events", "100ms", timeout)
}
// assertElasticUniqueEvents helper function to assert events received by elastic with the total unique events sent.
// exact == true; asserts uniqueEventsReceived == uniqueEventsSent
// exact == false; asserts uniqueEventsReceived >= uniqueEventsSent
func (t *tInfo) assertElasticUniqueEvents(te *testing.T, query es.Query, exact bool, uniqueEventsSent int, timeout string) {
AssertEventually(te,
func() (bool, interface{}) {
var uniqueEventsReceived int
resp, err := t.esClient.Search(context.Background(), elastic.GetIndex(globals.Events, globals.DefaultTenant), query, nil, 0, 10000, sortByField, sortAsc)
if err != nil {
return false, err
}
uniqueEventsReceived = len(resp.Hits.Hits)
if exact {
if !(uniqueEventsReceived == uniqueEventsSent) {
return false, fmt.Sprintf("expected: %d, got: %d", uniqueEventsSent, uniqueEventsReceived)
}
} else {
if !(uniqueEventsReceived >= uniqueEventsSent) {
return false, fmt.Sprintf("expected: >=%d, got: %d", uniqueEventsSent, uniqueEventsReceived)
}
}
return true, nil
}, "couldn't get the expected number of unique events", "60ms", timeout)
}
// close all the recorders
func (r *recorders) close() {
r.Lock()
defer r.Unlock()
for _, re := range r.list {
re.Close()
}
}
| {
t.recorders.close()
if t.apiClient != nil {
t.apiClient.ClusterV1().Version().Delete(context.Background(), &api.ObjectMeta{Name: t.testName})
t.apiClient.Close()
t.apiClient = nil
}
if t.esClient != nil {
t.esClient.Close()
}
testutils.StopElasticsearch(t.elasticsearchName, t.elasticsearchDir)
if t.mockCitadelQueryServer != nil {
t.mockCitadelQueryServer.Stop()
t.mockCitadelQueryServer = nil
}
if t.evtsMgr != nil {
t.evtsMgr.Stop()
t.evtsMgr = nil
}
t.evtProxyServices.Stop()
if t.apiServer != nil {
t.apiServer.Stop()
t.apiServer = nil
}
// stop certificate server
testutils.CleanupIntegTLSProvider()
if t.mockResolver != nil {
t.mockResolver.Stop()
t.mockResolver = nil
}
// remove the local persistent events store
t.logger.Infof("removing events store %s", t.storeConfig.Dir)
os.RemoveAll(t.storeConfig.Dir)
t.logger.Infof("completed test")
} | identifier_body |
main_test.go | // {C} Copyright 2018 Pensando Systems Inc. All rights reserved.
package events
import (
"context"
"crypto/x509"
"encoding/json"
"fmt"
"os"
"sync"
"testing"
"time"
es "github.com/olivere/elastic"
uuid "github.com/satori/go.uuid"
"github.com/pensando/sw/api"
"github.com/pensando/sw/api/generated/apiclient"
evtsapi "github.com/pensando/sw/api/generated/events"
testutils "github.com/pensando/sw/test/utils"
"github.com/pensando/sw/venice/apiserver"
"github.com/pensando/sw/venice/citadel/query"
types "github.com/pensando/sw/venice/cmd/types/protos"
"github.com/pensando/sw/venice/ctrler/evtsmgr"
"github.com/pensando/sw/venice/globals"
"github.com/pensando/sw/venice/utils/certs"
"github.com/pensando/sw/venice/utils/elastic"
"github.com/pensando/sw/venice/utils/events"
"github.com/pensando/sw/venice/utils/events/recorder"
"github.com/pensando/sw/venice/utils/log"
mockresolver "github.com/pensando/sw/venice/utils/resolver/mock"
"github.com/pensando/sw/venice/utils/rpckit"
. "github.com/pensando/sw/venice/utils/testutils"
"github.com/pensando/sw/venice/utils/testutils/serviceutils"
)
var (
testURL = "localhost:0"
sortByField = ""
sortAsc = true
// create mock events recorder
_, _ = recorder.NewRecorder(&recorder.Config{
Component: "events_integ_test",
BackupDir: "/tmp",
SkipEvtsProxy: true}, log.GetNewLogger(log.GetDefaultConfig("events_integ_test")))
)
// tInfo represents test info.
type tInfo struct {
logger log.Logger
mockResolver *mockresolver.ResolverClient // resolver
esClient elastic.ESClient // elastic client to verify the results
elasticsearchAddr string // elastic address
elasticsearchName string // name of the elasticsearch server name; used to stop the server
elasticsearchDir string // name of the directory where Elastic credentials and logs are stored
apiServer apiserver.Server // venice API server
apiServerAddr string // API server address
evtsMgr *evtsmgr.EventsManager // events manager to write events to elastic
evtProxyServices *testutils.EvtProxyServices // events proxy to receive and distribute events
storeConfig *events.StoreConfig // events store config
dedupInterval time.Duration // events dedup interval
batchInterval time.Duration // events batch interval
mockCitadelQueryServer *query.Server // citadel query server with mock broker
signer certs.CSRSigner // function to sign CSRs for TLS
trustRoots []*x509.Certificate // trust roots to verify TLS certs
apiClient apiclient.Services
recorders *recorders
testName string
}
// list of recorders belonging to the test
type recorders struct {
sync.Mutex
list []events.Recorder
}
// setup helper function create evtsmgr, evtsproxy, etc. services
func (t *tInfo) | (tst *testing.T) error {
var err error
logConfig := log.GetDefaultConfig("events_test")
logConfig.Format = log.JSONFmt
logConfig.Filter = log.AllowInfoFilter
t.logger = log.GetNewLogger(logConfig).WithContext("t_name", tst.Name())
t.logger.Infof("Starting test %s", tst.Name())
t.mockResolver = mockresolver.New()
t.testName = tst.Name()
// We need a fairly high limit because all clients are collapsed into a single process
// so they hit the same rate limiter
rpckit.SetDefaultListenerConnectionRateLimit(50)
// start certificate server
err = testutils.SetupIntegTLSProvider()
if err != nil {
log.Fatalf("Error setting up TLS provider: %v", err)
}
t.signer, _, t.trustRoots, err = testutils.GetCAKit()
if err != nil {
t.logger.Errorf("Error getting CA artifacts: %v", err)
return err
}
if t.dedupInterval == 0 {
t.dedupInterval = 10 * time.Second
}
if t.batchInterval == 0 {
t.batchInterval = 100 * time.Millisecond
}
if t.storeConfig == nil {
t.storeConfig = &events.StoreConfig{}
}
t.recorders = &recorders{}
// start elasticsearch
if err = t.startElasticsearch(); err != nil {
t.logger.Errorf("failed to start elasticsearch, err: %v", err)
return err
}
// create elasticsearch client
if err = t.createElasticClient(); err != nil {
t.logger.Errorf("failed to create elasticsearch client, err: %v", err)
return err
}
// start API server
if err = t.startAPIServer(tst); err != nil {
t.logger.Errorf("failed to start API server, err: %v", err)
return err
}
// start mock citadel query server
mockCitadelQueryServer, mockCitadelQueryServerURL, err := testutils.StartMockCitadelQueryServer(tst)
if err != nil {
t.logger.Errorf("failed to start mock citadel query server, err: %v", err)
return err
}
t.mockCitadelQueryServer = mockCitadelQueryServer
t.updateResolver(globals.Citadel, mockCitadelQueryServerURL)
// start events manager
evtsMgr, evtsMgrURL, err := testutils.StartEvtsMgr(testURL, t.mockResolver, t.logger, t.esClient, nil)
if err != nil {
t.logger.Errorf("failed to start events manager, err: %v", err)
return err
}
t.evtsMgr = evtsMgr
t.updateResolver(globals.EvtsMgr, evtsMgrURL)
// start events proxy
evtProxyServices, evtsProxyURL, storeConfig, err := testutils.StartEvtsProxy(tst.Name(), testURL, t.mockResolver, t.logger, t.dedupInterval, t.batchInterval, t.storeConfig)
if err != nil {
t.logger.Errorf("failed to start events proxy, err: %v", err)
return err
}
t.evtProxyServices = evtProxyServices
t.storeConfig = storeConfig
t.updateResolver(globals.EvtsProxy, evtsProxyURL)
return nil
}
// teardown stops all the services that were started during setup.
// It is safe to call after a partially failed setup: every service handle
// is nil-checked before being stopped (previously evtProxyServices was not,
// which panicked when setup failed before the events proxy started).
func (t *tInfo) teardown() {
	t.recorders.close()

	if t.apiClient != nil {
		// best-effort: remove the version object this test created
		t.apiClient.ClusterV1().Version().Delete(context.Background(), &api.ObjectMeta{Name: t.testName})
		t.apiClient.Close()
		t.apiClient = nil
	}

	if t.esClient != nil {
		t.esClient.Close()
	}
	testutils.StopElasticsearch(t.elasticsearchName, t.elasticsearchDir)

	if t.mockCitadelQueryServer != nil {
		t.mockCitadelQueryServer.Stop()
		t.mockCitadelQueryServer = nil
	}

	if t.evtsMgr != nil {
		t.evtsMgr.Stop()
		t.evtsMgr = nil
	}

	// nil check avoids a panic when setup failed before the proxy started
	if t.evtProxyServices != nil {
		t.evtProxyServices.Stop()
		t.evtProxyServices = nil
	}

	if t.apiServer != nil {
		t.apiServer.Stop()
		t.apiServer = nil
	}

	// stop certificate server
	testutils.CleanupIntegTLSProvider()

	if t.mockResolver != nil {
		t.mockResolver.Stop()
		t.mockResolver = nil
	}

	// remove the local persistent events store (guarded: storeConfig is only
	// guaranteed non-nil once setup ran far enough to populate it)
	if t.storeConfig != nil {
		t.logger.Infof("removing events store %s", t.storeConfig.Dir)
		os.RemoveAll(t.storeConfig.Dir)
	}
	t.logger.Infof("completed test")
}
// cleans up alerts, alert policies and destinations
func (t *tInfo) cleanupPolicies() error {
if t.apiClient != nil {
// delete all alerts
alerts, err := t.apiClient.MonitoringV1().Alert().List(context.Background(), &api.ListWatchOptions{ObjectMeta: api.ObjectMeta{Tenant: "default"}})
if err != nil {
return err
}
for _, a := range alerts {
t.apiClient.MonitoringV1().Alert().Delete(context.Background(), &a.ObjectMeta)
}
// delete all alert destinations
alertDestinations, err := t.apiClient.MonitoringV1().AlertDestination().List(context.Background(), &api.ListWatchOptions{ObjectMeta: api.ObjectMeta{Tenant: "default"}})
if err != nil {
return err
}
for _, ad := range alertDestinations {
t.apiClient.MonitoringV1().AlertDestination().Delete(context.Background(), &ad.ObjectMeta)
}
// delete all alert policies
alertPolicies, err := t.apiClient.MonitoringV1().AlertPolicy().List(context.Background(), &api.ListWatchOptions{ObjectMeta: api.ObjectMeta{Tenant: "default"}})
if err != nil {
return err
}
for _, ap := range alertPolicies {
t.apiClient.MonitoringV1().AlertPolicy().Delete(context.Background(), &ap.ObjectMeta)
}
}
return nil
}
func (t *tInfo) startAPIServer(tst *testing.T) error {
var err error
t.apiServer, t.apiServerAddr, err = serviceutils.StartAPIServer(testURL, tst.Name(), t.logger, []string{})
if err != nil {
return err
}
t.updateResolver(globals.APIServer, t.apiServerAddr)
if t.apiClient != nil { // close existing client if any
t.apiClient.Close()
}
AssertEventually(tst, func() (bool, interface{}) {
t.apiClient, err = apiclient.NewGrpcAPIClient("events_test", t.apiServerAddr, t.logger)
if err != nil {
return false, err
}
return true, nil
}, "Failed to create api client", "15s", "3m")
return nil
}
func (t *tInfo) stopAPIServer() {
t.apiServer.Stop()
t.removeResolverEntry(globals.APIServer, t.apiServerAddr)
if t.apiClient != nil {
t.apiClient.Close()
t.apiClient = nil
}
}
// createElasticClient helper function to create elastic client
func (t *tInfo) createElasticClient() error {
var err error
t.esClient, err = testutils.CreateElasticClient(t.elasticsearchAddr, t.mockResolver, t.logger, t.signer, t.trustRoots)
return err
}
// startElasticsearch helper function to start elasticsearch
func (t *tInfo) startElasticsearch() error {
var err error
t.elasticsearchName = uuid.NewV4().String()
t.elasticsearchAddr, t.elasticsearchDir, err = testutils.StartElasticsearch(t.elasticsearchName, t.elasticsearchDir, t.signer, t.trustRoots)
if err != nil {
return fmt.Errorf("failed to start elasticsearch, err: %v", err)
}
// add mock elastic service to mock resolver
t.updateResolver(globals.ElasticSearch, t.elasticsearchAddr)
return nil
}
// updateResolver helper function to update mock resolver with the given service and URL
func (t *tInfo) updateResolver(serviceName, url string) {
t.mockResolver.AddServiceInstance(&types.ServiceInstance{
TypeMeta: api.TypeMeta{
Kind: "ServiceInstance",
},
ObjectMeta: api.ObjectMeta{
Name: serviceName,
},
Service: serviceName,
URL: url,
})
}
// removeResolverEntry helper function to remove entry from mock resolver
func (t *tInfo) removeResolverEntry(serviceName, url string) {
t.mockResolver.DeleteServiceInstance(&types.ServiceInstance{
TypeMeta: api.TypeMeta{
Kind: "ServiceInstance",
},
ObjectMeta: api.ObjectMeta{
Name: serviceName,
},
Service: serviceName,
URL: url,
})
}
// assertElasticTotalEvents helper function to assert events received by elastic with the total events sent.
// exact == true; asserts totalEventsReceived == totalEventsSent
// exact == false; asserts totalEventsReceived >= totalEventsSent
func (t *tInfo) assertElasticTotalEvents(te *testing.T, query es.Query, exact bool, totalEventsSent int, timeout string) {
AssertEventually(te,
func() (bool, interface{}) {
var totalEventsReceived int
var evt evtsapi.Event
// total number of docs/events available (single events and de-duped events)
// 1. query single events, count = 1
singleEvents := es.NewBoolQuery()
singleEvents.Must(query, es.NewRangeQuery("count").Lte(1).Gt(0))
// count = 1
resp, err := t.esClient.Search(context.Background(), elastic.GetIndex(globals.Events, globals.DefaultTenant), singleEvents, nil, 0, 10, sortByField, sortAsc)
if err != nil {
return false, err
}
totalEventsReceived += int(resp.TotalHits())
// 2. query de-duped events, count>1
dedupedEvents := es.NewBoolQuery()
dedupedEvents.Must(query, es.NewRangeQuery("count").Gt(1))
resp, err = t.esClient.Search(context.Background(), elastic.GetIndex(globals.Events, globals.DefaultTenant), dedupedEvents, nil, 0, 10000, sortByField, sortAsc)
if err != nil {
return false, err
}
for _, hit := range resp.Hits.Hits {
_ = json.Unmarshal(*hit.Source, &evt)
totalEventsReceived += int(evt.GetCount())
}
if exact {
if !(totalEventsReceived == totalEventsSent) {
return false, fmt.Sprintf("expected: %d, got: %d", totalEventsSent, totalEventsReceived)
}
} else {
if !(totalEventsReceived >= totalEventsSent) {
return false, fmt.Sprintf("expected: >=%d, got: %d", totalEventsSent, totalEventsReceived)
}
}
return true, nil
}, "couldn't get the expected number of total events", "100ms", timeout)
}
// assertElasticUniqueEvents helper function to assert events received by elastic with the total unique events sent.
// exact == true; asserts uniqueEventsReceived == uniqueEventsSent
// exact == false; asserts uniqueEventsReceived >= uniqueEventsSent
func (t *tInfo) assertElasticUniqueEvents(te *testing.T, query es.Query, exact bool, uniqueEventsSent int, timeout string) {
AssertEventually(te,
func() (bool, interface{}) {
var uniqueEventsReceived int
resp, err := t.esClient.Search(context.Background(), elastic.GetIndex(globals.Events, globals.DefaultTenant), query, nil, 0, 10000, sortByField, sortAsc)
if err != nil {
return false, err
}
uniqueEventsReceived = len(resp.Hits.Hits)
if exact {
if !(uniqueEventsReceived == uniqueEventsSent) {
return false, fmt.Sprintf("expected: %d, got: %d", uniqueEventsSent, uniqueEventsReceived)
}
} else {
if !(uniqueEventsReceived >= uniqueEventsSent) {
return false, fmt.Sprintf("expected: >=%d, got: %d", uniqueEventsSent, uniqueEventsReceived)
}
}
return true, nil
}, "couldn't get the expected number of unique events", "60ms", timeout)
}
// close all the recorders
func (r *recorders) close() {
r.Lock()
defer r.Unlock()
for _, re := range r.list {
re.Close()
}
}
| setup | identifier_name |
main_test.go | // {C} Copyright 2018 Pensando Systems Inc. All rights reserved.
package events
import (
"context"
"crypto/x509"
"encoding/json"
"fmt"
"os"
"sync"
"testing"
"time"
es "github.com/olivere/elastic"
uuid "github.com/satori/go.uuid"
"github.com/pensando/sw/api"
"github.com/pensando/sw/api/generated/apiclient"
evtsapi "github.com/pensando/sw/api/generated/events"
testutils "github.com/pensando/sw/test/utils"
"github.com/pensando/sw/venice/apiserver"
"github.com/pensando/sw/venice/citadel/query"
types "github.com/pensando/sw/venice/cmd/types/protos"
"github.com/pensando/sw/venice/ctrler/evtsmgr"
"github.com/pensando/sw/venice/globals"
"github.com/pensando/sw/venice/utils/certs"
"github.com/pensando/sw/venice/utils/elastic"
"github.com/pensando/sw/venice/utils/events"
"github.com/pensando/sw/venice/utils/events/recorder"
"github.com/pensando/sw/venice/utils/log"
mockresolver "github.com/pensando/sw/venice/utils/resolver/mock"
"github.com/pensando/sw/venice/utils/rpckit"
. "github.com/pensando/sw/venice/utils/testutils"
"github.com/pensando/sw/venice/utils/testutils/serviceutils"
)
var (
testURL = "localhost:0"
sortByField = ""
sortAsc = true
// create mock events recorder
_, _ = recorder.NewRecorder(&recorder.Config{
Component: "events_integ_test",
BackupDir: "/tmp",
SkipEvtsProxy: true}, log.GetNewLogger(log.GetDefaultConfig("events_integ_test")))
)
// tInfo represents test info.
type tInfo struct { | elasticsearchDir string // name of the directory where Elastic credentials and logs are stored
apiServer apiserver.Server // venice API server
apiServerAddr string // API server address
evtsMgr *evtsmgr.EventsManager // events manager to write events to elastic
evtProxyServices *testutils.EvtProxyServices // events proxy to receive and distribute events
storeConfig *events.StoreConfig // events store config
dedupInterval time.Duration // events dedup interval
batchInterval time.Duration // events batch interval
mockCitadelQueryServer *query.Server // citadel query server with mock broker
signer certs.CSRSigner // function to sign CSRs for TLS
trustRoots []*x509.Certificate // trust roots to verify TLS certs
apiClient apiclient.Services
recorders *recorders
testName string
}
// list of recorders belonging to the test
type recorders struct {
sync.Mutex
list []events.Recorder
}
// setup helper function create evtsmgr, evtsproxy, etc. services
func (t *tInfo) setup(tst *testing.T) error {
var err error
logConfig := log.GetDefaultConfig("events_test")
logConfig.Format = log.JSONFmt
logConfig.Filter = log.AllowInfoFilter
t.logger = log.GetNewLogger(logConfig).WithContext("t_name", tst.Name())
t.logger.Infof("Starting test %s", tst.Name())
t.mockResolver = mockresolver.New()
t.testName = tst.Name()
// We need a fairly high limit because all clients are collapsed into a single process
// so they hit the same rate limiter
rpckit.SetDefaultListenerConnectionRateLimit(50)
// start certificate server
err = testutils.SetupIntegTLSProvider()
if err != nil {
log.Fatalf("Error setting up TLS provider: %v", err)
}
t.signer, _, t.trustRoots, err = testutils.GetCAKit()
if err != nil {
t.logger.Errorf("Error getting CA artifacts: %v", err)
return err
}
if t.dedupInterval == 0 {
t.dedupInterval = 10 * time.Second
}
if t.batchInterval == 0 {
t.batchInterval = 100 * time.Millisecond
}
if t.storeConfig == nil {
t.storeConfig = &events.StoreConfig{}
}
t.recorders = &recorders{}
// start elasticsearch
if err = t.startElasticsearch(); err != nil {
t.logger.Errorf("failed to start elasticsearch, err: %v", err)
return err
}
// create elasticsearch client
if err = t.createElasticClient(); err != nil {
t.logger.Errorf("failed to create elasticsearch client, err: %v", err)
return err
}
// start API server
if err = t.startAPIServer(tst); err != nil {
t.logger.Errorf("failed to start API server, err: %v", err)
return err
}
// start mock citadel query server
mockCitadelQueryServer, mockCitadelQueryServerURL, err := testutils.StartMockCitadelQueryServer(tst)
if err != nil {
t.logger.Errorf("failed to start mock citadel query server, err: %v", err)
return err
}
t.mockCitadelQueryServer = mockCitadelQueryServer
t.updateResolver(globals.Citadel, mockCitadelQueryServerURL)
// start events manager
evtsMgr, evtsMgrURL, err := testutils.StartEvtsMgr(testURL, t.mockResolver, t.logger, t.esClient, nil)
if err != nil {
t.logger.Errorf("failed to start events manager, err: %v", err)
return err
}
t.evtsMgr = evtsMgr
t.updateResolver(globals.EvtsMgr, evtsMgrURL)
// start events proxy
evtProxyServices, evtsProxyURL, storeConfig, err := testutils.StartEvtsProxy(tst.Name(), testURL, t.mockResolver, t.logger, t.dedupInterval, t.batchInterval, t.storeConfig)
if err != nil {
t.logger.Errorf("failed to start events proxy, err: %v", err)
return err
}
t.evtProxyServices = evtProxyServices
t.storeConfig = storeConfig
t.updateResolver(globals.EvtsProxy, evtsProxyURL)
return nil
}
// teardown stops all the services that were started during setup
func (t *tInfo) teardown() {
t.recorders.close()
if t.apiClient != nil {
t.apiClient.ClusterV1().Version().Delete(context.Background(), &api.ObjectMeta{Name: t.testName})
t.apiClient.Close()
t.apiClient = nil
}
if t.esClient != nil {
t.esClient.Close()
}
testutils.StopElasticsearch(t.elasticsearchName, t.elasticsearchDir)
if t.mockCitadelQueryServer != nil {
t.mockCitadelQueryServer.Stop()
t.mockCitadelQueryServer = nil
}
if t.evtsMgr != nil {
t.evtsMgr.Stop()
t.evtsMgr = nil
}
t.evtProxyServices.Stop()
if t.apiServer != nil {
t.apiServer.Stop()
t.apiServer = nil
}
// stop certificate server
testutils.CleanupIntegTLSProvider()
if t.mockResolver != nil {
t.mockResolver.Stop()
t.mockResolver = nil
}
// remove the local persistent events store
t.logger.Infof("removing events store %s", t.storeConfig.Dir)
os.RemoveAll(t.storeConfig.Dir)
t.logger.Infof("completed test")
}
// cleans up alerts, alert policies and destinations
func (t *tInfo) cleanupPolicies() error {
if t.apiClient != nil {
// delete all alerts
alerts, err := t.apiClient.MonitoringV1().Alert().List(context.Background(), &api.ListWatchOptions{ObjectMeta: api.ObjectMeta{Tenant: "default"}})
if err != nil {
return err
}
for _, a := range alerts {
t.apiClient.MonitoringV1().Alert().Delete(context.Background(), &a.ObjectMeta)
}
// delete all alert destinations
alertDestinations, err := t.apiClient.MonitoringV1().AlertDestination().List(context.Background(), &api.ListWatchOptions{ObjectMeta: api.ObjectMeta{Tenant: "default"}})
if err != nil {
return err
}
for _, ad := range alertDestinations {
t.apiClient.MonitoringV1().AlertDestination().Delete(context.Background(), &ad.ObjectMeta)
}
// delete all alert policies
alertPolicies, err := t.apiClient.MonitoringV1().AlertPolicy().List(context.Background(), &api.ListWatchOptions{ObjectMeta: api.ObjectMeta{Tenant: "default"}})
if err != nil {
return err
}
for _, ap := range alertPolicies {
t.apiClient.MonitoringV1().AlertPolicy().Delete(context.Background(), &ap.ObjectMeta)
}
}
return nil
}
func (t *tInfo) startAPIServer(tst *testing.T) error {
var err error
t.apiServer, t.apiServerAddr, err = serviceutils.StartAPIServer(testURL, tst.Name(), t.logger, []string{})
if err != nil {
return err
}
t.updateResolver(globals.APIServer, t.apiServerAddr)
if t.apiClient != nil { // close existing client if any
t.apiClient.Close()
}
AssertEventually(tst, func() (bool, interface{}) {
t.apiClient, err = apiclient.NewGrpcAPIClient("events_test", t.apiServerAddr, t.logger)
if err != nil {
return false, err
}
return true, nil
}, "Failed to create api client", "15s", "3m")
return nil
}
func (t *tInfo) stopAPIServer() {
t.apiServer.Stop()
t.removeResolverEntry(globals.APIServer, t.apiServerAddr)
if t.apiClient != nil {
t.apiClient.Close()
t.apiClient = nil
}
}
// createElasticClient helper function to create elastic client
func (t *tInfo) createElasticClient() error {
var err error
t.esClient, err = testutils.CreateElasticClient(t.elasticsearchAddr, t.mockResolver, t.logger, t.signer, t.trustRoots)
return err
}
// startElasticsearch helper function to start elasticsearch
func (t *tInfo) startElasticsearch() error {
var err error
t.elasticsearchName = uuid.NewV4().String()
t.elasticsearchAddr, t.elasticsearchDir, err = testutils.StartElasticsearch(t.elasticsearchName, t.elasticsearchDir, t.signer, t.trustRoots)
if err != nil {
return fmt.Errorf("failed to start elasticsearch, err: %v", err)
}
// add mock elastic service to mock resolver
t.updateResolver(globals.ElasticSearch, t.elasticsearchAddr)
return nil
}
// updateResolver helper function to update mock resolver with the given service and URL
func (t *tInfo) updateResolver(serviceName, url string) {
t.mockResolver.AddServiceInstance(&types.ServiceInstance{
TypeMeta: api.TypeMeta{
Kind: "ServiceInstance",
},
ObjectMeta: api.ObjectMeta{
Name: serviceName,
},
Service: serviceName,
URL: url,
})
}
// removeResolverEntry helper function to remove entry from mock resolver
func (t *tInfo) removeResolverEntry(serviceName, url string) {
t.mockResolver.DeleteServiceInstance(&types.ServiceInstance{
TypeMeta: api.TypeMeta{
Kind: "ServiceInstance",
},
ObjectMeta: api.ObjectMeta{
Name: serviceName,
},
Service: serviceName,
URL: url,
})
}
// assertElasticTotalEvents helper function to assert events received by elastic with the total events sent.
// exact == true; asserts totalEventsReceived == totalEventsSent
// exact == false; asserts totalEventsReceived >= totalEventsSent
func (t *tInfo) assertElasticTotalEvents(te *testing.T, query es.Query, exact bool, totalEventsSent int, timeout string) {
AssertEventually(te,
func() (bool, interface{}) {
var totalEventsReceived int
var evt evtsapi.Event
// total number of docs/events available (single events and de-duped events)
// 1. query single events, count = 1
singleEvents := es.NewBoolQuery()
singleEvents.Must(query, es.NewRangeQuery("count").Lte(1).Gt(0))
// count = 1
resp, err := t.esClient.Search(context.Background(), elastic.GetIndex(globals.Events, globals.DefaultTenant), singleEvents, nil, 0, 10, sortByField, sortAsc)
if err != nil {
return false, err
}
totalEventsReceived += int(resp.TotalHits())
// 2. query de-duped events, count>1
dedupedEvents := es.NewBoolQuery()
dedupedEvents.Must(query, es.NewRangeQuery("count").Gt(1))
resp, err = t.esClient.Search(context.Background(), elastic.GetIndex(globals.Events, globals.DefaultTenant), dedupedEvents, nil, 0, 10000, sortByField, sortAsc)
if err != nil {
return false, err
}
for _, hit := range resp.Hits.Hits {
_ = json.Unmarshal(*hit.Source, &evt)
totalEventsReceived += int(evt.GetCount())
}
if exact {
if !(totalEventsReceived == totalEventsSent) {
return false, fmt.Sprintf("expected: %d, got: %d", totalEventsSent, totalEventsReceived)
}
} else {
if !(totalEventsReceived >= totalEventsSent) {
return false, fmt.Sprintf("expected: >=%d, got: %d", totalEventsSent, totalEventsReceived)
}
}
return true, nil
}, "couldn't get the expected number of total events", "100ms", timeout)
}
// assertElasticUniqueEvents helper function to assert events received by elastic with the total unique events sent.
// exact == true; asserts uniqueEventsReceived == uniqueEventsSent
// exact == false; asserts uniqueEventsReceived >= uniqueEventsSent
func (t *tInfo) assertElasticUniqueEvents(te *testing.T, query es.Query, exact bool, uniqueEventsSent int, timeout string) {
AssertEventually(te,
func() (bool, interface{}) {
var uniqueEventsReceived int
resp, err := t.esClient.Search(context.Background(), elastic.GetIndex(globals.Events, globals.DefaultTenant), query, nil, 0, 10000, sortByField, sortAsc)
if err != nil {
return false, err
}
uniqueEventsReceived = len(resp.Hits.Hits)
if exact {
if !(uniqueEventsReceived == uniqueEventsSent) {
return false, fmt.Sprintf("expected: %d, got: %d", uniqueEventsSent, uniqueEventsReceived)
}
} else {
if !(uniqueEventsReceived >= uniqueEventsSent) {
return false, fmt.Sprintf("expected: >=%d, got: %d", uniqueEventsSent, uniqueEventsReceived)
}
}
return true, nil
}, "couldn't get the expected number of unique events", "60ms", timeout)
}
// close all the recorders
func (r *recorders) close() {
r.Lock()
defer r.Unlock()
for _, re := range r.list {
re.Close()
}
} | logger log.Logger
mockResolver *mockresolver.ResolverClient // resolver
esClient elastic.ESClient // elastic client to verify the results
elasticsearchAddr string // elastic address
elasticsearchName string // name of the elasticsearch server name; used to stop the server | random_line_split |
mod.rs | /// Purpose of these submudules is to create a recursive page table hierarchy
/// with four levels of pagetables in it.
/// To acomplish this a enum Hierarchy to differentiate between the top three
/// levels and the fourth. As a result of this we can extract the addresses stored
/// in the first three levels then jump to the last level (level 1) retrive
/// its address and then move to the offsets.
pub use self::entry::*;
///FrameAllocator is the method used to create Frames from Pages
use memory::{PAGE_SIZE, Frame, FrameAllocator}; // needed later
use self::table::{Table, Level4};
pub use self::mapper::Mapper;
use core::ptr::Unique;
use core::ops::{Deref, DerefMut};
use self::temporary_page::TemporaryPage;
use multiboot2::BootInformation;
//use acpi::rsdp;
use acpi::*;
use core::mem;
use core::option;
//use self::paging::PhysicalAddress;
//use self::entry::HUGE_PAGE;
mod entry;
mod table;
mod mapper;
///Used to temporary map a frame to virtal address
mod temporary_page;
const ENTRY_COUNT: usize = 512;
pub type PhysicalAddress = usize;
pub type VirtualAddress = usize;
/// Copy so that it can be used after passing 'map_to' and similar functions.
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
pub struct Page {
number: usize,
}
impl Page {
/// The address space is split into two halves , high/low, where the higher
/// half contains addresses and the sign extentions, the lower half contains
/// just adresses. This is checked here with an assert.
pub fn containing_address(address: VirtualAddress) -> Page {
assert!(address < 0x0000_8000_0000_0000 ||
address >= 0xffff_8000_0000_0000,
"invalid address: 0x{:x}", address);
Page { number: address / PAGE_SIZE }
}
/// Takes a VirtualAddress and calculates start address
fn start_address(&self) -> PhysicalAddress {
self.number * PAGE_SIZE
}
/// Calculates the p4-starting index/point
fn p4_index(&self) -> usize {
(self.number >> 27) & 0o777
}
/// Calculates the p3-starting index/point
fn p3_index(&self) -> usize {
(self.number >> 18) & 0o777
}
/// Calculates the p2-starting index/point
fn p2_index(&self) -> usize {
(self.number >> 9) & 0o777
}
/// Calculates the p1-starting index/point
fn p1_index(&self) -> usize {
(self.number >> 0) & 0o777
}
/// Returns inclusive range iterator of pages
pub fn range_inclusive(start: Page, end: Page) -> PageIter {
PageIter {
start: start,
end: end
}
}
}
pub struct PageIter {
start: Page,
end: Page
}
impl Iterator for PageIter {
type Item = Page;
fn next(&mut self) -> Option<Page> {
if self.start <= self.end {
let page = self.start;
self.start.number += 1;
Some(page)
} else {
None
}
}
}
pub struct ActivePageTable {
mapper: Mapper,
}
/// Dereference the ActivePageTable
/// Returns reference to Mapper
impl Deref for ActivePageTable {
type Target = Mapper;
fn deref(&self) -> &Mapper {
&self.mapper
}
}
/// Dereference the ActivePageTable
/// Returns a mutable reference to Mapper
impl DerefMut for ActivePageTable {
fn deref_mut(&mut self) -> &mut Mapper {
&mut self.mapper
}
}
/// Does the recursive mapping to the four levels of page tables.
impl ActivePageTable {
unsafe fn new() -> ActivePageTable {
ActivePageTable {
mapper: Mapper::new(),
}
}
/// Module that temporarily changes the recursive mapping.
/// It overwrites the 511th P4 entry and points it to the
/// inactive table frame.
/// "It overwrites the 511th P4 entry and points it to the inactive table frame. Then it flushes the translation lookaside buffer (TLB), which still contains some old translations. We need to flush all pages that are part of the recursive mapping, so the easiest way is to flush the TLB completely.""
pub fn with<F>(&mut self,
table: &mut InactivePageTable,
temporary_page: &mut temporary_page::TemporaryPage,
f: F)
where F: FnOnce(&mut Mapper)
{
use x86::{controlregs, tlb};
let flush_tlb = || unsafe { tlb::flush_all() };
{
let backup = Frame::containing_address (
unsafe { controlregs::cr3() } as usize);
// map temporary_page to current p4 table
let p4_table = temporary_page.map_table_frame(backup.clone(), self);
// overwrite recursive mapping
self.p4_mut()[511].set(table.p4_frame.clone(), PRESENT | WRITABLE);
flush_tlb();
// execute f in the new context
f(self);
// restore recursive mapping to original p4 table
p4_table[511].set(backup, PRESENT | WRITABLE);
flush_tlb();
}
temporary_page.unmap(self);
}
pub fn switch(&mut self, new_table: InactivePageTable) -> InactivePageTable {
use x86::controlregs;
let old_table = InactivePageTable {
p4_frame: Frame::containing_address(
unsafe { controlregs::cr3() } as usize),
};
unsafe {
controlregs::cr3_write(new_table.p4_frame.start_address() as u64);
}
old_table
}
}
pub struct InactivePageTable {
p4_frame: Frame,
}
/// Creates valid, inactive page tables that are zeroed and recursively mapped.
impl InactivePageTable {
pub fn new(frame: Frame,
active_table: &mut ActivePageTable,
temporary_page: &mut TemporaryPage)
-> InactivePageTable {
{
// The 'active_table' and 'temporary_table' arguments needs to
// be in a inner scope to ensure shadowing since the table
// variable exclusively borrows temporary_page as long as it's alive
let table = temporary_page.map_table_frame(frame.clone(),
active_table);
// Zeroing table is done here *duh*
table.zero();
// Recursive mapping for the table
table[511].set(frame.clone(), PRESENT | WRITABLE);
}
temporary_page.unmap(active_table);
InactivePageTable {p4_frame: frame }
} | use core::ops::Range;
// Create a temporary page at some page number, in this case 0xcafebabe
let mut temporary_page =
TemporaryPage::new(Page { number: 0xcafebabe }, allocator);
// Created by constructor
let mut active_table = unsafe { ActivePageTable::new() };
// Created by constructor
let mut new_table = {
let frame = allocator.allocate_frame().expect("no more frames");
InactivePageTable::new(frame, &mut active_table, &mut temporary_page)
};
active_table.with(&mut new_table, &mut temporary_page, |mapper| {
let elf_sections_tag = boot_info.elf_sections_tag()
.expect("Memory map tag required");
// Identity map the allocated kernel sections
// Skip sections that are not loaded to memory.
// We require pages to be aligned, see src/arch/x86_64/linker.ld for implementations
for section in elf_sections_tag.sections() {
if !section.is_allocated() {
// section is not loaded to memory
continue;
}
assert!(section.addr as usize % PAGE_SIZE == 0,
"sections need to be page aligned");
// println!("mapping section at addr: {:#x}, size: {:#x}",
// section.addr,
// section.size);
let flags = EntryFlags::from_elf_section_flags(section);
let start_frame = Frame::containing_address(section.start_address());
let end_frame = Frame::containing_address(section.end_address() - 1);
// 'range_inclusive' iterates over all frames on a section
for frame in Frame::range_inclusive(start_frame, end_frame) {
mapper.identity_map(frame, flags, allocator);
}
}
// identity map the VGA text buffer
let vga_buffer_frame = Frame::containing_address(0xb8000);
mapper.identity_map(vga_buffer_frame, WRITABLE, allocator);
// identity map the multiboot info structure
let multiboot_start =
Frame::containing_address(boot_info.start_address());
let multiboot_end =
Frame::containing_address(boot_info.end_address() - 1);
for frame in Frame::range_inclusive(multiboot_start, multiboot_end) {
mapper.identity_map(frame, PRESENT, allocator);
}
for (start, end, next) in &mut sdt_loc.into_iter() {
//println!("Allocating addresses {:x} to {:x} and {:x}", start, end, next);
let start_addr = Frame::containing_address(start);
let end_addr = Frame::containing_address(end);
for frame in Frame::range_inclusive(start_addr, end_addr) {
if mapper.is_unused(&frame, allocator) {
mapper.identity_map(frame, PRESENT, allocator);
}
}
if next != 0 {
let next_header_frame = Frame::containing_address(next);
if mapper.is_unused(&next_header_frame, allocator) {
mapper.identity_map(next_header_frame, PRESENT, allocator);
}
}
}
let ioapic_start = Frame::containing_address(sdt_loc.ioapic_start);
let ioapic_end = Frame::containing_address(sdt_loc.ioapic_end);
for frame in Frame::range_inclusive(ioapic_start, ioapic_end) {
if mapper.is_unused(&frame, allocator) {
mapper.identity_map(frame, WRITABLE, allocator);
}
}
let lapic_addr = Frame::containing_address(sdt_loc.lapic_ctrl);
if mapper.is_unused(&lapic_addr, allocator) {
mapper.identity_map(lapic_addr, WRITABLE, allocator);
}
});
// TODO: Delete when appropriate
let old_table = active_table.switch(new_table);
//println!("NEW TABLE!!!");
// TODO: Delete when appropriate
let old_p4_page = Page::containing_address(old_table.p4_frame.start_address());
active_table.unmap(old_p4_page, allocator);
//println!("guard page at {:#x}", old_p4_page.start_address());
active_table
}
/// Basic tresting of different page table levels and allocations as well as mapping specific bits in specific levels
pub fn test_paging<A>(allocator: &mut A)
where A: FrameAllocator
{
let mut page_table = unsafe { ActivePageTable::new() };
// test translate
println!("Some = {:?}", page_table.translate(0));
// second P1 entry
println!("Some = {:?}", page_table.translate(4096));
// second P2 entry
println!("Some = {:?}", page_table.translate(512 * 4096));
// 300th P2 entry
println!("Some = {:?}", page_table.translate(300 * 512 * 4096));
// second P3 entry
println!("None = {:?}", page_table.translate(512 * 512 * 4096));
// last mapped byte
println!("Some = {:?}", page_table.translate(512 * 512 * 4096 - 1));
// test map_to
// 42th P3 entry
let addr = 42 * 512 * 512 * 4096;
let page = Page::containing_address(addr);
let frame = allocator.allocate_frame().expect("no more frames");
println!("None = {:?}, map to {:?}",
page_table.translate(addr),
frame);
page_table.map_to(page, frame, EntryFlags::empty(), allocator);
println!("Some = {:?}", page_table.translate(addr));
println!("next free frame: {:?}", allocator.allocate_frame());
// test unmap
println!("{:#x}",
unsafe { *(Page::containing_address(addr).start_address() as *const u64) });
page_table.unmap(Page::containing_address(addr), allocator);
println!("None = {:?}", page_table.translate(addr));
} | }
/// Remaps the kernel sections by creating a temporary page.
pub fn remap_the_kernel<A>(allocator: &mut A, boot_info: &BootInformation, sdt_loc: &mut SDT_Loc)
-> ActivePageTable
where A: FrameAllocator{ | random_line_split |
mod.rs | /// Purpose of these submudules is to create a recursive page table hierarchy
/// with four levels of pagetables in it.
/// To acomplish this a enum Hierarchy to differentiate between the top three
/// levels and the fourth. As a result of this we can extract the addresses stored
/// in the first three levels then jump to the last level (level 1) retrive
/// its address and then move to the offsets.
pub use self::entry::*;
///FrameAllocator is the method used to create Frames from Pages
use memory::{PAGE_SIZE, Frame, FrameAllocator}; // needed later
use self::table::{Table, Level4};
pub use self::mapper::Mapper;
use core::ptr::Unique;
use core::ops::{Deref, DerefMut};
use self::temporary_page::TemporaryPage;
use multiboot2::BootInformation;
//use acpi::rsdp;
use acpi::*;
use core::mem;
use core::option;
//use self::paging::PhysicalAddress;
//use self::entry::HUGE_PAGE;
mod entry;
mod table;
mod mapper;
///Used to temporary map a frame to virtal address
mod temporary_page;
const ENTRY_COUNT: usize = 512;
pub type PhysicalAddress = usize;
pub type VirtualAddress = usize;
/// Copy so that it can be used after passing 'map_to' and similar functions.
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
pub struct Page {
number: usize,
}
impl Page {
/// The address space is split into two halves , high/low, where the higher
/// half contains addresses and the sign extentions, the lower half contains
/// just adresses. This is checked here with an assert.
pub fn containing_address(address: VirtualAddress) -> Page {
assert!(address < 0x0000_8000_0000_0000 ||
address >= 0xffff_8000_0000_0000,
"invalid address: 0x{:x}", address);
Page { number: address / PAGE_SIZE }
}
/// Takes a VirtualAddress and calculates start address
fn start_address(&self) -> PhysicalAddress {
self.number * PAGE_SIZE
}
/// Calculates the p4-starting index/point
fn p4_index(&self) -> usize {
(self.number >> 27) & 0o777
}
/// Calculates the p3-starting index/point
fn p3_index(&self) -> usize {
(self.number >> 18) & 0o777
}
/// Calculates the p2-starting index/point
fn p2_index(&self) -> usize {
(self.number >> 9) & 0o777
}
/// Calculates the p1-starting index/point
fn p1_index(&self) -> usize |
/// Returns inclusive range iterator of pages
pub fn range_inclusive(start: Page, end: Page) -> PageIter {
PageIter {
start: start,
end: end
}
}
}
pub struct PageIter {
start: Page,
end: Page
}
impl Iterator for PageIter {
type Item = Page;
fn next(&mut self) -> Option<Page> {
if self.start <= self.end {
let page = self.start;
self.start.number += 1;
Some(page)
} else {
None
}
}
}
pub struct ActivePageTable {
mapper: Mapper,
}
/// Dereference the ActivePageTable
/// Returns reference to Mapper
impl Deref for ActivePageTable {
type Target = Mapper;
fn deref(&self) -> &Mapper {
&self.mapper
}
}
/// Dereference the ActivePageTable
/// Returns a mutable reference to Mapper
impl DerefMut for ActivePageTable {
fn deref_mut(&mut self) -> &mut Mapper {
&mut self.mapper
}
}
/// Does the recursive mapping to the four levels of page tables.
impl ActivePageTable {
unsafe fn new() -> ActivePageTable {
ActivePageTable {
mapper: Mapper::new(),
}
}
/// Module that temporarily changes the recursive mapping.
/// It overwrites the 511th P4 entry and points it to the
/// inactive table frame.
/// "It overwrites the 511th P4 entry and points it to the inactive table frame. Then it flushes the translation lookaside buffer (TLB), which still contains some old translations. We need to flush all pages that are part of the recursive mapping, so the easiest way is to flush the TLB completely.""
pub fn with<F>(&mut self,
table: &mut InactivePageTable,
temporary_page: &mut temporary_page::TemporaryPage,
f: F)
where F: FnOnce(&mut Mapper)
{
use x86::{controlregs, tlb};
let flush_tlb = || unsafe { tlb::flush_all() };
{
let backup = Frame::containing_address (
unsafe { controlregs::cr3() } as usize);
// map temporary_page to current p4 table
let p4_table = temporary_page.map_table_frame(backup.clone(), self);
// overwrite recursive mapping
self.p4_mut()[511].set(table.p4_frame.clone(), PRESENT | WRITABLE);
flush_tlb();
// execute f in the new context
f(self);
// restore recursive mapping to original p4 table
p4_table[511].set(backup, PRESENT | WRITABLE);
flush_tlb();
}
temporary_page.unmap(self);
}
pub fn switch(&mut self, new_table: InactivePageTable) -> InactivePageTable {
use x86::controlregs;
let old_table = InactivePageTable {
p4_frame: Frame::containing_address(
unsafe { controlregs::cr3() } as usize),
};
unsafe {
controlregs::cr3_write(new_table.p4_frame.start_address() as u64);
}
old_table
}
}
pub struct InactivePageTable {
p4_frame: Frame,
}
/// Creates valid, inactive page tables that are zeroed and recursively mapped.
impl InactivePageTable {
pub fn new(frame: Frame,
active_table: &mut ActivePageTable,
temporary_page: &mut TemporaryPage)
-> InactivePageTable {
{
// The 'active_table' and 'temporary_table' arguments needs to
// be in a inner scope to ensure shadowing since the table
// variable exclusively borrows temporary_page as long as it's alive
let table = temporary_page.map_table_frame(frame.clone(),
active_table);
// Zeroing table is done here *duh*
table.zero();
// Recursive mapping for the table
table[511].set(frame.clone(), PRESENT | WRITABLE);
}
temporary_page.unmap(active_table);
InactivePageTable {p4_frame: frame }
}
}
/// Remaps the kernel sections by creating a temporary page.
pub fn remap_the_kernel<A>(allocator: &mut A, boot_info: &BootInformation, sdt_loc: &mut SDT_Loc)
-> ActivePageTable
where A: FrameAllocator{
use core::ops::Range;
// Create a temporary page at some page number, in this case 0xcafebabe
let mut temporary_page =
TemporaryPage::new(Page { number: 0xcafebabe }, allocator);
// Created by constructor
let mut active_table = unsafe { ActivePageTable::new() };
// Created by constructor
let mut new_table = {
let frame = allocator.allocate_frame().expect("no more frames");
InactivePageTable::new(frame, &mut active_table, &mut temporary_page)
};
active_table.with(&mut new_table, &mut temporary_page, |mapper| {
let elf_sections_tag = boot_info.elf_sections_tag()
.expect("Memory map tag required");
// Identity map the allocated kernel sections
// Skip sections that are not loaded to memory.
// We require pages to be aligned, see src/arch/x86_64/linker.ld for implementations
for section in elf_sections_tag.sections() {
if !section.is_allocated() {
// section is not loaded to memory
continue;
}
assert!(section.addr as usize % PAGE_SIZE == 0,
"sections need to be page aligned");
// println!("mapping section at addr: {:#x}, size: {:#x}",
// section.addr,
// section.size);
let flags = EntryFlags::from_elf_section_flags(section);
let start_frame = Frame::containing_address(section.start_address());
let end_frame = Frame::containing_address(section.end_address() - 1);
// 'range_inclusive' iterates over all frames on a section
for frame in Frame::range_inclusive(start_frame, end_frame) {
mapper.identity_map(frame, flags, allocator);
}
}
// identity map the VGA text buffer
let vga_buffer_frame = Frame::containing_address(0xb8000);
mapper.identity_map(vga_buffer_frame, WRITABLE, allocator);
// identity map the multiboot info structure
let multiboot_start =
Frame::containing_address(boot_info.start_address());
let multiboot_end =
Frame::containing_address(boot_info.end_address() - 1);
for frame in Frame::range_inclusive(multiboot_start, multiboot_end) {
mapper.identity_map(frame, PRESENT, allocator);
}
for (start, end, next) in &mut sdt_loc.into_iter() {
//println!("Allocating addresses {:x} to {:x} and {:x}", start, end, next);
let start_addr = Frame::containing_address(start);
let end_addr = Frame::containing_address(end);
for frame in Frame::range_inclusive(start_addr, end_addr) {
if mapper.is_unused(&frame, allocator) {
mapper.identity_map(frame, PRESENT, allocator);
}
}
if next != 0 {
let next_header_frame = Frame::containing_address(next);
if mapper.is_unused(&next_header_frame, allocator) {
mapper.identity_map(next_header_frame, PRESENT, allocator);
}
}
}
let ioapic_start = Frame::containing_address(sdt_loc.ioapic_start);
let ioapic_end = Frame::containing_address(sdt_loc.ioapic_end);
for frame in Frame::range_inclusive(ioapic_start, ioapic_end) {
if mapper.is_unused(&frame, allocator) {
mapper.identity_map(frame, WRITABLE, allocator);
}
}
let lapic_addr = Frame::containing_address(sdt_loc.lapic_ctrl);
if mapper.is_unused(&lapic_addr, allocator) {
mapper.identity_map(lapic_addr, WRITABLE, allocator);
}
});
// TODO: Delete when appropriate
let old_table = active_table.switch(new_table);
//println!("NEW TABLE!!!");
// TODO: Delete when appropriate
let old_p4_page = Page::containing_address(old_table.p4_frame.start_address());
active_table.unmap(old_p4_page, allocator);
//println!("guard page at {:#x}", old_p4_page.start_address());
active_table
}
/// Basic tresting of different page table levels and allocations as well as mapping specific bits in specific levels
pub fn test_paging<A>(allocator: &mut A)
where A: FrameAllocator
{
let mut page_table = unsafe { ActivePageTable::new() };
// test translate
println!("Some = {:?}", page_table.translate(0));
// second P1 entry
println!("Some = {:?}", page_table.translate(4096));
// second P2 entry
println!("Some = {:?}", page_table.translate(512 * 4096));
// 300th P2 entry
println!("Some = {:?}", page_table.translate(300 * 512 * 4096));
// second P3 entry
println!("None = {:?}", page_table.translate(512 * 512 * 4096));
// last mapped byte
println!("Some = {:?}", page_table.translate(512 * 512 * 4096 - 1));
// test map_to
// 42th P3 entry
let addr = 42 * 512 * 512 * 4096;
let page = Page::containing_address(addr);
let frame = allocator.allocate_frame().expect("no more frames");
println!("None = {:?}, map to {:?}",
page_table.translate(addr),
frame);
page_table.map_to(page, frame, EntryFlags::empty(), allocator);
println!("Some = {:?}", page_table.translate(addr));
println!("next free frame: {:?}", allocator.allocate_frame());
// test unmap
println!("{:#x}",
unsafe { *(Page::containing_address(addr).start_address() as *const u64) });
page_table.unmap(Page::containing_address(addr), allocator);
println!("None = {:?}", page_table.translate(addr));
}
| {
(self.number >> 0) & 0o777
} | identifier_body |
mod.rs | /// Purpose of these submudules is to create a recursive page table hierarchy
/// with four levels of pagetables in it.
/// To acomplish this a enum Hierarchy to differentiate between the top three
/// levels and the fourth. As a result of this we can extract the addresses stored
/// in the first three levels then jump to the last level (level 1) retrive
/// its address and then move to the offsets.
pub use self::entry::*;
///FrameAllocator is the method used to create Frames from Pages
use memory::{PAGE_SIZE, Frame, FrameAllocator}; // needed later
use self::table::{Table, Level4};
pub use self::mapper::Mapper;
use core::ptr::Unique;
use core::ops::{Deref, DerefMut};
use self::temporary_page::TemporaryPage;
use multiboot2::BootInformation;
//use acpi::rsdp;
use acpi::*;
use core::mem;
use core::option;
//use self::paging::PhysicalAddress;
//use self::entry::HUGE_PAGE;
mod entry;
mod table;
mod mapper;
///Used to temporary map a frame to virtal address
mod temporary_page;
const ENTRY_COUNT: usize = 512;
pub type PhysicalAddress = usize;
pub type VirtualAddress = usize;
/// Copy so that it can be used after passing 'map_to' and similar functions.
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
pub struct Page {
number: usize,
}
impl Page {
/// The address space is split into two halves , high/low, where the higher
/// half contains addresses and the sign extentions, the lower half contains
/// just adresses. This is checked here with an assert.
pub fn containing_address(address: VirtualAddress) -> Page {
assert!(address < 0x0000_8000_0000_0000 ||
address >= 0xffff_8000_0000_0000,
"invalid address: 0x{:x}", address);
Page { number: address / PAGE_SIZE }
}
/// Takes a VirtualAddress and calculates start address
fn start_address(&self) -> PhysicalAddress {
self.number * PAGE_SIZE
}
/// Calculates the p4-starting index/point
fn p4_index(&self) -> usize {
(self.number >> 27) & 0o777
}
/// Calculates the p3-starting index/point
fn p3_index(&self) -> usize {
(self.number >> 18) & 0o777
}
/// Calculates the p2-starting index/point
fn p2_index(&self) -> usize {
(self.number >> 9) & 0o777
}
/// Calculates the p1-starting index/point
fn | (&self) -> usize {
(self.number >> 0) & 0o777
}
/// Returns inclusive range iterator of pages
pub fn range_inclusive(start: Page, end: Page) -> PageIter {
PageIter {
start: start,
end: end
}
}
}
pub struct PageIter {
start: Page,
end: Page
}
impl Iterator for PageIter {
type Item = Page;
fn next(&mut self) -> Option<Page> {
if self.start <= self.end {
let page = self.start;
self.start.number += 1;
Some(page)
} else {
None
}
}
}
pub struct ActivePageTable {
mapper: Mapper,
}
/// Dereference the ActivePageTable
/// Returns reference to Mapper
impl Deref for ActivePageTable {
type Target = Mapper;
fn deref(&self) -> &Mapper {
&self.mapper
}
}
/// Dereference the ActivePageTable
/// Returns a mutable reference to Mapper
impl DerefMut for ActivePageTable {
fn deref_mut(&mut self) -> &mut Mapper {
&mut self.mapper
}
}
/// Does the recursive mapping to the four levels of page tables.
impl ActivePageTable {
unsafe fn new() -> ActivePageTable {
ActivePageTable {
mapper: Mapper::new(),
}
}
/// Module that temporarily changes the recursive mapping.
/// It overwrites the 511th P4 entry and points it to the
/// inactive table frame.
/// "It overwrites the 511th P4 entry and points it to the inactive table frame. Then it flushes the translation lookaside buffer (TLB), which still contains some old translations. We need to flush all pages that are part of the recursive mapping, so the easiest way is to flush the TLB completely.""
pub fn with<F>(&mut self,
table: &mut InactivePageTable,
temporary_page: &mut temporary_page::TemporaryPage,
f: F)
where F: FnOnce(&mut Mapper)
{
use x86::{controlregs, tlb};
let flush_tlb = || unsafe { tlb::flush_all() };
{
let backup = Frame::containing_address (
unsafe { controlregs::cr3() } as usize);
// map temporary_page to current p4 table
let p4_table = temporary_page.map_table_frame(backup.clone(), self);
// overwrite recursive mapping
self.p4_mut()[511].set(table.p4_frame.clone(), PRESENT | WRITABLE);
flush_tlb();
// execute f in the new context
f(self);
// restore recursive mapping to original p4 table
p4_table[511].set(backup, PRESENT | WRITABLE);
flush_tlb();
}
temporary_page.unmap(self);
}
pub fn switch(&mut self, new_table: InactivePageTable) -> InactivePageTable {
use x86::controlregs;
let old_table = InactivePageTable {
p4_frame: Frame::containing_address(
unsafe { controlregs::cr3() } as usize),
};
unsafe {
controlregs::cr3_write(new_table.p4_frame.start_address() as u64);
}
old_table
}
}
pub struct InactivePageTable {
p4_frame: Frame,
}
/// Creates valid, inactive page tables that are zeroed and recursively mapped.
impl InactivePageTable {
pub fn new(frame: Frame,
active_table: &mut ActivePageTable,
temporary_page: &mut TemporaryPage)
-> InactivePageTable {
{
// The 'active_table' and 'temporary_table' arguments needs to
// be in a inner scope to ensure shadowing since the table
// variable exclusively borrows temporary_page as long as it's alive
let table = temporary_page.map_table_frame(frame.clone(),
active_table);
// Zeroing table is done here *duh*
table.zero();
// Recursive mapping for the table
table[511].set(frame.clone(), PRESENT | WRITABLE);
}
temporary_page.unmap(active_table);
InactivePageTable {p4_frame: frame }
}
}
/// Remaps the kernel sections by creating a temporary page.
pub fn remap_the_kernel<A>(allocator: &mut A, boot_info: &BootInformation, sdt_loc: &mut SDT_Loc)
-> ActivePageTable
where A: FrameAllocator{
use core::ops::Range;
// Create a temporary page at some page number, in this case 0xcafebabe
let mut temporary_page =
TemporaryPage::new(Page { number: 0xcafebabe }, allocator);
// Created by constructor
let mut active_table = unsafe { ActivePageTable::new() };
// Created by constructor
let mut new_table = {
let frame = allocator.allocate_frame().expect("no more frames");
InactivePageTable::new(frame, &mut active_table, &mut temporary_page)
};
active_table.with(&mut new_table, &mut temporary_page, |mapper| {
let elf_sections_tag = boot_info.elf_sections_tag()
.expect("Memory map tag required");
// Identity map the allocated kernel sections
// Skip sections that are not loaded to memory.
// We require pages to be aligned, see src/arch/x86_64/linker.ld for implementations
for section in elf_sections_tag.sections() {
if !section.is_allocated() {
// section is not loaded to memory
continue;
}
assert!(section.addr as usize % PAGE_SIZE == 0,
"sections need to be page aligned");
// println!("mapping section at addr: {:#x}, size: {:#x}",
// section.addr,
// section.size);
let flags = EntryFlags::from_elf_section_flags(section);
let start_frame = Frame::containing_address(section.start_address());
let end_frame = Frame::containing_address(section.end_address() - 1);
// 'range_inclusive' iterates over all frames on a section
for frame in Frame::range_inclusive(start_frame, end_frame) {
mapper.identity_map(frame, flags, allocator);
}
}
// identity map the VGA text buffer
let vga_buffer_frame = Frame::containing_address(0xb8000);
mapper.identity_map(vga_buffer_frame, WRITABLE, allocator);
// identity map the multiboot info structure
let multiboot_start =
Frame::containing_address(boot_info.start_address());
let multiboot_end =
Frame::containing_address(boot_info.end_address() - 1);
for frame in Frame::range_inclusive(multiboot_start, multiboot_end) {
mapper.identity_map(frame, PRESENT, allocator);
}
for (start, end, next) in &mut sdt_loc.into_iter() {
//println!("Allocating addresses {:x} to {:x} and {:x}", start, end, next);
let start_addr = Frame::containing_address(start);
let end_addr = Frame::containing_address(end);
for frame in Frame::range_inclusive(start_addr, end_addr) {
if mapper.is_unused(&frame, allocator) {
mapper.identity_map(frame, PRESENT, allocator);
}
}
if next != 0 {
let next_header_frame = Frame::containing_address(next);
if mapper.is_unused(&next_header_frame, allocator) {
mapper.identity_map(next_header_frame, PRESENT, allocator);
}
}
}
let ioapic_start = Frame::containing_address(sdt_loc.ioapic_start);
let ioapic_end = Frame::containing_address(sdt_loc.ioapic_end);
for frame in Frame::range_inclusive(ioapic_start, ioapic_end) {
if mapper.is_unused(&frame, allocator) {
mapper.identity_map(frame, WRITABLE, allocator);
}
}
let lapic_addr = Frame::containing_address(sdt_loc.lapic_ctrl);
if mapper.is_unused(&lapic_addr, allocator) {
mapper.identity_map(lapic_addr, WRITABLE, allocator);
}
});
// TODO: Delete when appropriate
let old_table = active_table.switch(new_table);
//println!("NEW TABLE!!!");
// TODO: Delete when appropriate
let old_p4_page = Page::containing_address(old_table.p4_frame.start_address());
active_table.unmap(old_p4_page, allocator);
//println!("guard page at {:#x}", old_p4_page.start_address());
active_table
}
/// Basic tresting of different page table levels and allocations as well as mapping specific bits in specific levels
pub fn test_paging<A>(allocator: &mut A)
where A: FrameAllocator
{
let mut page_table = unsafe { ActivePageTable::new() };
// test translate
println!("Some = {:?}", page_table.translate(0));
// second P1 entry
println!("Some = {:?}", page_table.translate(4096));
// second P2 entry
println!("Some = {:?}", page_table.translate(512 * 4096));
// 300th P2 entry
println!("Some = {:?}", page_table.translate(300 * 512 * 4096));
// second P3 entry
println!("None = {:?}", page_table.translate(512 * 512 * 4096));
// last mapped byte
println!("Some = {:?}", page_table.translate(512 * 512 * 4096 - 1));
// test map_to
// 42th P3 entry
let addr = 42 * 512 * 512 * 4096;
let page = Page::containing_address(addr);
let frame = allocator.allocate_frame().expect("no more frames");
println!("None = {:?}, map to {:?}",
page_table.translate(addr),
frame);
page_table.map_to(page, frame, EntryFlags::empty(), allocator);
println!("Some = {:?}", page_table.translate(addr));
println!("next free frame: {:?}", allocator.allocate_frame());
// test unmap
println!("{:#x}",
unsafe { *(Page::containing_address(addr).start_address() as *const u64) });
page_table.unmap(Page::containing_address(addr), allocator);
println!("None = {:?}", page_table.translate(addr));
}
| p1_index | identifier_name |
mod.rs | /// Purpose of these submudules is to create a recursive page table hierarchy
/// with four levels of pagetables in it.
/// To acomplish this a enum Hierarchy to differentiate between the top three
/// levels and the fourth. As a result of this we can extract the addresses stored
/// in the first three levels then jump to the last level (level 1) retrive
/// its address and then move to the offsets.
pub use self::entry::*;
///FrameAllocator is the method used to create Frames from Pages
use memory::{PAGE_SIZE, Frame, FrameAllocator}; // needed later
use self::table::{Table, Level4};
pub use self::mapper::Mapper;
use core::ptr::Unique;
use core::ops::{Deref, DerefMut};
use self::temporary_page::TemporaryPage;
use multiboot2::BootInformation;
//use acpi::rsdp;
use acpi::*;
use core::mem;
use core::option;
//use self::paging::PhysicalAddress;
//use self::entry::HUGE_PAGE;
mod entry;
mod table;
mod mapper;
///Used to temporary map a frame to virtal address
mod temporary_page;
const ENTRY_COUNT: usize = 512;
pub type PhysicalAddress = usize;
pub type VirtualAddress = usize;
/// Copy so that it can be used after passing 'map_to' and similar functions.
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
pub struct Page {
number: usize,
}
impl Page {
/// The address space is split into two halves , high/low, where the higher
/// half contains addresses and the sign extentions, the lower half contains
/// just adresses. This is checked here with an assert.
pub fn containing_address(address: VirtualAddress) -> Page {
assert!(address < 0x0000_8000_0000_0000 ||
address >= 0xffff_8000_0000_0000,
"invalid address: 0x{:x}", address);
Page { number: address / PAGE_SIZE }
}
/// Takes a VirtualAddress and calculates start address
fn start_address(&self) -> PhysicalAddress {
self.number * PAGE_SIZE
}
/// Calculates the p4-starting index/point
fn p4_index(&self) -> usize {
(self.number >> 27) & 0o777
}
/// Calculates the p3-starting index/point
fn p3_index(&self) -> usize {
(self.number >> 18) & 0o777
}
/// Calculates the p2-starting index/point
fn p2_index(&self) -> usize {
(self.number >> 9) & 0o777
}
/// Calculates the p1-starting index/point
fn p1_index(&self) -> usize {
(self.number >> 0) & 0o777
}
/// Returns inclusive range iterator of pages
pub fn range_inclusive(start: Page, end: Page) -> PageIter {
PageIter {
start: start,
end: end
}
}
}
pub struct PageIter {
start: Page,
end: Page
}
impl Iterator for PageIter {
type Item = Page;
fn next(&mut self) -> Option<Page> {
if self.start <= self.end {
let page = self.start;
self.start.number += 1;
Some(page)
} else {
None
}
}
}
pub struct ActivePageTable {
mapper: Mapper,
}
/// Dereference the ActivePageTable
/// Returns reference to Mapper
impl Deref for ActivePageTable {
type Target = Mapper;
fn deref(&self) -> &Mapper {
&self.mapper
}
}
/// Dereference the ActivePageTable
/// Returns a mutable reference to Mapper
impl DerefMut for ActivePageTable {
fn deref_mut(&mut self) -> &mut Mapper {
&mut self.mapper
}
}
/// Does the recursive mapping to the four levels of page tables.
impl ActivePageTable {
unsafe fn new() -> ActivePageTable {
ActivePageTable {
mapper: Mapper::new(),
}
}
/// Module that temporarily changes the recursive mapping.
/// It overwrites the 511th P4 entry and points it to the
/// inactive table frame.
/// "It overwrites the 511th P4 entry and points it to the inactive table frame. Then it flushes the translation lookaside buffer (TLB), which still contains some old translations. We need to flush all pages that are part of the recursive mapping, so the easiest way is to flush the TLB completely.""
pub fn with<F>(&mut self,
table: &mut InactivePageTable,
temporary_page: &mut temporary_page::TemporaryPage,
f: F)
where F: FnOnce(&mut Mapper)
{
use x86::{controlregs, tlb};
let flush_tlb = || unsafe { tlb::flush_all() };
{
let backup = Frame::containing_address (
unsafe { controlregs::cr3() } as usize);
// map temporary_page to current p4 table
let p4_table = temporary_page.map_table_frame(backup.clone(), self);
// overwrite recursive mapping
self.p4_mut()[511].set(table.p4_frame.clone(), PRESENT | WRITABLE);
flush_tlb();
// execute f in the new context
f(self);
// restore recursive mapping to original p4 table
p4_table[511].set(backup, PRESENT | WRITABLE);
flush_tlb();
}
temporary_page.unmap(self);
}
pub fn switch(&mut self, new_table: InactivePageTable) -> InactivePageTable {
use x86::controlregs;
let old_table = InactivePageTable {
p4_frame: Frame::containing_address(
unsafe { controlregs::cr3() } as usize),
};
unsafe {
controlregs::cr3_write(new_table.p4_frame.start_address() as u64);
}
old_table
}
}
pub struct InactivePageTable {
p4_frame: Frame,
}
/// Creates valid, inactive page tables that are zeroed and recursively mapped.
impl InactivePageTable {
pub fn new(frame: Frame,
active_table: &mut ActivePageTable,
temporary_page: &mut TemporaryPage)
-> InactivePageTable {
{
// The 'active_table' and 'temporary_table' arguments needs to
// be in a inner scope to ensure shadowing since the table
// variable exclusively borrows temporary_page as long as it's alive
let table = temporary_page.map_table_frame(frame.clone(),
active_table);
// Zeroing table is done here *duh*
table.zero();
// Recursive mapping for the table
table[511].set(frame.clone(), PRESENT | WRITABLE);
}
temporary_page.unmap(active_table);
InactivePageTable {p4_frame: frame }
}
}
/// Remaps the kernel sections by creating a temporary page.
pub fn remap_the_kernel<A>(allocator: &mut A, boot_info: &BootInformation, sdt_loc: &mut SDT_Loc)
-> ActivePageTable
where A: FrameAllocator{
use core::ops::Range;
// Create a temporary page at some page number, in this case 0xcafebabe
let mut temporary_page =
TemporaryPage::new(Page { number: 0xcafebabe }, allocator);
// Created by constructor
let mut active_table = unsafe { ActivePageTable::new() };
// Created by constructor
let mut new_table = {
let frame = allocator.allocate_frame().expect("no more frames");
InactivePageTable::new(frame, &mut active_table, &mut temporary_page)
};
active_table.with(&mut new_table, &mut temporary_page, |mapper| {
let elf_sections_tag = boot_info.elf_sections_tag()
.expect("Memory map tag required");
// Identity map the allocated kernel sections
// Skip sections that are not loaded to memory.
// We require pages to be aligned, see src/arch/x86_64/linker.ld for implementations
for section in elf_sections_tag.sections() {
if !section.is_allocated() {
// section is not loaded to memory
continue;
}
assert!(section.addr as usize % PAGE_SIZE == 0,
"sections need to be page aligned");
// println!("mapping section at addr: {:#x}, size: {:#x}",
// section.addr,
// section.size);
let flags = EntryFlags::from_elf_section_flags(section);
let start_frame = Frame::containing_address(section.start_address());
let end_frame = Frame::containing_address(section.end_address() - 1);
// 'range_inclusive' iterates over all frames on a section
for frame in Frame::range_inclusive(start_frame, end_frame) {
mapper.identity_map(frame, flags, allocator);
}
}
// identity map the VGA text buffer
let vga_buffer_frame = Frame::containing_address(0xb8000);
mapper.identity_map(vga_buffer_frame, WRITABLE, allocator);
// identity map the multiboot info structure
let multiboot_start =
Frame::containing_address(boot_info.start_address());
let multiboot_end =
Frame::containing_address(boot_info.end_address() - 1);
for frame in Frame::range_inclusive(multiboot_start, multiboot_end) {
mapper.identity_map(frame, PRESENT, allocator);
}
for (start, end, next) in &mut sdt_loc.into_iter() {
//println!("Allocating addresses {:x} to {:x} and {:x}", start, end, next);
let start_addr = Frame::containing_address(start);
let end_addr = Frame::containing_address(end);
for frame in Frame::range_inclusive(start_addr, end_addr) {
if mapper.is_unused(&frame, allocator) {
mapper.identity_map(frame, PRESENT, allocator);
}
}
if next != 0 {
let next_header_frame = Frame::containing_address(next);
if mapper.is_unused(&next_header_frame, allocator) |
}
}
let ioapic_start = Frame::containing_address(sdt_loc.ioapic_start);
let ioapic_end = Frame::containing_address(sdt_loc.ioapic_end);
for frame in Frame::range_inclusive(ioapic_start, ioapic_end) {
if mapper.is_unused(&frame, allocator) {
mapper.identity_map(frame, WRITABLE, allocator);
}
}
let lapic_addr = Frame::containing_address(sdt_loc.lapic_ctrl);
if mapper.is_unused(&lapic_addr, allocator) {
mapper.identity_map(lapic_addr, WRITABLE, allocator);
}
});
// TODO: Delete when appropriate
let old_table = active_table.switch(new_table);
//println!("NEW TABLE!!!");
// TODO: Delete when appropriate
let old_p4_page = Page::containing_address(old_table.p4_frame.start_address());
active_table.unmap(old_p4_page, allocator);
//println!("guard page at {:#x}", old_p4_page.start_address());
active_table
}
/// Basic tresting of different page table levels and allocations as well as mapping specific bits in specific levels
pub fn test_paging<A>(allocator: &mut A)
where A: FrameAllocator
{
let mut page_table = unsafe { ActivePageTable::new() };
// test translate
println!("Some = {:?}", page_table.translate(0));
// second P1 entry
println!("Some = {:?}", page_table.translate(4096));
// second P2 entry
println!("Some = {:?}", page_table.translate(512 * 4096));
// 300th P2 entry
println!("Some = {:?}", page_table.translate(300 * 512 * 4096));
// second P3 entry
println!("None = {:?}", page_table.translate(512 * 512 * 4096));
// last mapped byte
println!("Some = {:?}", page_table.translate(512 * 512 * 4096 - 1));
// test map_to
// 42th P3 entry
let addr = 42 * 512 * 512 * 4096;
let page = Page::containing_address(addr);
let frame = allocator.allocate_frame().expect("no more frames");
println!("None = {:?}, map to {:?}",
page_table.translate(addr),
frame);
page_table.map_to(page, frame, EntryFlags::empty(), allocator);
println!("Some = {:?}", page_table.translate(addr));
println!("next free frame: {:?}", allocator.allocate_frame());
// test unmap
println!("{:#x}",
unsafe { *(Page::containing_address(addr).start_address() as *const u64) });
page_table.unmap(Page::containing_address(addr), allocator);
println!("None = {:?}", page_table.translate(addr));
}
| {
mapper.identity_map(next_header_frame, PRESENT, allocator);
} | conditional_block |
revoked.go | // Copyright 2017 Google LLC. All Rights Reserved.
//
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package x509
import (
"bytes"
"encoding/pem"
"time"
"github.com/google/certificate-transparency-go/asn1"
"github.com/google/certificate-transparency-go/x509/pkix"
)
// OID values for CRL extensions (TBSCertList.Extensions), RFC 5280 s5.2.
var (
OIDExtensionCRLNumber = asn1.ObjectIdentifier{2, 5, 29, 20}
OIDExtensionDeltaCRLIndicator = asn1.ObjectIdentifier{2, 5, 29, 27}
OIDExtensionIssuingDistributionPoint = asn1.ObjectIdentifier{2, 5, 29, 28}
)
// OID values for CRL entry extensions (RevokedCertificate.Extensions), RFC 5280 s5.3
var (
OIDExtensionCRLReasons = asn1.ObjectIdentifier{2, 5, 29, 21}
OIDExtensionInvalidityDate = asn1.ObjectIdentifier{2, 5, 29, 24}
OIDExtensionCertificateIssuer = asn1.ObjectIdentifier{2, 5, 29, 29}
)
// RevocationReasonCode represents the reason for a certificate revocation; see RFC 5280 s5.3.1.
type RevocationReasonCode asn1.Enumerated
// RevocationReasonCode values.
var (
Unspecified = RevocationReasonCode(0)
KeyCompromise = RevocationReasonCode(1)
CACompromise = RevocationReasonCode(2)
AffiliationChanged = RevocationReasonCode(3)
Superseded = RevocationReasonCode(4)
CessationOfOperation = RevocationReasonCode(5)
CertificateHold = RevocationReasonCode(6)
RemoveFromCRL = RevocationReasonCode(8)
PrivilegeWithdrawn = RevocationReasonCode(9)
AACompromise = RevocationReasonCode(10)
)
// ReasonFlag holds a bitmask of applicable revocation reasons, from RFC 5280 s4.2.1.13
type ReasonFlag int
// ReasonFlag values.
const (
UnusedFlag ReasonFlag = 1 << iota
KeyCompromiseFlag
CACompromiseFlag
AffiliationChangedFlag
SupersededFlag
CessationOfOperationFlag
CertificateHoldFlag
PrivilegeWithdrawnFlag
AACompromiseFlag
)
// CertificateList represents the ASN.1 structure of the same name from RFC 5280, s5.1.
// It has the same content as pkix.CertificateList, but the contents include parsed versions
// of any extensions.
type CertificateList struct {
Raw asn1.RawContent
TBSCertList TBSCertList
SignatureAlgorithm pkix.AlgorithmIdentifier
SignatureValue asn1.BitString
}
// ExpiredAt reports whether now is past the expiry time of certList.
func (certList *CertificateList) ExpiredAt(now time.Time) bool {
return now.After(certList.TBSCertList.NextUpdate)
}
// Indication of whether extensions need to be critical or non-critical. Extensions that
// can be either are omitted from the map.
var listExtCritical = map[string]bool{
// From RFC 5280...
OIDExtensionAuthorityKeyId.String(): false, // s5.2.1
OIDExtensionIssuerAltName.String(): false, // s5.2.2
OIDExtensionCRLNumber.String(): false, // s5.2.3
OIDExtensionDeltaCRLIndicator.String(): true, // s5.2.4
OIDExtensionIssuingDistributionPoint.String(): true, // s5.2.5
OIDExtensionFreshestCRL.String(): false, // s5.2.6
OIDExtensionAuthorityInfoAccess.String(): false, // s5.2.7
}
var certExtCritical = map[string]bool{
// From RFC 5280...
OIDExtensionCRLReasons.String(): false, // s5.3.1
OIDExtensionInvalidityDate.String(): false, // s5.3.2
OIDExtensionCertificateIssuer.String(): true, // s5.3.3
}
// IssuingDistributionPoint represents the ASN.1 structure of the same
// name
type IssuingDistributionPoint struct {
DistributionPoint distributionPointName `asn1:"optional,tag:0"`
OnlyContainsUserCerts bool `asn1:"optional,tag:1"`
OnlyContainsCACerts bool `asn1:"optional,tag:2"`
OnlySomeReasons asn1.BitString `asn1:"optional,tag:3"`
IndirectCRL bool `asn1:"optional,tag:4"`
OnlyContainsAttributeCerts bool `asn1:"optional,tag:5"`
}
// TBSCertList represents the ASN.1 structure of the same name from RFC
// 5280, section 5.1. It has the same content as pkix.TBSCertificateList
// but the extensions are included in a parsed format.
type TBSCertList struct {
Raw asn1.RawContent
Version int
Signature pkix.AlgorithmIdentifier
Issuer pkix.RDNSequence
ThisUpdate time.Time
NextUpdate time.Time
RevokedCertificates []*RevokedCertificate
Extensions []pkix.Extension
// Cracked out extensions:
AuthorityKeyID []byte
IssuerAltNames GeneralNames
CRLNumber int
BaseCRLNumber int // -1 if no delta CRL present
IssuingDistributionPoint IssuingDistributionPoint
IssuingDPFullNames GeneralNames
FreshestCRLDistributionPoint []string
OCSPServer []string
IssuingCertificateURL []string
}
// ParseCertificateList parses a CertificateList (e.g. a CRL) from the given
// bytes. It's often the case that PEM encoded CRLs will appear where they
// should be DER encoded, so this function will transparently handle PEM
// encoding as long as there isn't any leading garbage.
func ParseCertificateList(clBytes []byte) (*CertificateList, error) {
if bytes.HasPrefix(clBytes, pemCRLPrefix) {
block, _ := pem.Decode(clBytes)
if block != nil && block.Type == pemType {
clBytes = block.Bytes
}
}
return ParseCertificateListDER(clBytes)
}
// ParseCertificateListDER parses a DER encoded CertificateList from the given bytes.
// For non-fatal errors, this function returns both an error and a CertificateList
// object.
func ParseCertificateListDER(derBytes []byte) (*CertificateList, error) {
var errs Errors
// First parse the DER into the pkix structures.
pkixList := new(pkix.CertificateList)
if rest, err := asn1.Unmarshal(derBytes, pkixList); err != nil {
errs.AddID(ErrInvalidCertList, err)
return nil, &errs
} else if len(rest) != 0 {
errs.AddID(ErrTrailingCertList)
return nil, &errs
}
// Transcribe the revoked certs but crack out extensions.
revokedCerts := make([]*RevokedCertificate, len(pkixList.TBSCertList.RevokedCertificates))
for i, pkixRevoked := range pkixList.TBSCertList.RevokedCertificates {
revokedCerts[i] = parseRevokedCertificate(pkixRevoked, &errs)
if revokedCerts[i] == nil {
return nil, &errs
}
}
certList := CertificateList{
Raw: derBytes,
TBSCertList: TBSCertList{
Raw: pkixList.TBSCertList.Raw,
Version: pkixList.TBSCertList.Version,
Signature: pkixList.TBSCertList.Signature,
Issuer: pkixList.TBSCertList.Issuer,
ThisUpdate: pkixList.TBSCertList.ThisUpdate,
NextUpdate: pkixList.TBSCertList.NextUpdate,
RevokedCertificates: revokedCerts,
Extensions: pkixList.TBSCertList.Extensions,
CRLNumber: -1,
BaseCRLNumber: -1,
},
SignatureAlgorithm: pkixList.SignatureAlgorithm,
SignatureValue: pkixList.SignatureValue,
}
// Now crack out extensions.
for _, e := range certList.TBSCertList.Extensions {
if expectCritical, present := listExtCritical[e.Id.String()]; present {
if e.Critical && !expectCritical {
errs.AddID(ErrUnexpectedlyCriticalCertListExtension, e.Id)
} else if !e.Critical && expectCritical {
errs.AddID(ErrUnexpectedlyNonCriticalCertListExtension, e.Id)
}
}
switch {
case e.Id.Equal(OIDExtensionAuthorityKeyId):
// RFC 5280 s5.2.1
var a authKeyId
if rest, err := asn1.Unmarshal(e.Value, &a); err != nil {
errs.AddID(ErrInvalidCertListAuthKeyID, err)
} else if len(rest) != 0 {
errs.AddID(ErrTrailingCertListAuthKeyID)
}
certList.TBSCertList.AuthorityKeyID = a.Id
case e.Id.Equal(OIDExtensionIssuerAltName):
// RFC 5280 s5.2.2
if err := parseGeneralNames(e.Value, &certList.TBSCertList.IssuerAltNames); err != nil {
errs.AddID(ErrInvalidCertListIssuerAltName, err)
}
case e.Id.Equal(OIDExtensionCRLNumber):
// RFC 5280 s5.2.3
if rest, err := asn1.Unmarshal(e.Value, &certList.TBSCertList.CRLNumber); err != nil {
errs.AddID(ErrInvalidCertListCRLNumber, err)
} else if len(rest) != 0 {
errs.AddID(ErrTrailingCertListCRLNumber)
}
if certList.TBSCertList.CRLNumber < 0 {
errs.AddID(ErrNegativeCertListCRLNumber, certList.TBSCertList.CRLNumber)
}
case e.Id.Equal(OIDExtensionDeltaCRLIndicator):
// RFC 5280 s5.2.4
if rest, err := asn1.Unmarshal(e.Value, &certList.TBSCertList.BaseCRLNumber); err != nil {
errs.AddID(ErrInvalidCertListDeltaCRL, err)
} else if len(rest) != 0 { | }
if certList.TBSCertList.BaseCRLNumber < 0 {
errs.AddID(ErrNegativeCertListDeltaCRL, certList.TBSCertList.BaseCRLNumber)
}
case e.Id.Equal(OIDExtensionIssuingDistributionPoint):
parseIssuingDistributionPoint(e.Value, &certList.TBSCertList.IssuingDistributionPoint, &certList.TBSCertList.IssuingDPFullNames, &errs)
case e.Id.Equal(OIDExtensionFreshestCRL):
// RFC 5280 s5.2.6
if err := parseDistributionPoints(e.Value, &certList.TBSCertList.FreshestCRLDistributionPoint); err != nil {
errs.AddID(ErrInvalidCertListFreshestCRL, err)
return nil, err
}
case e.Id.Equal(OIDExtensionAuthorityInfoAccess):
// RFC 5280 s5.2.7
var aia []accessDescription
if rest, err := asn1.Unmarshal(e.Value, &aia); err != nil {
errs.AddID(ErrInvalidCertListAuthInfoAccess, err)
} else if len(rest) != 0 {
errs.AddID(ErrTrailingCertListAuthInfoAccess)
}
for _, v := range aia {
// GeneralName: uniformResourceIdentifier [6] IA5String
if v.Location.Tag != tagURI {
continue
}
switch {
case v.Method.Equal(OIDAuthorityInfoAccessOCSP):
certList.TBSCertList.OCSPServer = append(certList.TBSCertList.OCSPServer, string(v.Location.Bytes))
case v.Method.Equal(OIDAuthorityInfoAccessIssuers):
certList.TBSCertList.IssuingCertificateURL = append(certList.TBSCertList.IssuingCertificateURL, string(v.Location.Bytes))
}
// TODO(drysdale): cope with more possibilities
}
default:
if e.Critical {
errs.AddID(ErrUnhandledCriticalCertListExtension, e.Id)
}
}
}
if errs.Fatal() {
return nil, &errs
}
if errs.Empty() {
return &certList, nil
}
return &certList, &errs
}
func parseIssuingDistributionPoint(data []byte, idp *IssuingDistributionPoint, name *GeneralNames, errs *Errors) {
// RFC 5280 s5.2.5
if rest, err := asn1.Unmarshal(data, idp); err != nil {
errs.AddID(ErrInvalidCertListIssuingDP, err)
} else if len(rest) != 0 {
errs.AddID(ErrTrailingCertListIssuingDP)
}
typeCount := 0
if idp.OnlyContainsUserCerts {
typeCount++
}
if idp.OnlyContainsCACerts {
typeCount++
}
if idp.OnlyContainsAttributeCerts {
typeCount++
}
if typeCount > 1 {
errs.AddID(ErrCertListIssuingDPMultipleTypes, idp.OnlyContainsUserCerts, idp.OnlyContainsCACerts, idp.OnlyContainsAttributeCerts)
}
for _, fn := range idp.DistributionPoint.FullName {
if _, err := parseGeneralName(fn.FullBytes, name, false); err != nil {
errs.AddID(ErrCertListIssuingDPInvalidFullName, err)
}
}
}
// RevokedCertificate represents the unnamed ASN.1 structure that makes up the
// revokedCertificates member of the TBSCertList structure from RFC 5280, s5.1.
// It has the same content as pkix.RevokedCertificate but the extensions are
// included in a parsed format.
type RevokedCertificate struct {
pkix.RevokedCertificate
// Cracked out extensions:
RevocationReason RevocationReasonCode
InvalidityDate time.Time
Issuer GeneralNames
}
func parseRevokedCertificate(pkixRevoked pkix.RevokedCertificate, errs *Errors) *RevokedCertificate {
result := RevokedCertificate{RevokedCertificate: pkixRevoked}
for _, e := range pkixRevoked.Extensions {
if expectCritical, present := certExtCritical[e.Id.String()]; present {
if e.Critical && !expectCritical {
errs.AddID(ErrUnexpectedlyCriticalRevokedCertExtension, e.Id)
} else if !e.Critical && expectCritical {
errs.AddID(ErrUnexpectedlyNonCriticalRevokedCertExtension, e.Id)
}
}
switch {
case e.Id.Equal(OIDExtensionCRLReasons):
// RFC 5280, s5.3.1
var reason asn1.Enumerated
if rest, err := asn1.Unmarshal(e.Value, &reason); err != nil {
errs.AddID(ErrInvalidRevocationReason, err)
} else if len(rest) != 0 {
errs.AddID(ErrTrailingRevocationReason)
}
result.RevocationReason = RevocationReasonCode(reason)
case e.Id.Equal(OIDExtensionInvalidityDate):
// RFC 5280, s5.3.2
if rest, err := asn1.Unmarshal(e.Value, &result.InvalidityDate); err != nil {
errs.AddID(ErrInvalidRevocationInvalidityDate, err)
} else if len(rest) != 0 {
errs.AddID(ErrTrailingRevocationInvalidityDate)
}
case e.Id.Equal(OIDExtensionCertificateIssuer):
// RFC 5280, s5.3.3
if err := parseGeneralNames(e.Value, &result.Issuer); err != nil {
errs.AddID(ErrInvalidRevocationIssuer, err)
}
default:
if e.Critical {
errs.AddID(ErrUnhandledCriticalRevokedCertExtension, e.Id)
}
}
}
return &result
}
// CheckCertificateListSignature checks that the signature in crl is from c.
func (c *Certificate) CheckCertificateListSignature(crl *CertificateList) error {
algo := SignatureAlgorithmFromAI(crl.SignatureAlgorithm)
return c.CheckSignature(algo, crl.TBSCertList.Raw, crl.SignatureValue.RightAlign())
} | errs.AddID(ErrTrailingCertListDeltaCRL) | random_line_split |
revoked.go | // Copyright 2017 Google LLC. All Rights Reserved.
//
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package x509
import (
"bytes"
"encoding/pem"
"time"
"github.com/google/certificate-transparency-go/asn1"
"github.com/google/certificate-transparency-go/x509/pkix"
)
// OID values for CRL extensions (TBSCertList.Extensions), RFC 5280 s5.2.
var (
OIDExtensionCRLNumber = asn1.ObjectIdentifier{2, 5, 29, 20}
OIDExtensionDeltaCRLIndicator = asn1.ObjectIdentifier{2, 5, 29, 27}
OIDExtensionIssuingDistributionPoint = asn1.ObjectIdentifier{2, 5, 29, 28}
)
// OID values for CRL entry extensions (RevokedCertificate.Extensions), RFC 5280 s5.3
var (
OIDExtensionCRLReasons = asn1.ObjectIdentifier{2, 5, 29, 21}
OIDExtensionInvalidityDate = asn1.ObjectIdentifier{2, 5, 29, 24}
OIDExtensionCertificateIssuer = asn1.ObjectIdentifier{2, 5, 29, 29}
)
// RevocationReasonCode represents the reason for a certificate revocation; see RFC 5280 s5.3.1.
type RevocationReasonCode asn1.Enumerated
// RevocationReasonCode values.
var (
Unspecified = RevocationReasonCode(0)
KeyCompromise = RevocationReasonCode(1)
CACompromise = RevocationReasonCode(2)
AffiliationChanged = RevocationReasonCode(3)
Superseded = RevocationReasonCode(4)
CessationOfOperation = RevocationReasonCode(5)
CertificateHold = RevocationReasonCode(6)
RemoveFromCRL = RevocationReasonCode(8)
PrivilegeWithdrawn = RevocationReasonCode(9)
AACompromise = RevocationReasonCode(10)
)
// ReasonFlag holds a bitmask of applicable revocation reasons, from RFC 5280 s4.2.1.13
type ReasonFlag int
// ReasonFlag values.
const (
UnusedFlag ReasonFlag = 1 << iota
KeyCompromiseFlag
CACompromiseFlag
AffiliationChangedFlag
SupersededFlag
CessationOfOperationFlag
CertificateHoldFlag
PrivilegeWithdrawnFlag
AACompromiseFlag
)
// CertificateList represents the ASN.1 structure of the same name from RFC 5280, s5.1.
// It has the same content as pkix.CertificateList, but the contents include parsed versions
// of any extensions.
type CertificateList struct {
Raw asn1.RawContent
TBSCertList TBSCertList
SignatureAlgorithm pkix.AlgorithmIdentifier
SignatureValue asn1.BitString
}
// ExpiredAt reports whether now is past the expiry time of certList.
func (certList *CertificateList) ExpiredAt(now time.Time) bool {
return now.After(certList.TBSCertList.NextUpdate)
}
// Indication of whether extensions need to be critical or non-critical. Extensions that
// can be either are omitted from the map.
var listExtCritical = map[string]bool{
// From RFC 5280...
OIDExtensionAuthorityKeyId.String(): false, // s5.2.1
OIDExtensionIssuerAltName.String(): false, // s5.2.2
OIDExtensionCRLNumber.String(): false, // s5.2.3
OIDExtensionDeltaCRLIndicator.String(): true, // s5.2.4
OIDExtensionIssuingDistributionPoint.String(): true, // s5.2.5
OIDExtensionFreshestCRL.String(): false, // s5.2.6
OIDExtensionAuthorityInfoAccess.String(): false, // s5.2.7
}
var certExtCritical = map[string]bool{
// From RFC 5280...
OIDExtensionCRLReasons.String(): false, // s5.3.1
OIDExtensionInvalidityDate.String(): false, // s5.3.2
OIDExtensionCertificateIssuer.String(): true, // s5.3.3
}
// IssuingDistributionPoint represents the ASN.1 structure of the same
// name
type IssuingDistributionPoint struct {
DistributionPoint distributionPointName `asn1:"optional,tag:0"`
OnlyContainsUserCerts bool `asn1:"optional,tag:1"`
OnlyContainsCACerts bool `asn1:"optional,tag:2"`
OnlySomeReasons asn1.BitString `asn1:"optional,tag:3"`
IndirectCRL bool `asn1:"optional,tag:4"`
OnlyContainsAttributeCerts bool `asn1:"optional,tag:5"`
}
// TBSCertList represents the ASN.1 structure of the same name from RFC
// 5280, section 5.1. It has the same content as pkix.TBSCertificateList
// but the extensions are included in a parsed format.
type TBSCertList struct {
Raw asn1.RawContent
Version int
Signature pkix.AlgorithmIdentifier
Issuer pkix.RDNSequence
ThisUpdate time.Time
NextUpdate time.Time
RevokedCertificates []*RevokedCertificate
Extensions []pkix.Extension
// Cracked out extensions:
AuthorityKeyID []byte
IssuerAltNames GeneralNames
CRLNumber int
BaseCRLNumber int // -1 if no delta CRL present
IssuingDistributionPoint IssuingDistributionPoint
IssuingDPFullNames GeneralNames
FreshestCRLDistributionPoint []string
OCSPServer []string
IssuingCertificateURL []string
}
// ParseCertificateList parses a CertificateList (e.g. a CRL) from the given
// bytes. It's often the case that PEM encoded CRLs will appear where they
// should be DER encoded, so this function will transparently handle PEM
// encoding as long as there isn't any leading garbage.
func ParseCertificateList(clBytes []byte) (*CertificateList, error) {
if bytes.HasPrefix(clBytes, pemCRLPrefix) {
block, _ := pem.Decode(clBytes)
if block != nil && block.Type == pemType {
clBytes = block.Bytes
}
}
return ParseCertificateListDER(clBytes)
}
// ParseCertificateListDER parses a DER encoded CertificateList from the given bytes.
// For non-fatal errors, this function returns both an error and a CertificateList
// object.
func ParseCertificateListDER(derBytes []byte) (*CertificateList, error) {
var errs Errors
// First parse the DER into the pkix structures.
pkixList := new(pkix.CertificateList)
if rest, err := asn1.Unmarshal(derBytes, pkixList); err != nil {
errs.AddID(ErrInvalidCertList, err)
return nil, &errs
} else if len(rest) != 0 {
errs.AddID(ErrTrailingCertList)
return nil, &errs
}
// Transcribe the revoked certs but crack out extensions.
revokedCerts := make([]*RevokedCertificate, len(pkixList.TBSCertList.RevokedCertificates))
for i, pkixRevoked := range pkixList.TBSCertList.RevokedCertificates {
revokedCerts[i] = parseRevokedCertificate(pkixRevoked, &errs)
if revokedCerts[i] == nil {
return nil, &errs
}
}
certList := CertificateList{
Raw: derBytes,
TBSCertList: TBSCertList{
Raw: pkixList.TBSCertList.Raw,
Version: pkixList.TBSCertList.Version,
Signature: pkixList.TBSCertList.Signature,
Issuer: pkixList.TBSCertList.Issuer,
ThisUpdate: pkixList.TBSCertList.ThisUpdate,
NextUpdate: pkixList.TBSCertList.NextUpdate,
RevokedCertificates: revokedCerts,
Extensions: pkixList.TBSCertList.Extensions,
CRLNumber: -1,
BaseCRLNumber: -1,
},
SignatureAlgorithm: pkixList.SignatureAlgorithm,
SignatureValue: pkixList.SignatureValue,
}
// Now crack out extensions.
for _, e := range certList.TBSCertList.Extensions {
if expectCritical, present := listExtCritical[e.Id.String()]; present {
if e.Critical && !expectCritical {
errs.AddID(ErrUnexpectedlyCriticalCertListExtension, e.Id)
} else if !e.Critical && expectCritical {
errs.AddID(ErrUnexpectedlyNonCriticalCertListExtension, e.Id)
}
}
switch {
case e.Id.Equal(OIDExtensionAuthorityKeyId):
// RFC 5280 s5.2.1
var a authKeyId
if rest, err := asn1.Unmarshal(e.Value, &a); err != nil {
errs.AddID(ErrInvalidCertListAuthKeyID, err)
} else if len(rest) != 0 |
certList.TBSCertList.AuthorityKeyID = a.Id
case e.Id.Equal(OIDExtensionIssuerAltName):
// RFC 5280 s5.2.2
if err := parseGeneralNames(e.Value, &certList.TBSCertList.IssuerAltNames); err != nil {
errs.AddID(ErrInvalidCertListIssuerAltName, err)
}
case e.Id.Equal(OIDExtensionCRLNumber):
// RFC 5280 s5.2.3
if rest, err := asn1.Unmarshal(e.Value, &certList.TBSCertList.CRLNumber); err != nil {
errs.AddID(ErrInvalidCertListCRLNumber, err)
} else if len(rest) != 0 {
errs.AddID(ErrTrailingCertListCRLNumber)
}
if certList.TBSCertList.CRLNumber < 0 {
errs.AddID(ErrNegativeCertListCRLNumber, certList.TBSCertList.CRLNumber)
}
case e.Id.Equal(OIDExtensionDeltaCRLIndicator):
// RFC 5280 s5.2.4
if rest, err := asn1.Unmarshal(e.Value, &certList.TBSCertList.BaseCRLNumber); err != nil {
errs.AddID(ErrInvalidCertListDeltaCRL, err)
} else if len(rest) != 0 {
errs.AddID(ErrTrailingCertListDeltaCRL)
}
if certList.TBSCertList.BaseCRLNumber < 0 {
errs.AddID(ErrNegativeCertListDeltaCRL, certList.TBSCertList.BaseCRLNumber)
}
case e.Id.Equal(OIDExtensionIssuingDistributionPoint):
parseIssuingDistributionPoint(e.Value, &certList.TBSCertList.IssuingDistributionPoint, &certList.TBSCertList.IssuingDPFullNames, &errs)
case e.Id.Equal(OIDExtensionFreshestCRL):
// RFC 5280 s5.2.6
if err := parseDistributionPoints(e.Value, &certList.TBSCertList.FreshestCRLDistributionPoint); err != nil {
errs.AddID(ErrInvalidCertListFreshestCRL, err)
return nil, err
}
case e.Id.Equal(OIDExtensionAuthorityInfoAccess):
// RFC 5280 s5.2.7
var aia []accessDescription
if rest, err := asn1.Unmarshal(e.Value, &aia); err != nil {
errs.AddID(ErrInvalidCertListAuthInfoAccess, err)
} else if len(rest) != 0 {
errs.AddID(ErrTrailingCertListAuthInfoAccess)
}
for _, v := range aia {
// GeneralName: uniformResourceIdentifier [6] IA5String
if v.Location.Tag != tagURI {
continue
}
switch {
case v.Method.Equal(OIDAuthorityInfoAccessOCSP):
certList.TBSCertList.OCSPServer = append(certList.TBSCertList.OCSPServer, string(v.Location.Bytes))
case v.Method.Equal(OIDAuthorityInfoAccessIssuers):
certList.TBSCertList.IssuingCertificateURL = append(certList.TBSCertList.IssuingCertificateURL, string(v.Location.Bytes))
}
// TODO(drysdale): cope with more possibilities
}
default:
if e.Critical {
errs.AddID(ErrUnhandledCriticalCertListExtension, e.Id)
}
}
}
if errs.Fatal() {
return nil, &errs
}
if errs.Empty() {
return &certList, nil
}
return &certList, &errs
}
func parseIssuingDistributionPoint(data []byte, idp *IssuingDistributionPoint, name *GeneralNames, errs *Errors) {
// RFC 5280 s5.2.5
if rest, err := asn1.Unmarshal(data, idp); err != nil {
errs.AddID(ErrInvalidCertListIssuingDP, err)
} else if len(rest) != 0 {
errs.AddID(ErrTrailingCertListIssuingDP)
}
typeCount := 0
if idp.OnlyContainsUserCerts {
typeCount++
}
if idp.OnlyContainsCACerts {
typeCount++
}
if idp.OnlyContainsAttributeCerts {
typeCount++
}
if typeCount > 1 {
errs.AddID(ErrCertListIssuingDPMultipleTypes, idp.OnlyContainsUserCerts, idp.OnlyContainsCACerts, idp.OnlyContainsAttributeCerts)
}
for _, fn := range idp.DistributionPoint.FullName {
if _, err := parseGeneralName(fn.FullBytes, name, false); err != nil {
errs.AddID(ErrCertListIssuingDPInvalidFullName, err)
}
}
}
// RevokedCertificate represents the unnamed ASN.1 structure that makes up the
// revokedCertificates member of the TBSCertList structure from RFC 5280, s5.1.
// It has the same content as pkix.RevokedCertificate but the extensions are
// included in a parsed format.
type RevokedCertificate struct {
pkix.RevokedCertificate
// Cracked out extensions:
RevocationReason RevocationReasonCode
InvalidityDate time.Time
Issuer GeneralNames
}
func parseRevokedCertificate(pkixRevoked pkix.RevokedCertificate, errs *Errors) *RevokedCertificate {
result := RevokedCertificate{RevokedCertificate: pkixRevoked}
for _, e := range pkixRevoked.Extensions {
if expectCritical, present := certExtCritical[e.Id.String()]; present {
if e.Critical && !expectCritical {
errs.AddID(ErrUnexpectedlyCriticalRevokedCertExtension, e.Id)
} else if !e.Critical && expectCritical {
errs.AddID(ErrUnexpectedlyNonCriticalRevokedCertExtension, e.Id)
}
}
switch {
case e.Id.Equal(OIDExtensionCRLReasons):
// RFC 5280, s5.3.1
var reason asn1.Enumerated
if rest, err := asn1.Unmarshal(e.Value, &reason); err != nil {
errs.AddID(ErrInvalidRevocationReason, err)
} else if len(rest) != 0 {
errs.AddID(ErrTrailingRevocationReason)
}
result.RevocationReason = RevocationReasonCode(reason)
case e.Id.Equal(OIDExtensionInvalidityDate):
// RFC 5280, s5.3.2
if rest, err := asn1.Unmarshal(e.Value, &result.InvalidityDate); err != nil {
errs.AddID(ErrInvalidRevocationInvalidityDate, err)
} else if len(rest) != 0 {
errs.AddID(ErrTrailingRevocationInvalidityDate)
}
case e.Id.Equal(OIDExtensionCertificateIssuer):
// RFC 5280, s5.3.3
if err := parseGeneralNames(e.Value, &result.Issuer); err != nil {
errs.AddID(ErrInvalidRevocationIssuer, err)
}
default:
if e.Critical {
errs.AddID(ErrUnhandledCriticalRevokedCertExtension, e.Id)
}
}
}
return &result
}
// CheckCertificateListSignature checks that the signature in crl is from c.
func (c *Certificate) CheckCertificateListSignature(crl *CertificateList) error {
algo := SignatureAlgorithmFromAI(crl.SignatureAlgorithm)
return c.CheckSignature(algo, crl.TBSCertList.Raw, crl.SignatureValue.RightAlign())
}
| {
errs.AddID(ErrTrailingCertListAuthKeyID)
} | conditional_block |
revoked.go | // Copyright 2017 Google LLC. All Rights Reserved.
//
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package x509
import (
"bytes"
"encoding/pem"
"time"
"github.com/google/certificate-transparency-go/asn1"
"github.com/google/certificate-transparency-go/x509/pkix"
)
// OID values for CRL extensions (TBSCertList.Extensions), RFC 5280 s5.2.
var (
OIDExtensionCRLNumber = asn1.ObjectIdentifier{2, 5, 29, 20}
OIDExtensionDeltaCRLIndicator = asn1.ObjectIdentifier{2, 5, 29, 27}
OIDExtensionIssuingDistributionPoint = asn1.ObjectIdentifier{2, 5, 29, 28}
)
// OID values for CRL entry extensions (RevokedCertificate.Extensions), RFC 5280 s5.3
var (
OIDExtensionCRLReasons = asn1.ObjectIdentifier{2, 5, 29, 21}
OIDExtensionInvalidityDate = asn1.ObjectIdentifier{2, 5, 29, 24}
OIDExtensionCertificateIssuer = asn1.ObjectIdentifier{2, 5, 29, 29}
)
// RevocationReasonCode represents the reason for a certificate revocation; see RFC 5280 s5.3.1.
type RevocationReasonCode asn1.Enumerated
// RevocationReasonCode values.
var (
Unspecified = RevocationReasonCode(0)
KeyCompromise = RevocationReasonCode(1)
CACompromise = RevocationReasonCode(2)
AffiliationChanged = RevocationReasonCode(3)
Superseded = RevocationReasonCode(4)
CessationOfOperation = RevocationReasonCode(5)
CertificateHold = RevocationReasonCode(6)
RemoveFromCRL = RevocationReasonCode(8)
PrivilegeWithdrawn = RevocationReasonCode(9)
AACompromise = RevocationReasonCode(10)
)
// ReasonFlag holds a bitmask of applicable revocation reasons, from RFC 5280 s4.2.1.13
type ReasonFlag int
// ReasonFlag values.
const (
UnusedFlag ReasonFlag = 1 << iota
KeyCompromiseFlag
CACompromiseFlag
AffiliationChangedFlag
SupersededFlag
CessationOfOperationFlag
CertificateHoldFlag
PrivilegeWithdrawnFlag
AACompromiseFlag
)
// CertificateList represents the ASN.1 structure of the same name from RFC 5280, s5.1.
// It has the same content as pkix.CertificateList, but the contents include parsed versions
// of any extensions.
type CertificateList struct {
Raw asn1.RawContent
TBSCertList TBSCertList
SignatureAlgorithm pkix.AlgorithmIdentifier
SignatureValue asn1.BitString
}
// ExpiredAt reports whether now is past the expiry time of certList.
func (certList *CertificateList) ExpiredAt(now time.Time) bool {
return now.After(certList.TBSCertList.NextUpdate)
}
// Indication of whether extensions need to be critical or non-critical. Extensions that
// can be either are omitted from the map.
var listExtCritical = map[string]bool{
// From RFC 5280...
OIDExtensionAuthorityKeyId.String(): false, // s5.2.1
OIDExtensionIssuerAltName.String(): false, // s5.2.2
OIDExtensionCRLNumber.String(): false, // s5.2.3
OIDExtensionDeltaCRLIndicator.String(): true, // s5.2.4
OIDExtensionIssuingDistributionPoint.String(): true, // s5.2.5
OIDExtensionFreshestCRL.String(): false, // s5.2.6
OIDExtensionAuthorityInfoAccess.String(): false, // s5.2.7
}
var certExtCritical = map[string]bool{
// From RFC 5280...
OIDExtensionCRLReasons.String(): false, // s5.3.1
OIDExtensionInvalidityDate.String(): false, // s5.3.2
OIDExtensionCertificateIssuer.String(): true, // s5.3.3
}
// IssuingDistributionPoint represents the ASN.1 structure of the same
// name
type IssuingDistributionPoint struct {
DistributionPoint distributionPointName `asn1:"optional,tag:0"`
OnlyContainsUserCerts bool `asn1:"optional,tag:1"`
OnlyContainsCACerts bool `asn1:"optional,tag:2"`
OnlySomeReasons asn1.BitString `asn1:"optional,tag:3"`
IndirectCRL bool `asn1:"optional,tag:4"`
OnlyContainsAttributeCerts bool `asn1:"optional,tag:5"`
}
// TBSCertList represents the ASN.1 structure of the same name from RFC
// 5280, section 5.1. It has the same content as pkix.TBSCertificateList
// but the extensions are included in a parsed format.
type TBSCertList struct {
Raw asn1.RawContent
Version int
Signature pkix.AlgorithmIdentifier
Issuer pkix.RDNSequence
ThisUpdate time.Time
NextUpdate time.Time
RevokedCertificates []*RevokedCertificate
Extensions []pkix.Extension
// Cracked out extensions:
AuthorityKeyID []byte
IssuerAltNames GeneralNames
CRLNumber int
BaseCRLNumber int // -1 if no delta CRL present
IssuingDistributionPoint IssuingDistributionPoint
IssuingDPFullNames GeneralNames
FreshestCRLDistributionPoint []string
OCSPServer []string
IssuingCertificateURL []string
}
// ParseCertificateList parses a CertificateList (e.g. a CRL) from the given
// bytes. It's often the case that PEM encoded CRLs will appear where they
// should be DER encoded, so this function will transparently handle PEM
// encoding as long as there isn't any leading garbage.
func ParseCertificateList(clBytes []byte) (*CertificateList, error) {
if bytes.HasPrefix(clBytes, pemCRLPrefix) {
block, _ := pem.Decode(clBytes)
if block != nil && block.Type == pemType {
clBytes = block.Bytes
}
}
return ParseCertificateListDER(clBytes)
}
// ParseCertificateListDER parses a DER encoded CertificateList from the given bytes.
// For non-fatal errors, this function returns both an error and a CertificateList
// object.
func ParseCertificateListDER(derBytes []byte) (*CertificateList, error) {
var errs Errors
// First parse the DER into the pkix structures.
pkixList := new(pkix.CertificateList)
if rest, err := asn1.Unmarshal(derBytes, pkixList); err != nil {
errs.AddID(ErrInvalidCertList, err)
return nil, &errs
} else if len(rest) != 0 {
errs.AddID(ErrTrailingCertList)
return nil, &errs
}
// Transcribe the revoked certs but crack out extensions.
revokedCerts := make([]*RevokedCertificate, len(pkixList.TBSCertList.RevokedCertificates))
for i, pkixRevoked := range pkixList.TBSCertList.RevokedCertificates {
revokedCerts[i] = parseRevokedCertificate(pkixRevoked, &errs)
if revokedCerts[i] == nil {
return nil, &errs
}
}
certList := CertificateList{
Raw: derBytes,
TBSCertList: TBSCertList{
Raw: pkixList.TBSCertList.Raw,
Version: pkixList.TBSCertList.Version,
Signature: pkixList.TBSCertList.Signature,
Issuer: pkixList.TBSCertList.Issuer,
ThisUpdate: pkixList.TBSCertList.ThisUpdate,
NextUpdate: pkixList.TBSCertList.NextUpdate,
RevokedCertificates: revokedCerts,
Extensions: pkixList.TBSCertList.Extensions,
CRLNumber: -1,
BaseCRLNumber: -1,
},
SignatureAlgorithm: pkixList.SignatureAlgorithm,
SignatureValue: pkixList.SignatureValue,
}
// Now crack out extensions.
for _, e := range certList.TBSCertList.Extensions {
if expectCritical, present := listExtCritical[e.Id.String()]; present {
if e.Critical && !expectCritical {
errs.AddID(ErrUnexpectedlyCriticalCertListExtension, e.Id)
} else if !e.Critical && expectCritical {
errs.AddID(ErrUnexpectedlyNonCriticalCertListExtension, e.Id)
}
}
switch {
case e.Id.Equal(OIDExtensionAuthorityKeyId):
// RFC 5280 s5.2.1
var a authKeyId
if rest, err := asn1.Unmarshal(e.Value, &a); err != nil {
errs.AddID(ErrInvalidCertListAuthKeyID, err)
} else if len(rest) != 0 {
errs.AddID(ErrTrailingCertListAuthKeyID)
}
certList.TBSCertList.AuthorityKeyID = a.Id
case e.Id.Equal(OIDExtensionIssuerAltName):
// RFC 5280 s5.2.2
if err := parseGeneralNames(e.Value, &certList.TBSCertList.IssuerAltNames); err != nil {
errs.AddID(ErrInvalidCertListIssuerAltName, err)
}
case e.Id.Equal(OIDExtensionCRLNumber):
// RFC 5280 s5.2.3
if rest, err := asn1.Unmarshal(e.Value, &certList.TBSCertList.CRLNumber); err != nil {
errs.AddID(ErrInvalidCertListCRLNumber, err)
} else if len(rest) != 0 {
errs.AddID(ErrTrailingCertListCRLNumber)
}
if certList.TBSCertList.CRLNumber < 0 {
errs.AddID(ErrNegativeCertListCRLNumber, certList.TBSCertList.CRLNumber)
}
case e.Id.Equal(OIDExtensionDeltaCRLIndicator):
// RFC 5280 s5.2.4
if rest, err := asn1.Unmarshal(e.Value, &certList.TBSCertList.BaseCRLNumber); err != nil {
errs.AddID(ErrInvalidCertListDeltaCRL, err)
} else if len(rest) != 0 {
errs.AddID(ErrTrailingCertListDeltaCRL)
}
if certList.TBSCertList.BaseCRLNumber < 0 {
errs.AddID(ErrNegativeCertListDeltaCRL, certList.TBSCertList.BaseCRLNumber)
}
case e.Id.Equal(OIDExtensionIssuingDistributionPoint):
parseIssuingDistributionPoint(e.Value, &certList.TBSCertList.IssuingDistributionPoint, &certList.TBSCertList.IssuingDPFullNames, &errs)
case e.Id.Equal(OIDExtensionFreshestCRL):
// RFC 5280 s5.2.6
if err := parseDistributionPoints(e.Value, &certList.TBSCertList.FreshestCRLDistributionPoint); err != nil {
errs.AddID(ErrInvalidCertListFreshestCRL, err)
return nil, err
}
case e.Id.Equal(OIDExtensionAuthorityInfoAccess):
// RFC 5280 s5.2.7
var aia []accessDescription
if rest, err := asn1.Unmarshal(e.Value, &aia); err != nil {
errs.AddID(ErrInvalidCertListAuthInfoAccess, err)
} else if len(rest) != 0 {
errs.AddID(ErrTrailingCertListAuthInfoAccess)
}
for _, v := range aia {
// GeneralName: uniformResourceIdentifier [6] IA5String
if v.Location.Tag != tagURI {
continue
}
switch {
case v.Method.Equal(OIDAuthorityInfoAccessOCSP):
certList.TBSCertList.OCSPServer = append(certList.TBSCertList.OCSPServer, string(v.Location.Bytes))
case v.Method.Equal(OIDAuthorityInfoAccessIssuers):
certList.TBSCertList.IssuingCertificateURL = append(certList.TBSCertList.IssuingCertificateURL, string(v.Location.Bytes))
}
// TODO(drysdale): cope with more possibilities
}
default:
if e.Critical {
errs.AddID(ErrUnhandledCriticalCertListExtension, e.Id)
}
}
}
if errs.Fatal() {
return nil, &errs
}
if errs.Empty() {
return &certList, nil
}
return &certList, &errs
}
func | (data []byte, idp *IssuingDistributionPoint, name *GeneralNames, errs *Errors) {
// RFC 5280 s5.2.5
if rest, err := asn1.Unmarshal(data, idp); err != nil {
errs.AddID(ErrInvalidCertListIssuingDP, err)
} else if len(rest) != 0 {
errs.AddID(ErrTrailingCertListIssuingDP)
}
typeCount := 0
if idp.OnlyContainsUserCerts {
typeCount++
}
if idp.OnlyContainsCACerts {
typeCount++
}
if idp.OnlyContainsAttributeCerts {
typeCount++
}
if typeCount > 1 {
errs.AddID(ErrCertListIssuingDPMultipleTypes, idp.OnlyContainsUserCerts, idp.OnlyContainsCACerts, idp.OnlyContainsAttributeCerts)
}
for _, fn := range idp.DistributionPoint.FullName {
if _, err := parseGeneralName(fn.FullBytes, name, false); err != nil {
errs.AddID(ErrCertListIssuingDPInvalidFullName, err)
}
}
}
// RevokedCertificate represents the unnamed ASN.1 structure that makes up the
// revokedCertificates member of the TBSCertList structure from RFC 5280, s5.1.
// It has the same content as pkix.RevokedCertificate but the extensions are
// included in a parsed format.
type RevokedCertificate struct {
pkix.RevokedCertificate
// Cracked out extensions:
RevocationReason RevocationReasonCode
InvalidityDate time.Time
Issuer GeneralNames
}
func parseRevokedCertificate(pkixRevoked pkix.RevokedCertificate, errs *Errors) *RevokedCertificate {
result := RevokedCertificate{RevokedCertificate: pkixRevoked}
for _, e := range pkixRevoked.Extensions {
if expectCritical, present := certExtCritical[e.Id.String()]; present {
if e.Critical && !expectCritical {
errs.AddID(ErrUnexpectedlyCriticalRevokedCertExtension, e.Id)
} else if !e.Critical && expectCritical {
errs.AddID(ErrUnexpectedlyNonCriticalRevokedCertExtension, e.Id)
}
}
switch {
case e.Id.Equal(OIDExtensionCRLReasons):
// RFC 5280, s5.3.1
var reason asn1.Enumerated
if rest, err := asn1.Unmarshal(e.Value, &reason); err != nil {
errs.AddID(ErrInvalidRevocationReason, err)
} else if len(rest) != 0 {
errs.AddID(ErrTrailingRevocationReason)
}
result.RevocationReason = RevocationReasonCode(reason)
case e.Id.Equal(OIDExtensionInvalidityDate):
// RFC 5280, s5.3.2
if rest, err := asn1.Unmarshal(e.Value, &result.InvalidityDate); err != nil {
errs.AddID(ErrInvalidRevocationInvalidityDate, err)
} else if len(rest) != 0 {
errs.AddID(ErrTrailingRevocationInvalidityDate)
}
case e.Id.Equal(OIDExtensionCertificateIssuer):
// RFC 5280, s5.3.3
if err := parseGeneralNames(e.Value, &result.Issuer); err != nil {
errs.AddID(ErrInvalidRevocationIssuer, err)
}
default:
if e.Critical {
errs.AddID(ErrUnhandledCriticalRevokedCertExtension, e.Id)
}
}
}
return &result
}
// CheckCertificateListSignature checks that the signature in crl is from c.
func (c *Certificate) CheckCertificateListSignature(crl *CertificateList) error {
algo := SignatureAlgorithmFromAI(crl.SignatureAlgorithm)
return c.CheckSignature(algo, crl.TBSCertList.Raw, crl.SignatureValue.RightAlign())
}
| parseIssuingDistributionPoint | identifier_name |
revoked.go | // Copyright 2017 Google LLC. All Rights Reserved.
//
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package x509
import (
"bytes"
"encoding/pem"
"time"
"github.com/google/certificate-transparency-go/asn1"
"github.com/google/certificate-transparency-go/x509/pkix"
)
// OID values for CRL extensions (TBSCertList.Extensions), RFC 5280 s5.2.
var (
OIDExtensionCRLNumber = asn1.ObjectIdentifier{2, 5, 29, 20}
OIDExtensionDeltaCRLIndicator = asn1.ObjectIdentifier{2, 5, 29, 27}
OIDExtensionIssuingDistributionPoint = asn1.ObjectIdentifier{2, 5, 29, 28}
)
// OID values for CRL entry extensions (RevokedCertificate.Extensions), RFC 5280 s5.3
var (
OIDExtensionCRLReasons = asn1.ObjectIdentifier{2, 5, 29, 21}
OIDExtensionInvalidityDate = asn1.ObjectIdentifier{2, 5, 29, 24}
OIDExtensionCertificateIssuer = asn1.ObjectIdentifier{2, 5, 29, 29}
)
// RevocationReasonCode represents the reason for a certificate revocation; see RFC 5280 s5.3.1.
type RevocationReasonCode asn1.Enumerated
// RevocationReasonCode values.
var (
Unspecified = RevocationReasonCode(0)
KeyCompromise = RevocationReasonCode(1)
CACompromise = RevocationReasonCode(2)
AffiliationChanged = RevocationReasonCode(3)
Superseded = RevocationReasonCode(4)
CessationOfOperation = RevocationReasonCode(5)
CertificateHold = RevocationReasonCode(6)
RemoveFromCRL = RevocationReasonCode(8)
PrivilegeWithdrawn = RevocationReasonCode(9)
AACompromise = RevocationReasonCode(10)
)
// ReasonFlag holds a bitmask of applicable revocation reasons, from RFC 5280 s4.2.1.13
type ReasonFlag int
// ReasonFlag values.
const (
UnusedFlag ReasonFlag = 1 << iota
KeyCompromiseFlag
CACompromiseFlag
AffiliationChangedFlag
SupersededFlag
CessationOfOperationFlag
CertificateHoldFlag
PrivilegeWithdrawnFlag
AACompromiseFlag
)
// CertificateList represents the ASN.1 structure of the same name from RFC 5280, s5.1.
// It has the same content as pkix.CertificateList, but the contents include parsed versions
// of any extensions.
type CertificateList struct {
Raw asn1.RawContent
TBSCertList TBSCertList
SignatureAlgorithm pkix.AlgorithmIdentifier
SignatureValue asn1.BitString
}
// ExpiredAt reports whether now is past the expiry time of certList.
func (certList *CertificateList) ExpiredAt(now time.Time) bool {
return now.After(certList.TBSCertList.NextUpdate)
}
// Indication of whether extensions need to be critical or non-critical. Extensions that
// can be either are omitted from the map.
var listExtCritical = map[string]bool{
// From RFC 5280...
OIDExtensionAuthorityKeyId.String(): false, // s5.2.1
OIDExtensionIssuerAltName.String(): false, // s5.2.2
OIDExtensionCRLNumber.String(): false, // s5.2.3
OIDExtensionDeltaCRLIndicator.String(): true, // s5.2.4
OIDExtensionIssuingDistributionPoint.String(): true, // s5.2.5
OIDExtensionFreshestCRL.String(): false, // s5.2.6
OIDExtensionAuthorityInfoAccess.String(): false, // s5.2.7
}
var certExtCritical = map[string]bool{
// From RFC 5280...
OIDExtensionCRLReasons.String(): false, // s5.3.1
OIDExtensionInvalidityDate.String(): false, // s5.3.2
OIDExtensionCertificateIssuer.String(): true, // s5.3.3
}
// IssuingDistributionPoint represents the ASN.1 structure of the same
// name
type IssuingDistributionPoint struct {
DistributionPoint distributionPointName `asn1:"optional,tag:0"`
OnlyContainsUserCerts bool `asn1:"optional,tag:1"`
OnlyContainsCACerts bool `asn1:"optional,tag:2"`
OnlySomeReasons asn1.BitString `asn1:"optional,tag:3"`
IndirectCRL bool `asn1:"optional,tag:4"`
OnlyContainsAttributeCerts bool `asn1:"optional,tag:5"`
}
// TBSCertList represents the ASN.1 structure of the same name from RFC
// 5280, section 5.1. It has the same content as pkix.TBSCertificateList
// but the extensions are included in a parsed format.
type TBSCertList struct {
Raw asn1.RawContent
Version int
Signature pkix.AlgorithmIdentifier
Issuer pkix.RDNSequence
ThisUpdate time.Time
NextUpdate time.Time
RevokedCertificates []*RevokedCertificate
Extensions []pkix.Extension
// Cracked out extensions:
AuthorityKeyID []byte
IssuerAltNames GeneralNames
CRLNumber int
BaseCRLNumber int // -1 if no delta CRL present
IssuingDistributionPoint IssuingDistributionPoint
IssuingDPFullNames GeneralNames
FreshestCRLDistributionPoint []string
OCSPServer []string
IssuingCertificateURL []string
}
// ParseCertificateList parses a CertificateList (e.g. a CRL) from the given
// bytes. It's often the case that PEM encoded CRLs will appear where they
// should be DER encoded, so this function will transparently handle PEM
// encoding as long as there isn't any leading garbage.
func ParseCertificateList(clBytes []byte) (*CertificateList, error) |
// ParseCertificateListDER parses a DER encoded CertificateList from the given bytes.
// For non-fatal errors, this function returns both an error and a CertificateList
// object.
func ParseCertificateListDER(derBytes []byte) (*CertificateList, error) {
var errs Errors
// First parse the DER into the pkix structures.
pkixList := new(pkix.CertificateList)
if rest, err := asn1.Unmarshal(derBytes, pkixList); err != nil {
errs.AddID(ErrInvalidCertList, err)
return nil, &errs
} else if len(rest) != 0 {
errs.AddID(ErrTrailingCertList)
return nil, &errs
}
// Transcribe the revoked certs but crack out extensions.
revokedCerts := make([]*RevokedCertificate, len(pkixList.TBSCertList.RevokedCertificates))
for i, pkixRevoked := range pkixList.TBSCertList.RevokedCertificates {
revokedCerts[i] = parseRevokedCertificate(pkixRevoked, &errs)
if revokedCerts[i] == nil {
return nil, &errs
}
}
certList := CertificateList{
Raw: derBytes,
TBSCertList: TBSCertList{
Raw: pkixList.TBSCertList.Raw,
Version: pkixList.TBSCertList.Version,
Signature: pkixList.TBSCertList.Signature,
Issuer: pkixList.TBSCertList.Issuer,
ThisUpdate: pkixList.TBSCertList.ThisUpdate,
NextUpdate: pkixList.TBSCertList.NextUpdate,
RevokedCertificates: revokedCerts,
Extensions: pkixList.TBSCertList.Extensions,
CRLNumber: -1,
BaseCRLNumber: -1,
},
SignatureAlgorithm: pkixList.SignatureAlgorithm,
SignatureValue: pkixList.SignatureValue,
}
// Now crack out extensions.
for _, e := range certList.TBSCertList.Extensions {
if expectCritical, present := listExtCritical[e.Id.String()]; present {
if e.Critical && !expectCritical {
errs.AddID(ErrUnexpectedlyCriticalCertListExtension, e.Id)
} else if !e.Critical && expectCritical {
errs.AddID(ErrUnexpectedlyNonCriticalCertListExtension, e.Id)
}
}
switch {
case e.Id.Equal(OIDExtensionAuthorityKeyId):
// RFC 5280 s5.2.1
var a authKeyId
if rest, err := asn1.Unmarshal(e.Value, &a); err != nil {
errs.AddID(ErrInvalidCertListAuthKeyID, err)
} else if len(rest) != 0 {
errs.AddID(ErrTrailingCertListAuthKeyID)
}
certList.TBSCertList.AuthorityKeyID = a.Id
case e.Id.Equal(OIDExtensionIssuerAltName):
// RFC 5280 s5.2.2
if err := parseGeneralNames(e.Value, &certList.TBSCertList.IssuerAltNames); err != nil {
errs.AddID(ErrInvalidCertListIssuerAltName, err)
}
case e.Id.Equal(OIDExtensionCRLNumber):
// RFC 5280 s5.2.3
if rest, err := asn1.Unmarshal(e.Value, &certList.TBSCertList.CRLNumber); err != nil {
errs.AddID(ErrInvalidCertListCRLNumber, err)
} else if len(rest) != 0 {
errs.AddID(ErrTrailingCertListCRLNumber)
}
if certList.TBSCertList.CRLNumber < 0 {
errs.AddID(ErrNegativeCertListCRLNumber, certList.TBSCertList.CRLNumber)
}
case e.Id.Equal(OIDExtensionDeltaCRLIndicator):
// RFC 5280 s5.2.4
if rest, err := asn1.Unmarshal(e.Value, &certList.TBSCertList.BaseCRLNumber); err != nil {
errs.AddID(ErrInvalidCertListDeltaCRL, err)
} else if len(rest) != 0 {
errs.AddID(ErrTrailingCertListDeltaCRL)
}
if certList.TBSCertList.BaseCRLNumber < 0 {
errs.AddID(ErrNegativeCertListDeltaCRL, certList.TBSCertList.BaseCRLNumber)
}
case e.Id.Equal(OIDExtensionIssuingDistributionPoint):
parseIssuingDistributionPoint(e.Value, &certList.TBSCertList.IssuingDistributionPoint, &certList.TBSCertList.IssuingDPFullNames, &errs)
case e.Id.Equal(OIDExtensionFreshestCRL):
// RFC 5280 s5.2.6
if err := parseDistributionPoints(e.Value, &certList.TBSCertList.FreshestCRLDistributionPoint); err != nil {
errs.AddID(ErrInvalidCertListFreshestCRL, err)
return nil, err
}
case e.Id.Equal(OIDExtensionAuthorityInfoAccess):
// RFC 5280 s5.2.7
var aia []accessDescription
if rest, err := asn1.Unmarshal(e.Value, &aia); err != nil {
errs.AddID(ErrInvalidCertListAuthInfoAccess, err)
} else if len(rest) != 0 {
errs.AddID(ErrTrailingCertListAuthInfoAccess)
}
for _, v := range aia {
// GeneralName: uniformResourceIdentifier [6] IA5String
if v.Location.Tag != tagURI {
continue
}
switch {
case v.Method.Equal(OIDAuthorityInfoAccessOCSP):
certList.TBSCertList.OCSPServer = append(certList.TBSCertList.OCSPServer, string(v.Location.Bytes))
case v.Method.Equal(OIDAuthorityInfoAccessIssuers):
certList.TBSCertList.IssuingCertificateURL = append(certList.TBSCertList.IssuingCertificateURL, string(v.Location.Bytes))
}
// TODO(drysdale): cope with more possibilities
}
default:
if e.Critical {
errs.AddID(ErrUnhandledCriticalCertListExtension, e.Id)
}
}
}
if errs.Fatal() {
return nil, &errs
}
if errs.Empty() {
return &certList, nil
}
return &certList, &errs
}
func parseIssuingDistributionPoint(data []byte, idp *IssuingDistributionPoint, name *GeneralNames, errs *Errors) {
// RFC 5280 s5.2.5
if rest, err := asn1.Unmarshal(data, idp); err != nil {
errs.AddID(ErrInvalidCertListIssuingDP, err)
} else if len(rest) != 0 {
errs.AddID(ErrTrailingCertListIssuingDP)
}
typeCount := 0
if idp.OnlyContainsUserCerts {
typeCount++
}
if idp.OnlyContainsCACerts {
typeCount++
}
if idp.OnlyContainsAttributeCerts {
typeCount++
}
if typeCount > 1 {
errs.AddID(ErrCertListIssuingDPMultipleTypes, idp.OnlyContainsUserCerts, idp.OnlyContainsCACerts, idp.OnlyContainsAttributeCerts)
}
for _, fn := range idp.DistributionPoint.FullName {
if _, err := parseGeneralName(fn.FullBytes, name, false); err != nil {
errs.AddID(ErrCertListIssuingDPInvalidFullName, err)
}
}
}
// RevokedCertificate represents the unnamed ASN.1 structure that makes up the
// revokedCertificates member of the TBSCertList structure from RFC 5280, s5.1.
// It has the same content as pkix.RevokedCertificate but the extensions are
// included in a parsed format.
type RevokedCertificate struct {
pkix.RevokedCertificate
// Cracked out extensions:
RevocationReason RevocationReasonCode
InvalidityDate time.Time
Issuer GeneralNames
}
func parseRevokedCertificate(pkixRevoked pkix.RevokedCertificate, errs *Errors) *RevokedCertificate {
result := RevokedCertificate{RevokedCertificate: pkixRevoked}
for _, e := range pkixRevoked.Extensions {
if expectCritical, present := certExtCritical[e.Id.String()]; present {
if e.Critical && !expectCritical {
errs.AddID(ErrUnexpectedlyCriticalRevokedCertExtension, e.Id)
} else if !e.Critical && expectCritical {
errs.AddID(ErrUnexpectedlyNonCriticalRevokedCertExtension, e.Id)
}
}
switch {
case e.Id.Equal(OIDExtensionCRLReasons):
// RFC 5280, s5.3.1
var reason asn1.Enumerated
if rest, err := asn1.Unmarshal(e.Value, &reason); err != nil {
errs.AddID(ErrInvalidRevocationReason, err)
} else if len(rest) != 0 {
errs.AddID(ErrTrailingRevocationReason)
}
result.RevocationReason = RevocationReasonCode(reason)
case e.Id.Equal(OIDExtensionInvalidityDate):
// RFC 5280, s5.3.2
if rest, err := asn1.Unmarshal(e.Value, &result.InvalidityDate); err != nil {
errs.AddID(ErrInvalidRevocationInvalidityDate, err)
} else if len(rest) != 0 {
errs.AddID(ErrTrailingRevocationInvalidityDate)
}
case e.Id.Equal(OIDExtensionCertificateIssuer):
// RFC 5280, s5.3.3
if err := parseGeneralNames(e.Value, &result.Issuer); err != nil {
errs.AddID(ErrInvalidRevocationIssuer, err)
}
default:
if e.Critical {
errs.AddID(ErrUnhandledCriticalRevokedCertExtension, e.Id)
}
}
}
return &result
}
// CheckCertificateListSignature checks that the signature in crl is from c.
func (c *Certificate) CheckCertificateListSignature(crl *CertificateList) error {
algo := SignatureAlgorithmFromAI(crl.SignatureAlgorithm)
return c.CheckSignature(algo, crl.TBSCertList.Raw, crl.SignatureValue.RightAlign())
}
| {
if bytes.HasPrefix(clBytes, pemCRLPrefix) {
block, _ := pem.Decode(clBytes)
if block != nil && block.Type == pemType {
clBytes = block.Bytes
}
}
return ParseCertificateListDER(clBytes)
} | identifier_body |
module.rs | //! Code for module-level double representation processing.
use crate::prelude::*;
use enso_text::index::*;
use crate::alias_analysis;
use crate::definition;
use crate::definition::DefinitionProvider;
use crate::definition::EmptyDefinitionId;
use crate::identifier;
use crate::identifier::Identifier;
use crate::import;
use crate::name::NamePath;
use crate::name::QualifiedName;
use ast::crumbs::ChildAst;
use ast::crumbs::Located;
use ast::crumbs::ModuleCrumb;
use ast::known;
use ast::BlockLine;
use engine_protocol::language_server;
use std::fmt::Formatter;
// ==============
// === Errors ===
// ==============
#[derive(Copy, Clone, Debug, Fail)]
#[fail(display = "Id segment list is empty.")]
#[allow(missing_docs)]
pub struct EmptySegments;
#[derive(Clone, Debug, Fail)]
#[fail(display = "Import `{}` was not found in the module.", _0)]
#[allow(missing_docs)]
pub struct ImportNotFound(pub String);
#[derive(Clone, Copy, Debug, Fail)]
#[fail(display = "Import with ID `{}` was not found in the module.", _0)]
#[allow(missing_docs)]
pub struct ImportIdNotFound(pub import::Id);
#[derive(Clone, Copy, Debug, Fail)]
#[fail(display = "Line index is out of bounds.")]
#[allow(missing_docs)]
pub struct LineIndexOutOfBounds;
#[allow(missing_docs)]
#[derive(Fail, Clone, Debug)]
#[fail(display = "Cannot find method with pointer {:?}.", _0)]
pub struct CannotFindMethod(language_server::MethodPointer);
#[allow(missing_docs)]
#[derive(Fail, Clone, Debug)]
#[fail(display = "The definition with crumbs {:?} is not a direct child of the module.", _0)]
pub struct NotDirectChild(ast::Crumbs);
// ==========
// === Id ===
// ==========
/// The segments of module name. Allow finding module in the project.
#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub struct Id {
/// The last segment being a module name. For project's main module it should be equal
/// to [`PROJECTS_MAIN_MODULE`].
pub name: ImString,
/// The segments of all parent modules, from the top module to the direct parent. Does **not**
/// include project name.
pub parent_modules: Vec<ImString>,
}
impl Id {
/// Create module id from list of segments. The list shall not contain the project name nor
/// namespace. Fails if the list is empty (the module name is required).
pub fn try_from_segments(
segments: impl IntoIterator<Item: Into<ImString>>,
) -> FallibleResult<Self> {
let mut segments = segments.into_iter().map(Into::into).collect_vec();
let name = segments.pop().ok_or(EmptySegments)?;
Ok(Self { name, parent_modules: segments })
}
/// Return the iterator over id's segments.
pub fn segments(&self) -> impl Iterator<Item = &ImString> {
self.parent_modules.iter().chain(iter::once(&self.name))
}
}
impl IntoIterator for Id {
type Item = ImString;
type IntoIter = impl Iterator<Item = Self::Item>;
fn into_iter(self) -> Self::IntoIter {
self.parent_modules.into_iter().chain(iter::once(self.name))
}
}
impl From<Id> for NamePath {
fn from(id: Id) -> Self {
id.into_iter().collect()
}
}
impl Display for Id {
fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
write!(f, "{}", self.segments().format("."))
}
}
// ============
// === Info ===
// ============
/// Wrapper allowing getting information about the module and updating it.
#[derive(Clone, Debug)]
pub struct Info {
#[allow(missing_docs)]
pub ast: known::Module,
}
impl Info {
/// Generate a name for a definition that can be introduced without side-effects.
///
/// The name shall be generated by appending number to the given base string.
pub fn generate_name(&self, base: &str) -> FallibleResult<Identifier> {
let used_names = self.used_names();
let used_names = used_names.iter().map(|name| name.item.as_str());
identifier::generate_name(base, used_names)
}
/// Identifiers introduced or referred to in the module's scope.
///
/// Introducing identifier not included on this list should have no side-effects on the name
/// resolution in the code in this graph.
pub fn used_names(&self) -> Vec<Located<String>> {
let usage = alias_analysis::analyze_crumbable(self.ast.shape());
usage.all_identifiers()
}
/// Iterate over all lines in module that contain an import declaration.
pub fn enumerate_imports(&self) -> impl Iterator<Item = (ModuleCrumb, import::Info)> + '_ {
let children = self.ast.shape().enumerate();
children.filter_map(|(crumb, ast)| Some((crumb, import::Info::from_ast(ast)?)))
}
/// Iterate over all import declarations in the module.
///
/// If the caller wants to know *where* the declarations are, use `enumerate_imports`.
pub fn iter_imports(&self) -> impl Iterator<Item = import::Info> + '_ {
self.enumerate_imports().map(|(_, import)| import)
}
/// Check if module contains import with given id.
pub fn contains_import(&self, id: import::Id) -> bool {
self.iter_imports().any(|import| import.id() == id)
}
/// Add a new line to the module's block.
///
/// Note that indices are the "module line" indices, which usually are quite different from text
/// API line indices (because nested blocks doesn't count as separate "module lines").
pub fn add_line(&mut self, index: usize, ast: Option<Ast>) {
let line = BlockLine::new(ast);
self.ast.update_shape(|shape| shape.lines.insert(index, line))
}
/// Remove line with given index.
///
/// Returns removed line. Fails if the index is out of bounds.
pub fn remove_line(&mut self, index: usize) -> FallibleResult<BlockLine<Option<Ast>>> {
self.ast.update_shape(|shape| {
shape.lines.try_remove(index).ok_or_else(|| LineIndexOutOfBounds.into())
})
}
/// Remove a line that matches given import description.
///
/// If there is more than one line matching, only the first one will be removed.
/// Fails if there is no import matching given argument.
pub fn remove_import(&mut self, to_remove: &import::Info) -> FallibleResult {
let lookup_result = self.enumerate_imports().find(|(_, import)| import == to_remove);
let (crumb, _) = lookup_result.ok_or_else(|| ImportNotFound(to_remove.to_string()))?;
self.remove_line(crumb.line_index)?;
Ok(())
}
/// Remove a line that matches given import ID.
///
/// If there is more than one line matching, only the first one will be removed.
/// Fails if there is no import matching given argument.
pub fn remove_import_by_id(&mut self, to_remove: import::Id) -> FallibleResult {
let lookup_result = self.enumerate_imports().find(|(_, import)| import.id() == to_remove);
let (crumb, _) = lookup_result.ok_or(ImportIdNotFound(to_remove))?;
self.remove_line(crumb.line_index)?;
Ok(())
}
/// Add a new import declaration to a module.
///
/// This function will try to keep imports in lexicographic order. It returns the index where
/// import was added (index of import - an element on the list returned by `enumerate_imports`).
// TODO [mwu]
// Ideally we should not require parser but should use some sane way of generating AST from
// the `ImportInfo` value.
pub fn add_import(&mut self, parser: &parser::Parser, to_add: import::Info) -> usize {
// Find last import that is not "after" the added one lexicographically.
let previous_import =
self.enumerate_imports().take_while(|(_, import)| &to_add > import).last();
let index_to_place_at = previous_import.map_or(0, |(crumb, _)| crumb.line_index + 1);
let import_ast = parser.parse_line_ast(to_add.to_string()).unwrap();
self.add_line(index_to_place_at, Some(import_ast));
index_to_place_at
}
/// Add a new import declaration to a module.
///
/// For more details the mechanics see [`add_import`] documentation.
pub fn add_import_if_missing(
&mut self,
parser: &parser::Parser,
to_add: import::Info,
) -> Option<usize> {
(!self.contains_import(to_add.id())).then(|| self.add_import(parser, to_add))
}
/// Place the line with given AST in the module's body.
///
/// Unlike `add_line` (which is more low-level) will introduce empty lines around introduced
/// line and describes the added line location in relation to other definitions.
///
/// Typically used to place lines with definitions in the module.
pub fn add_ast(&mut self, ast: Ast, location: Placement) -> FallibleResult {
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
enum BlankLinePlacement {
Before,
After,
None,
}
let blank_line = match location {
_ if self.ast.lines.is_empty() => BlankLinePlacement::None,
Placement::Begin => BlankLinePlacement::After,
Placement::End => BlankLinePlacement::Before,
Placement::After(_) => BlankLinePlacement::Before,
Placement::Before(_) => BlankLinePlacement::After,
};
let mut index = match location {
Placement::Begin => 0,
Placement::End => self.ast.lines.len(),
Placement::Before(next_def) => locate_line_with(&self.ast, &next_def)?.line_index,
Placement::After(next_def) => locate_line_with(&self.ast, &next_def)?.line_index + 1,
};
let mut add_line = |ast_opt: Option<Ast>| {
self.add_line(index, ast_opt);
index += 1;
};
if blank_line == BlankLinePlacement::Before |
add_line(Some(ast));
if blank_line == BlankLinePlacement::After {
add_line(None);
}
Ok(())
}
/// Add a new method definition to the module.
pub fn add_method(
&mut self,
method: definition::ToAdd,
location: Placement,
parser: &parser::Parser,
) -> FallibleResult {
let no_indent = 0;
let definition_ast = method.ast(no_indent, parser)?;
self.add_ast(definition_ast, location)
}
/// Updates the given definition using the passed invokable.
pub fn update_definition(
&mut self,
id: &definition::Id,
f: impl FnOnce(definition::DefinitionInfo) -> FallibleResult<definition::DefinitionInfo>,
) -> FallibleResult {
let definition = locate(&self.ast, id)?;
let new_definition = f(definition.item)?;
let new_ast = new_definition.ast.into();
self.ast = self.ast.set_traversing(&definition.crumbs, new_ast)?;
Ok(())
}
#[cfg(test)]
pub fn expect_code(&self, expected_code: impl AsRef<str>) {
assert_eq!(self.ast.repr(), expected_code.as_ref());
}
}
impl From<known::Module> for Info {
fn from(ast: known::Module) -> Self {
Info { ast }
}
}
// =================
// === Placement ===
// =================
/// Structure describing where to place something being added to the module.
#[derive(Clone, Debug, PartialEq, Eq)]
pub enum Placement {
/// Place at the beginning of the module.
Begin,
/// Place at the end of the module.
End,
/// Place after given definition;
Before(definition::Crumb),
/// Place before given definition;
After(definition::Crumb),
}
// =======================
// === ChildDefinition ===
// =======================
/// Represents information about a definition being a direct child of this module, including its
/// location.
///
/// Internally it is `definition::ChildDefinition` with only a single `ModuleCrumb` as location.
#[derive(Clone, Debug, Deref)]
pub struct ChildDefinition(definition::ChildDefinition);
impl ChildDefinition {
fn try_retrieving_crumb(child: &definition::ChildDefinition) -> Option<ModuleCrumb> {
match child.crumbs.as_slice() {
[ast::crumbs::Crumb::Module(crumb)] => Some(*crumb),
_ => None,
}
}
/// Try constructing value from `definition::ChildDefinition`. Fails if it is not a direct child
/// of a module.
pub fn new(child: definition::ChildDefinition) -> Result<Self, NotDirectChild> {
if Self::try_retrieving_crumb(&child).is_some() {
Ok(Self(child))
} else {
Err(NotDirectChild(child.crumbs))
}
}
/// The location of this definition child in the module.
pub fn crumb(&self) -> ModuleCrumb {
// Safe, because our only constructor checks that this works. This is the type's invariant.
Self::try_retrieving_crumb(&self.0).unwrap()
}
}
impl TryFrom<definition::ChildDefinition> for ChildDefinition {
type Error = NotDirectChild;
fn try_from(value: definition::ChildDefinition) -> Result<Self, Self::Error> {
Self::new(value)
}
}
// ========================
// === Module Utilities ===
// ========================
/// Looks up graph in the module.
pub fn get_definition(
ast: &known::Module,
id: &definition::Id,
) -> FallibleResult<definition::DefinitionInfo> {
Ok(locate(ast, id)?.item)
}
/// Locate the line with given definition and return crumb that denotes it.
///
/// Fails if there is no matching definition being a direct child of the module.
pub fn locate_line_with(
ast: &known::Module,
crumb: &definition::Crumb,
) -> FallibleResult<ModuleCrumb> {
locate_child(ast, crumb).map(|child| child.crumb())
}
/// Locate the definition being the module's direct child.
pub fn locate_child(
ast: &known::Module,
crumb: &definition::Crumb,
) -> FallibleResult<ChildDefinition> {
let child = ast.def_iter().find_by_name(crumb)?;
Ok(ChildDefinition::try_from(child)?)
}
/// Traverses the module's definition tree following the given Id crumbs, looking up the definition.
pub fn locate(
ast: &known::Module,
id: &definition::Id,
) -> FallibleResult<definition::ChildDefinition> {
let mut crumbs_iter = id.crumbs.iter();
// Not exactly regular - we need special case for the first crumb as it is not a definition nor
// a children. After this we can go just from one definition to another.
let first_crumb = crumbs_iter.next().ok_or(EmptyDefinitionId)?;
let mut child = ast.def_iter().find_by_name(first_crumb)?;
for crumb in crumbs_iter {
child = definition::resolve_single_name(child, crumb)?;
}
Ok(child)
}
/// Get a definition ID that points to a method matching given pointer.
///
/// The module is assumed to be in the file identified by the `method.file` (for the purpose of
/// desugaring implicit extensions methods for modules).
///
/// The `module_name` parameter is the name of the module that contains `ast`.
pub fn lookup_method(
module_name: &QualifiedName,
ast: &known::Module,
method: &language_server::MethodPointer,
) -> FallibleResult<definition::Id> {
let qualified_typename = QualifiedName::from_text(&method.defined_on_type)?;
let defined_in_this_module = module_name == &qualified_typename;
let method_module_name = QualifiedName::from_text(&method.module)?;
let implicit_extension_allowed = method.defined_on_type == method_module_name.to_string();
for child in ast.def_iter() {
let child_name = &child.name.item;
let name_matches = child_name.name.item == method.name;
let type_matches = match child_name.extended_target.as_slice() {
[] => implicit_extension_allowed || defined_in_this_module,
[typename] => typename.item == qualified_typename.name(),
_ => child_name.explicitly_extends_type(&method.defined_on_type),
};
if name_matches && type_matches {
return Ok(definition::Id::new_single_crumb(child_name.clone()));
}
}
Err(CannotFindMethod(method.clone()).into())
}
/// Get a span in module's text representation where the given definition is located.
pub fn definition_span(
ast: &known::Module,
id: &definition::Id,
) -> FallibleResult<enso_text::Range<Byte>> {
let location = locate(ast, id)?;
ast.range_of_descendant_at(&location.crumbs)
}
impl DefinitionProvider for known::Module {
fn indent(&self) -> usize {
0
}
fn scope_kind(&self) -> definition::ScopeKind {
definition::ScopeKind::Root
}
fn enumerate_asts<'a>(&'a self) -> Box<dyn Iterator<Item = ChildAst<'a>> + 'a> {
self.ast().children()
}
}
// ================
// === MethodId ===
// ================
/// A structure identifying a method.
///
/// It is very similar to MethodPointer from language_server API, however it may point to the method
/// outside the currently opened project.
#[derive(Clone, Debug, serde::Deserialize, Eq, Hash, PartialEq, serde::Serialize)]
#[allow(missing_docs)]
pub struct MethodId {
pub module: QualifiedName,
pub defined_on_type: QualifiedName,
pub name: String,
}
// ============
// === Test ===
// ============
#[cfg(test)]
mod tests {
use super::*;
use crate::definition::DefinitionName;
use engine_protocol::language_server::MethodPointer;
#[test]
fn import_listing() {
let parser = parser::Parser::new();
let expect_imports = |code: &str, expected: &[&[&str]]| {
let ast = parser.parse_module(code, default()).unwrap();
let info = Info { ast };
let imports = info.iter_imports().collect_vec();
assert_eq!(imports.len(), expected.len());
for (import, expected_segments) in imports.iter().zip(expected) {
itertools::assert_equal(import.module.iter(), expected_segments.iter());
}
};
// TODO [mwu] waiting for fix https://github.com/enso-org/enso/issues/1016
// expect_imports("import", &[&[]]);
expect_imports("import Foo", &[&["Foo"]]);
expect_imports("import Foo.Bar", &[&["Foo", "Bar"]]);
expect_imports("foo = bar\nimport Foo.Bar", &[&["Foo", "Bar"]]);
expect_imports("import Foo.Bar\nfoo=bar\nimport Foo.Bar", &[&["Foo", "Bar"], &[
"Foo", "Bar",
]]);
}
#[test]
fn import_adding_and_removing() {
let parser = parser::Parser::new();
let code = "import Foo.Bar.Baz";
let ast = parser.parse_module(code, default()).unwrap();
let mut info = Info { ast };
let import = |code| {
let ast = parser.parse_line_ast(code).unwrap();
import::Info::from_ast(&ast).unwrap()
};
info.add_import(&parser, import("import Bar.Gar"));
info.expect_code("import Bar.Gar\nimport Foo.Bar.Baz");
info.add_import(&parser, import("import Gar.Bar"));
info.expect_code("import Bar.Gar\nimport Foo.Bar.Baz\nimport Gar.Bar");
info.remove_import(&import("import Foo.Bar.Baz")).unwrap();
info.expect_code("import Bar.Gar\nimport Gar.Bar");
info.remove_import(&import("import Foo.Bar.Baz")).unwrap_err();
info.expect_code("import Bar.Gar\nimport Gar.Bar");
info.remove_import(&import("import Gar.Bar")).unwrap();
info.expect_code("import Bar.Gar");
info.remove_import(&import("import Bar.Gar")).unwrap();
info.expect_code("");
info.add_import(&parser, import("import Bar.Gar"));
info.expect_code("import Bar.Gar");
}
#[test]
fn implicit_method_resolution() {
let parser = parser::Parser::new();
let module_name =
QualifiedName::from_all_segments(["local", "ProjectName", "Main"]).unwrap();
let expect_find = |method: &MethodPointer, code, expected: &definition::Id| {
let module = parser.parse_module(code, default()).unwrap();
let result = lookup_method(&module_name, &module, method);
assert_eq!(result.unwrap().to_string(), expected.to_string());
// TODO [mwu]
// We should be able to use `assert_eq!(result.unwrap(),expected);`
// But we can't, because definition::Id uses located fields and crumbs won't match.
// Eventually we'll likely need to split definition names into located and unlocated
// ones. Definition ID should not require any location info.
};
let expect_not_found = |method: &MethodPointer, code| {
let module = parser.parse_module(code, default()).unwrap();
lookup_method(&module_name, &module, method).expect_err("expected method not found");
};
// === Lookup the Main (local module type) extension method ===
let ptr = MethodPointer {
defined_on_type: "local.ProjectName.Main".into(),
module: "local.ProjectName.Main".into(),
name: "foo".into(),
};
// Implicit module extension method.
let id = definition::Id::new_plain_name("foo");
expect_find(&ptr, "foo a b = a + b", &id);
// Explicit module extension method.
let id = definition::Id::new_single_crumb(DefinitionName::new_method("Main", "foo"));
expect_find(&ptr, "Main.foo a b = a + b", &id);
// Matching name but extending wrong type.
expect_not_found(&ptr, "Number.foo a b = a + b");
// Mismatched name.
expect_not_found(&ptr, "bar a b = a + b");
// === Lookup the Int (non-local type) extension method ===
let ptr = MethodPointer {
defined_on_type: "std.Base.Main.Number".into(),
module: "local.ProjectName.Main".into(),
name: "foo".into(),
};
expect_not_found(&ptr, "foo a b = a + b");
let id = definition::Id::new_single_crumb(DefinitionName::new_method("Number", "foo"));
expect_find(&ptr, "Number.foo a b = a + b", &id);
expect_not_found(&ptr, "Text.foo a b = a + b");
expect_not_found(&ptr, "bar a b = a + b");
}
#[test]
fn test_definition_location() {
let code = r"
some def =
first line
second line
other def =
first line
second line
nested def =
nested body
last line of other def
last def = inline expression";
let parser = parser::Parser::new();
let module = parser.parse_module(code, default()).unwrap();
let module = Info { ast: module };
let id = definition::Id::new_plain_name("other");
let span = definition_span(&module.ast, &id).unwrap();
assert!(code[span].ends_with("last line of other def"));
let id = definition::Id::new_plain_name("last");
let span = definition_span(&module.ast, &id).unwrap();
assert!(code[span].ends_with("inline expression"));
let id = definition::Id::new_plain_names(["other", "nested"]);
let span = definition_span(&module.ast, &id).unwrap();
assert!(code[span].ends_with("nested body"));
}
#[test]
fn add_method() {
let parser = parser::Parser::new();
let module = r#"Main.method1 arg = body
main = Main.method1 10"#;
let module = Info::from(parser.parse_module(module, default()).unwrap());
let method1_id = DefinitionName::new_method("Main", "method1");
let main_id = DefinitionName::new_plain("main");
let to_add = definition::ToAdd {
name: DefinitionName::new_method("Main", "add"),
explicit_parameter_names: vec!["arg1".into(), "arg2".into()],
body_head: Ast::infix_var("arg1", "+", "arg2"),
body_tail: default(),
};
let repr_after_insertion = |location| {
let mut module = module.clone();
module.add_method(to_add.clone(), location, &parser).unwrap();
module.ast.repr()
};
let expected = r#"Main.add arg1 arg2 = arg1 + arg2
Main.method1 arg = body
main = Main.method1 10"#;
assert_eq!(repr_after_insertion(Placement::Begin), expected);
let expected = r#"Main.method1 arg = body
main = Main.method1 10
Main.add arg1 arg2 = arg1 + arg2"#;
assert_eq!(repr_after_insertion(Placement::End), expected);
let expected = r#"Main.method1 arg = body
Main.add arg1 arg2 = arg1 + arg2
main = Main.method1 10"#;
assert_eq!(repr_after_insertion(Placement::After(method1_id.clone())), expected);
assert_eq!(
repr_after_insertion(Placement::Before(method1_id.clone())),
repr_after_insertion(Placement::Begin)
);
assert_eq!(
repr_after_insertion(Placement::After(method1_id)),
repr_after_insertion(Placement::Before(main_id.clone()))
);
assert_eq!(
repr_after_insertion(Placement::After(main_id)),
repr_after_insertion(Placement::End)
);
// TODO [mwu]
// This test doesn't include multi-lines functions, as the result may seem somewhat
// unexpected due to the way that parser assigns blank lines to the former block
// rather than module. If anyone will care, we might revisit this after the parser
// 2.0 rewrite.
}
}
| {
add_line(None);
} | conditional_block |
module.rs | //! Code for module-level double representation processing.
use crate::prelude::*;
use enso_text::index::*;
use crate::alias_analysis;
use crate::definition;
use crate::definition::DefinitionProvider;
use crate::definition::EmptyDefinitionId;
use crate::identifier;
use crate::identifier::Identifier;
use crate::import;
use crate::name::NamePath;
use crate::name::QualifiedName;
use ast::crumbs::ChildAst;
use ast::crumbs::Located;
use ast::crumbs::ModuleCrumb;
use ast::known;
use ast::BlockLine;
use engine_protocol::language_server;
use std::fmt::Formatter;
// ==============
// === Errors ===
// ==============
#[derive(Copy, Clone, Debug, Fail)]
#[fail(display = "Id segment list is empty.")]
#[allow(missing_docs)]
pub struct EmptySegments;
#[derive(Clone, Debug, Fail)]
#[fail(display = "Import `{}` was not found in the module.", _0)]
#[allow(missing_docs)]
pub struct ImportNotFound(pub String);
#[derive(Clone, Copy, Debug, Fail)]
#[fail(display = "Import with ID `{}` was not found in the module.", _0)]
#[allow(missing_docs)]
pub struct ImportIdNotFound(pub import::Id);
#[derive(Clone, Copy, Debug, Fail)]
#[fail(display = "Line index is out of bounds.")]
#[allow(missing_docs)]
pub struct LineIndexOutOfBounds;
#[allow(missing_docs)]
#[derive(Fail, Clone, Debug)]
#[fail(display = "Cannot find method with pointer {:?}.", _0)]
pub struct CannotFindMethod(language_server::MethodPointer);
#[allow(missing_docs)]
#[derive(Fail, Clone, Debug)]
#[fail(display = "The definition with crumbs {:?} is not a direct child of the module.", _0)]
pub struct NotDirectChild(ast::Crumbs);
// ==========
// === Id ===
// ==========
/// The segments of module name. Allow finding module in the project.
#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub struct Id {
/// The last segment being a module name. For project's main module it should be equal
/// to [`PROJECTS_MAIN_MODULE`].
pub name: ImString,
/// The segments of all parent modules, from the top module to the direct parent. Does **not**
/// include project name.
pub parent_modules: Vec<ImString>,
}
impl Id {
/// Create module id from list of segments. The list shall not contain the project name nor
/// namespace. Fails if the list is empty (the module name is required).
pub fn try_from_segments(
segments: impl IntoIterator<Item: Into<ImString>>,
) -> FallibleResult<Self> {
let mut segments = segments.into_iter().map(Into::into).collect_vec();
let name = segments.pop().ok_or(EmptySegments)?;
Ok(Self { name, parent_modules: segments })
}
/// Return the iterator over id's segments.
pub fn segments(&self) -> impl Iterator<Item = &ImString> {
self.parent_modules.iter().chain(iter::once(&self.name))
}
}
impl IntoIterator for Id {
type Item = ImString;
type IntoIter = impl Iterator<Item = Self::Item>;
fn into_iter(self) -> Self::IntoIter {
self.parent_modules.into_iter().chain(iter::once(self.name))
}
}
impl From<Id> for NamePath {
fn from(id: Id) -> Self {
id.into_iter().collect()
}
}
impl Display for Id {
fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
write!(f, "{}", self.segments().format("."))
}
}
// ============
// === Info ===
// ============
/// Wrapper allowing getting information about the module and updating it.
#[derive(Clone, Debug)]
pub struct Info {
#[allow(missing_docs)]
pub ast: known::Module,
}
impl Info {
/// Generate a name for a definition that can be introduced without side-effects.
///
/// The name shall be generated by appending number to the given base string.
pub fn generate_name(&self, base: &str) -> FallibleResult<Identifier> {
let used_names = self.used_names();
let used_names = used_names.iter().map(|name| name.item.as_str());
identifier::generate_name(base, used_names)
}
/// Identifiers introduced or referred to in the module's scope.
///
/// Introducing identifier not included on this list should have no side-effects on the name
/// resolution in the code in this graph.
pub fn used_names(&self) -> Vec<Located<String>> {
let usage = alias_analysis::analyze_crumbable(self.ast.shape());
usage.all_identifiers()
}
/// Iterate over all lines in module that contain an import declaration.
pub fn enumerate_imports(&self) -> impl Iterator<Item = (ModuleCrumb, import::Info)> + '_ {
let children = self.ast.shape().enumerate();
children.filter_map(|(crumb, ast)| Some((crumb, import::Info::from_ast(ast)?)))
}
/// Iterate over all import declarations in the module.
///
/// If the caller wants to know *where* the declarations are, use `enumerate_imports`.
pub fn iter_imports(&self) -> impl Iterator<Item = import::Info> + '_ {
self.enumerate_imports().map(|(_, import)| import)
}
/// Check if module contains import with given id.
pub fn contains_import(&self, id: import::Id) -> bool {
self.iter_imports().any(|import| import.id() == id)
}
/// Add a new line to the module's block.
///
/// Note that indices are the "module line" indices, which usually are quite different from text
/// API line indices (because nested blocks doesn't count as separate "module lines").
pub fn add_line(&mut self, index: usize, ast: Option<Ast>) {
let line = BlockLine::new(ast);
self.ast.update_shape(|shape| shape.lines.insert(index, line))
}
/// Remove line with given index.
///
/// Returns removed line. Fails if the index is out of bounds.
pub fn remove_line(&mut self, index: usize) -> FallibleResult<BlockLine<Option<Ast>>> {
self.ast.update_shape(|shape| {
shape.lines.try_remove(index).ok_or_else(|| LineIndexOutOfBounds.into())
})
}
/// Remove a line that matches given import description.
///
/// If there is more than one line matching, only the first one will be removed.
/// Fails if there is no import matching given argument.
pub fn remove_import(&mut self, to_remove: &import::Info) -> FallibleResult {
let lookup_result = self.enumerate_imports().find(|(_, import)| import == to_remove);
let (crumb, _) = lookup_result.ok_or_else(|| ImportNotFound(to_remove.to_string()))?;
self.remove_line(crumb.line_index)?;
Ok(())
}
/// Remove a line that matches given import ID.
///
/// If there is more than one line matching, only the first one will be removed.
/// Fails if there is no import matching given argument.
pub fn remove_import_by_id(&mut self, to_remove: import::Id) -> FallibleResult {
let lookup_result = self.enumerate_imports().find(|(_, import)| import.id() == to_remove);
let (crumb, _) = lookup_result.ok_or(ImportIdNotFound(to_remove))?;
self.remove_line(crumb.line_index)?;
Ok(())
}
/// Add a new import declaration to a module.
///
/// This function will try to keep imports in lexicographic order. It returns the index where
/// import was added (index of import - an element on the list returned by `enumerate_imports`).
// TODO [mwu]
// Ideally we should not require parser but should use some sane way of generating AST from
// the `ImportInfo` value.
pub fn add_import(&mut self, parser: &parser::Parser, to_add: import::Info) -> usize {
// Find last import that is not "after" the added one lexicographically.
let previous_import =
self.enumerate_imports().take_while(|(_, import)| &to_add > import).last();
let index_to_place_at = previous_import.map_or(0, |(crumb, _)| crumb.line_index + 1);
let import_ast = parser.parse_line_ast(to_add.to_string()).unwrap();
self.add_line(index_to_place_at, Some(import_ast));
index_to_place_at
}
/// Add a new import declaration to a module.
///
/// For more details the mechanics see [`add_import`] documentation.
pub fn | (
&mut self,
parser: &parser::Parser,
to_add: import::Info,
) -> Option<usize> {
(!self.contains_import(to_add.id())).then(|| self.add_import(parser, to_add))
}
/// Place the line with given AST in the module's body.
///
/// Unlike `add_line` (which is more low-level) will introduce empty lines around introduced
/// line and describes the added line location in relation to other definitions.
///
/// Typically used to place lines with definitions in the module.
pub fn add_ast(&mut self, ast: Ast, location: Placement) -> FallibleResult {
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
enum BlankLinePlacement {
Before,
After,
None,
}
let blank_line = match location {
_ if self.ast.lines.is_empty() => BlankLinePlacement::None,
Placement::Begin => BlankLinePlacement::After,
Placement::End => BlankLinePlacement::Before,
Placement::After(_) => BlankLinePlacement::Before,
Placement::Before(_) => BlankLinePlacement::After,
};
let mut index = match location {
Placement::Begin => 0,
Placement::End => self.ast.lines.len(),
Placement::Before(next_def) => locate_line_with(&self.ast, &next_def)?.line_index,
Placement::After(next_def) => locate_line_with(&self.ast, &next_def)?.line_index + 1,
};
let mut add_line = |ast_opt: Option<Ast>| {
self.add_line(index, ast_opt);
index += 1;
};
if blank_line == BlankLinePlacement::Before {
add_line(None);
}
add_line(Some(ast));
if blank_line == BlankLinePlacement::After {
add_line(None);
}
Ok(())
}
/// Add a new method definition to the module.
pub fn add_method(
&mut self,
method: definition::ToAdd,
location: Placement,
parser: &parser::Parser,
) -> FallibleResult {
let no_indent = 0;
let definition_ast = method.ast(no_indent, parser)?;
self.add_ast(definition_ast, location)
}
/// Updates the given definition using the passed invokable.
pub fn update_definition(
&mut self,
id: &definition::Id,
f: impl FnOnce(definition::DefinitionInfo) -> FallibleResult<definition::DefinitionInfo>,
) -> FallibleResult {
let definition = locate(&self.ast, id)?;
let new_definition = f(definition.item)?;
let new_ast = new_definition.ast.into();
self.ast = self.ast.set_traversing(&definition.crumbs, new_ast)?;
Ok(())
}
#[cfg(test)]
pub fn expect_code(&self, expected_code: impl AsRef<str>) {
assert_eq!(self.ast.repr(), expected_code.as_ref());
}
}
impl From<known::Module> for Info {
fn from(ast: known::Module) -> Self {
Info { ast }
}
}
// =================
// === Placement ===
// =================
/// Structure describing where to place something being added to the module.
#[derive(Clone, Debug, PartialEq, Eq)]
pub enum Placement {
/// Place at the beginning of the module.
Begin,
/// Place at the end of the module.
End,
/// Place after given definition;
Before(definition::Crumb),
/// Place before given definition;
After(definition::Crumb),
}
// =======================
// === ChildDefinition ===
// =======================
/// Represents information about a definition being a direct child of this module, including its
/// location.
///
/// Internally it is `definition::ChildDefinition` with only a single `ModuleCrumb` as location.
#[derive(Clone, Debug, Deref)]
pub struct ChildDefinition(definition::ChildDefinition);
impl ChildDefinition {
fn try_retrieving_crumb(child: &definition::ChildDefinition) -> Option<ModuleCrumb> {
match child.crumbs.as_slice() {
[ast::crumbs::Crumb::Module(crumb)] => Some(*crumb),
_ => None,
}
}
/// Try constructing value from `definition::ChildDefinition`. Fails if it is not a direct child
/// of a module.
pub fn new(child: definition::ChildDefinition) -> Result<Self, NotDirectChild> {
if Self::try_retrieving_crumb(&child).is_some() {
Ok(Self(child))
} else {
Err(NotDirectChild(child.crumbs))
}
}
/// The location of this definition child in the module.
pub fn crumb(&self) -> ModuleCrumb {
// Safe, because our only constructor checks that this works. This is the type's invariant.
Self::try_retrieving_crumb(&self.0).unwrap()
}
}
impl TryFrom<definition::ChildDefinition> for ChildDefinition {
type Error = NotDirectChild;
fn try_from(value: definition::ChildDefinition) -> Result<Self, Self::Error> {
Self::new(value)
}
}
// ========================
// === Module Utilities ===
// ========================
/// Looks up graph in the module.
pub fn get_definition(
ast: &known::Module,
id: &definition::Id,
) -> FallibleResult<definition::DefinitionInfo> {
Ok(locate(ast, id)?.item)
}
/// Locate the line with given definition and return crumb that denotes it.
///
/// Fails if there is no matching definition being a direct child of the module.
pub fn locate_line_with(
ast: &known::Module,
crumb: &definition::Crumb,
) -> FallibleResult<ModuleCrumb> {
locate_child(ast, crumb).map(|child| child.crumb())
}
/// Locate the definition being the module's direct child.
pub fn locate_child(
ast: &known::Module,
crumb: &definition::Crumb,
) -> FallibleResult<ChildDefinition> {
let child = ast.def_iter().find_by_name(crumb)?;
Ok(ChildDefinition::try_from(child)?)
}
/// Traverses the module's definition tree following the given Id crumbs, looking up the definition.
pub fn locate(
ast: &known::Module,
id: &definition::Id,
) -> FallibleResult<definition::ChildDefinition> {
let mut crumbs_iter = id.crumbs.iter();
// Not exactly regular - we need special case for the first crumb as it is not a definition nor
// a children. After this we can go just from one definition to another.
let first_crumb = crumbs_iter.next().ok_or(EmptyDefinitionId)?;
let mut child = ast.def_iter().find_by_name(first_crumb)?;
for crumb in crumbs_iter {
child = definition::resolve_single_name(child, crumb)?;
}
Ok(child)
}
/// Get a definition ID that points to a method matching given pointer.
///
/// The module is assumed to be in the file identified by the `method.file` (for the purpose of
/// desugaring implicit extensions methods for modules).
///
/// The `module_name` parameter is the name of the module that contains `ast`.
pub fn lookup_method(
module_name: &QualifiedName,
ast: &known::Module,
method: &language_server::MethodPointer,
) -> FallibleResult<definition::Id> {
let qualified_typename = QualifiedName::from_text(&method.defined_on_type)?;
let defined_in_this_module = module_name == &qualified_typename;
let method_module_name = QualifiedName::from_text(&method.module)?;
let implicit_extension_allowed = method.defined_on_type == method_module_name.to_string();
for child in ast.def_iter() {
let child_name = &child.name.item;
let name_matches = child_name.name.item == method.name;
let type_matches = match child_name.extended_target.as_slice() {
[] => implicit_extension_allowed || defined_in_this_module,
[typename] => typename.item == qualified_typename.name(),
_ => child_name.explicitly_extends_type(&method.defined_on_type),
};
if name_matches && type_matches {
return Ok(definition::Id::new_single_crumb(child_name.clone()));
}
}
Err(CannotFindMethod(method.clone()).into())
}
/// Get a span in module's text representation where the given definition is located.
pub fn definition_span(
ast: &known::Module,
id: &definition::Id,
) -> FallibleResult<enso_text::Range<Byte>> {
let location = locate(ast, id)?;
ast.range_of_descendant_at(&location.crumbs)
}
impl DefinitionProvider for known::Module {
fn indent(&self) -> usize {
0
}
fn scope_kind(&self) -> definition::ScopeKind {
definition::ScopeKind::Root
}
fn enumerate_asts<'a>(&'a self) -> Box<dyn Iterator<Item = ChildAst<'a>> + 'a> {
self.ast().children()
}
}
// ================
// === MethodId ===
// ================
/// A structure identifying a method.
///
/// It is very similar to MethodPointer from language_server API, however it may point to the method
/// outside the currently opened project.
#[derive(Clone, Debug, serde::Deserialize, Eq, Hash, PartialEq, serde::Serialize)]
#[allow(missing_docs)]
pub struct MethodId {
pub module: QualifiedName,
pub defined_on_type: QualifiedName,
pub name: String,
}
// ============
// === Test ===
// ============
#[cfg(test)]
mod tests {
use super::*;
use crate::definition::DefinitionName;
use engine_protocol::language_server::MethodPointer;
#[test]
fn import_listing() {
let parser = parser::Parser::new();
let expect_imports = |code: &str, expected: &[&[&str]]| {
let ast = parser.parse_module(code, default()).unwrap();
let info = Info { ast };
let imports = info.iter_imports().collect_vec();
assert_eq!(imports.len(), expected.len());
for (import, expected_segments) in imports.iter().zip(expected) {
itertools::assert_equal(import.module.iter(), expected_segments.iter());
}
};
// TODO [mwu] waiting for fix https://github.com/enso-org/enso/issues/1016
// expect_imports("import", &[&[]]);
expect_imports("import Foo", &[&["Foo"]]);
expect_imports("import Foo.Bar", &[&["Foo", "Bar"]]);
expect_imports("foo = bar\nimport Foo.Bar", &[&["Foo", "Bar"]]);
expect_imports("import Foo.Bar\nfoo=bar\nimport Foo.Bar", &[&["Foo", "Bar"], &[
"Foo", "Bar",
]]);
}
#[test]
fn import_adding_and_removing() {
let parser = parser::Parser::new();
let code = "import Foo.Bar.Baz";
let ast = parser.parse_module(code, default()).unwrap();
let mut info = Info { ast };
let import = |code| {
let ast = parser.parse_line_ast(code).unwrap();
import::Info::from_ast(&ast).unwrap()
};
info.add_import(&parser, import("import Bar.Gar"));
info.expect_code("import Bar.Gar\nimport Foo.Bar.Baz");
info.add_import(&parser, import("import Gar.Bar"));
info.expect_code("import Bar.Gar\nimport Foo.Bar.Baz\nimport Gar.Bar");
info.remove_import(&import("import Foo.Bar.Baz")).unwrap();
info.expect_code("import Bar.Gar\nimport Gar.Bar");
info.remove_import(&import("import Foo.Bar.Baz")).unwrap_err();
info.expect_code("import Bar.Gar\nimport Gar.Bar");
info.remove_import(&import("import Gar.Bar")).unwrap();
info.expect_code("import Bar.Gar");
info.remove_import(&import("import Bar.Gar")).unwrap();
info.expect_code("");
info.add_import(&parser, import("import Bar.Gar"));
info.expect_code("import Bar.Gar");
}
#[test]
fn implicit_method_resolution() {
let parser = parser::Parser::new();
let module_name =
QualifiedName::from_all_segments(["local", "ProjectName", "Main"]).unwrap();
let expect_find = |method: &MethodPointer, code, expected: &definition::Id| {
let module = parser.parse_module(code, default()).unwrap();
let result = lookup_method(&module_name, &module, method);
assert_eq!(result.unwrap().to_string(), expected.to_string());
// TODO [mwu]
// We should be able to use `assert_eq!(result.unwrap(),expected);`
// But we can't, because definition::Id uses located fields and crumbs won't match.
// Eventually we'll likely need to split definition names into located and unlocated
// ones. Definition ID should not require any location info.
};
let expect_not_found = |method: &MethodPointer, code| {
let module = parser.parse_module(code, default()).unwrap();
lookup_method(&module_name, &module, method).expect_err("expected method not found");
};
// === Lookup the Main (local module type) extension method ===
let ptr = MethodPointer {
defined_on_type: "local.ProjectName.Main".into(),
module: "local.ProjectName.Main".into(),
name: "foo".into(),
};
// Implicit module extension method.
let id = definition::Id::new_plain_name("foo");
expect_find(&ptr, "foo a b = a + b", &id);
// Explicit module extension method.
let id = definition::Id::new_single_crumb(DefinitionName::new_method("Main", "foo"));
expect_find(&ptr, "Main.foo a b = a + b", &id);
// Matching name but extending wrong type.
expect_not_found(&ptr, "Number.foo a b = a + b");
// Mismatched name.
expect_not_found(&ptr, "bar a b = a + b");
// === Lookup the Int (non-local type) extension method ===
let ptr = MethodPointer {
defined_on_type: "std.Base.Main.Number".into(),
module: "local.ProjectName.Main".into(),
name: "foo".into(),
};
expect_not_found(&ptr, "foo a b = a + b");
let id = definition::Id::new_single_crumb(DefinitionName::new_method("Number", "foo"));
expect_find(&ptr, "Number.foo a b = a + b", &id);
expect_not_found(&ptr, "Text.foo a b = a + b");
expect_not_found(&ptr, "bar a b = a + b");
}
#[test]
fn test_definition_location() {
let code = r"
some def =
first line
second line
other def =
first line
second line
nested def =
nested body
last line of other def
last def = inline expression";
let parser = parser::Parser::new();
let module = parser.parse_module(code, default()).unwrap();
let module = Info { ast: module };
let id = definition::Id::new_plain_name("other");
let span = definition_span(&module.ast, &id).unwrap();
assert!(code[span].ends_with("last line of other def"));
let id = definition::Id::new_plain_name("last");
let span = definition_span(&module.ast, &id).unwrap();
assert!(code[span].ends_with("inline expression"));
let id = definition::Id::new_plain_names(["other", "nested"]);
let span = definition_span(&module.ast, &id).unwrap();
assert!(code[span].ends_with("nested body"));
}
#[test]
fn add_method() {
let parser = parser::Parser::new();
let module = r#"Main.method1 arg = body
main = Main.method1 10"#;
let module = Info::from(parser.parse_module(module, default()).unwrap());
let method1_id = DefinitionName::new_method("Main", "method1");
let main_id = DefinitionName::new_plain("main");
let to_add = definition::ToAdd {
name: DefinitionName::new_method("Main", "add"),
explicit_parameter_names: vec!["arg1".into(), "arg2".into()],
body_head: Ast::infix_var("arg1", "+", "arg2"),
body_tail: default(),
};
let repr_after_insertion = |location| {
let mut module = module.clone();
module.add_method(to_add.clone(), location, &parser).unwrap();
module.ast.repr()
};
let expected = r#"Main.add arg1 arg2 = arg1 + arg2
Main.method1 arg = body
main = Main.method1 10"#;
assert_eq!(repr_after_insertion(Placement::Begin), expected);
let expected = r#"Main.method1 arg = body
main = Main.method1 10
Main.add arg1 arg2 = arg1 + arg2"#;
assert_eq!(repr_after_insertion(Placement::End), expected);
let expected = r#"Main.method1 arg = body
Main.add arg1 arg2 = arg1 + arg2
main = Main.method1 10"#;
assert_eq!(repr_after_insertion(Placement::After(method1_id.clone())), expected);
assert_eq!(
repr_after_insertion(Placement::Before(method1_id.clone())),
repr_after_insertion(Placement::Begin)
);
assert_eq!(
repr_after_insertion(Placement::After(method1_id)),
repr_after_insertion(Placement::Before(main_id.clone()))
);
assert_eq!(
repr_after_insertion(Placement::After(main_id)),
repr_after_insertion(Placement::End)
);
// TODO [mwu]
// This test doesn't include multi-lines functions, as the result may seem somewhat
// unexpected due to the way that parser assigns blank lines to the former block
// rather than module. If anyone will care, we might revisit this after the parser
// 2.0 rewrite.
}
}
| add_import_if_missing | identifier_name |
module.rs | //! Code for module-level double representation processing.
use crate::prelude::*;
use enso_text::index::*;
use crate::alias_analysis;
use crate::definition;
use crate::definition::DefinitionProvider;
use crate::definition::EmptyDefinitionId;
use crate::identifier;
use crate::identifier::Identifier;
use crate::import;
use crate::name::NamePath;
use crate::name::QualifiedName;
use ast::crumbs::ChildAst;
use ast::crumbs::Located;
use ast::crumbs::ModuleCrumb;
use ast::known;
use ast::BlockLine;
use engine_protocol::language_server;
use std::fmt::Formatter;
// ==============
// === Errors ===
// ==============
#[derive(Copy, Clone, Debug, Fail)]
#[fail(display = "Id segment list is empty.")]
#[allow(missing_docs)]
pub struct EmptySegments;
#[derive(Clone, Debug, Fail)]
#[fail(display = "Import `{}` was not found in the module.", _0)]
#[allow(missing_docs)]
pub struct ImportNotFound(pub String);
#[derive(Clone, Copy, Debug, Fail)]
#[fail(display = "Import with ID `{}` was not found in the module.", _0)]
#[allow(missing_docs)]
pub struct ImportIdNotFound(pub import::Id);
#[derive(Clone, Copy, Debug, Fail)]
#[fail(display = "Line index is out of bounds.")]
#[allow(missing_docs)]
pub struct LineIndexOutOfBounds;
#[allow(missing_docs)]
#[derive(Fail, Clone, Debug)]
#[fail(display = "Cannot find method with pointer {:?}.", _0)]
pub struct CannotFindMethod(language_server::MethodPointer);
#[allow(missing_docs)]
#[derive(Fail, Clone, Debug)]
#[fail(display = "The definition with crumbs {:?} is not a direct child of the module.", _0)]
pub struct NotDirectChild(ast::Crumbs);
// ==========
// === Id ===
// ==========
/// The segments of module name. Allow finding module in the project.
#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)] | /// The segments of all parent modules, from the top module to the direct parent. Does **not**
/// include project name.
pub parent_modules: Vec<ImString>,
}
impl Id {
/// Create module id from list of segments. The list shall not contain the project name nor
/// namespace. Fails if the list is empty (the module name is required).
pub fn try_from_segments(
segments: impl IntoIterator<Item: Into<ImString>>,
) -> FallibleResult<Self> {
let mut segments = segments.into_iter().map(Into::into).collect_vec();
let name = segments.pop().ok_or(EmptySegments)?;
Ok(Self { name, parent_modules: segments })
}
/// Return the iterator over id's segments.
pub fn segments(&self) -> impl Iterator<Item = &ImString> {
self.parent_modules.iter().chain(iter::once(&self.name))
}
}
impl IntoIterator for Id {
type Item = ImString;
type IntoIter = impl Iterator<Item = Self::Item>;
fn into_iter(self) -> Self::IntoIter {
self.parent_modules.into_iter().chain(iter::once(self.name))
}
}
impl From<Id> for NamePath {
fn from(id: Id) -> Self {
id.into_iter().collect()
}
}
impl Display for Id {
fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
write!(f, "{}", self.segments().format("."))
}
}
// ============
// === Info ===
// ============
/// Wrapper allowing getting information about the module and updating it.
#[derive(Clone, Debug)]
pub struct Info {
#[allow(missing_docs)]
pub ast: known::Module,
}
impl Info {
/// Generate a name for a definition that can be introduced without side-effects.
///
/// The name shall be generated by appending number to the given base string.
pub fn generate_name(&self, base: &str) -> FallibleResult<Identifier> {
let used_names = self.used_names();
let used_names = used_names.iter().map(|name| name.item.as_str());
identifier::generate_name(base, used_names)
}
/// Identifiers introduced or referred to in the module's scope.
///
/// Introducing identifier not included on this list should have no side-effects on the name
/// resolution in the code in this graph.
pub fn used_names(&self) -> Vec<Located<String>> {
let usage = alias_analysis::analyze_crumbable(self.ast.shape());
usage.all_identifiers()
}
/// Iterate over all lines in module that contain an import declaration.
pub fn enumerate_imports(&self) -> impl Iterator<Item = (ModuleCrumb, import::Info)> + '_ {
let children = self.ast.shape().enumerate();
children.filter_map(|(crumb, ast)| Some((crumb, import::Info::from_ast(ast)?)))
}
/// Iterate over all import declarations in the module.
///
/// If the caller wants to know *where* the declarations are, use `enumerate_imports`.
pub fn iter_imports(&self) -> impl Iterator<Item = import::Info> + '_ {
self.enumerate_imports().map(|(_, import)| import)
}
/// Check if module contains import with given id.
pub fn contains_import(&self, id: import::Id) -> bool {
self.iter_imports().any(|import| import.id() == id)
}
/// Add a new line to the module's block.
///
/// Note that indices are the "module line" indices, which usually are quite different from text
/// API line indices (because nested blocks doesn't count as separate "module lines").
pub fn add_line(&mut self, index: usize, ast: Option<Ast>) {
let line = BlockLine::new(ast);
self.ast.update_shape(|shape| shape.lines.insert(index, line))
}
/// Remove line with given index.
///
/// Returns removed line. Fails if the index is out of bounds.
pub fn remove_line(&mut self, index: usize) -> FallibleResult<BlockLine<Option<Ast>>> {
self.ast.update_shape(|shape| {
shape.lines.try_remove(index).ok_or_else(|| LineIndexOutOfBounds.into())
})
}
/// Remove a line that matches given import description.
///
/// If there is more than one line matching, only the first one will be removed.
/// Fails if there is no import matching given argument.
pub fn remove_import(&mut self, to_remove: &import::Info) -> FallibleResult {
let lookup_result = self.enumerate_imports().find(|(_, import)| import == to_remove);
let (crumb, _) = lookup_result.ok_or_else(|| ImportNotFound(to_remove.to_string()))?;
self.remove_line(crumb.line_index)?;
Ok(())
}
/// Remove a line that matches given import ID.
///
/// If there is more than one line matching, only the first one will be removed.
/// Fails if there is no import matching given argument.
pub fn remove_import_by_id(&mut self, to_remove: import::Id) -> FallibleResult {
let lookup_result = self.enumerate_imports().find(|(_, import)| import.id() == to_remove);
let (crumb, _) = lookup_result.ok_or(ImportIdNotFound(to_remove))?;
self.remove_line(crumb.line_index)?;
Ok(())
}
/// Add a new import declaration to a module.
///
/// This function will try to keep imports in lexicographic order. It returns the index where
/// import was added (index of import - an element on the list returned by `enumerate_imports`).
// TODO [mwu]
// Ideally we should not require parser but should use some sane way of generating AST from
// the `ImportInfo` value.
pub fn add_import(&mut self, parser: &parser::Parser, to_add: import::Info) -> usize {
// Find last import that is not "after" the added one lexicographically.
let previous_import =
self.enumerate_imports().take_while(|(_, import)| &to_add > import).last();
let index_to_place_at = previous_import.map_or(0, |(crumb, _)| crumb.line_index + 1);
let import_ast = parser.parse_line_ast(to_add.to_string()).unwrap();
self.add_line(index_to_place_at, Some(import_ast));
index_to_place_at
}
/// Add a new import declaration to a module.
///
/// For more details the mechanics see [`add_import`] documentation.
pub fn add_import_if_missing(
&mut self,
parser: &parser::Parser,
to_add: import::Info,
) -> Option<usize> {
(!self.contains_import(to_add.id())).then(|| self.add_import(parser, to_add))
}
/// Place the line with given AST in the module's body.
///
/// Unlike `add_line` (which is more low-level) will introduce empty lines around introduced
/// line and describes the added line location in relation to other definitions.
///
/// Typically used to place lines with definitions in the module.
pub fn add_ast(&mut self, ast: Ast, location: Placement) -> FallibleResult {
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
enum BlankLinePlacement {
Before,
After,
None,
}
let blank_line = match location {
_ if self.ast.lines.is_empty() => BlankLinePlacement::None,
Placement::Begin => BlankLinePlacement::After,
Placement::End => BlankLinePlacement::Before,
Placement::After(_) => BlankLinePlacement::Before,
Placement::Before(_) => BlankLinePlacement::After,
};
let mut index = match location {
Placement::Begin => 0,
Placement::End => self.ast.lines.len(),
Placement::Before(next_def) => locate_line_with(&self.ast, &next_def)?.line_index,
Placement::After(next_def) => locate_line_with(&self.ast, &next_def)?.line_index + 1,
};
let mut add_line = |ast_opt: Option<Ast>| {
self.add_line(index, ast_opt);
index += 1;
};
if blank_line == BlankLinePlacement::Before {
add_line(None);
}
add_line(Some(ast));
if blank_line == BlankLinePlacement::After {
add_line(None);
}
Ok(())
}
/// Add a new method definition to the module.
pub fn add_method(
&mut self,
method: definition::ToAdd,
location: Placement,
parser: &parser::Parser,
) -> FallibleResult {
let no_indent = 0;
let definition_ast = method.ast(no_indent, parser)?;
self.add_ast(definition_ast, location)
}
/// Updates the given definition using the passed invokable.
pub fn update_definition(
&mut self,
id: &definition::Id,
f: impl FnOnce(definition::DefinitionInfo) -> FallibleResult<definition::DefinitionInfo>,
) -> FallibleResult {
let definition = locate(&self.ast, id)?;
let new_definition = f(definition.item)?;
let new_ast = new_definition.ast.into();
self.ast = self.ast.set_traversing(&definition.crumbs, new_ast)?;
Ok(())
}
#[cfg(test)]
pub fn expect_code(&self, expected_code: impl AsRef<str>) {
assert_eq!(self.ast.repr(), expected_code.as_ref());
}
}
impl From<known::Module> for Info {
fn from(ast: known::Module) -> Self {
Info { ast }
}
}
// =================
// === Placement ===
// =================
/// Structure describing where to place something being added to the module.
#[derive(Clone, Debug, PartialEq, Eq)]
pub enum Placement {
/// Place at the beginning of the module.
Begin,
/// Place at the end of the module.
End,
/// Place after given definition;
Before(definition::Crumb),
/// Place before given definition;
After(definition::Crumb),
}
// =======================
// === ChildDefinition ===
// =======================
/// Represents information about a definition being a direct child of this module, including its
/// location.
///
/// Internally it is `definition::ChildDefinition` with only a single `ModuleCrumb` as location.
#[derive(Clone, Debug, Deref)]
pub struct ChildDefinition(definition::ChildDefinition);
impl ChildDefinition {
fn try_retrieving_crumb(child: &definition::ChildDefinition) -> Option<ModuleCrumb> {
match child.crumbs.as_slice() {
[ast::crumbs::Crumb::Module(crumb)] => Some(*crumb),
_ => None,
}
}
/// Try constructing value from `definition::ChildDefinition`. Fails if it is not a direct child
/// of a module.
pub fn new(child: definition::ChildDefinition) -> Result<Self, NotDirectChild> {
if Self::try_retrieving_crumb(&child).is_some() {
Ok(Self(child))
} else {
Err(NotDirectChild(child.crumbs))
}
}
/// The location of this definition child in the module.
pub fn crumb(&self) -> ModuleCrumb {
// Safe, because our only constructor checks that this works. This is the type's invariant.
Self::try_retrieving_crumb(&self.0).unwrap()
}
}
impl TryFrom<definition::ChildDefinition> for ChildDefinition {
type Error = NotDirectChild;
fn try_from(value: definition::ChildDefinition) -> Result<Self, Self::Error> {
Self::new(value)
}
}
// ========================
// === Module Utilities ===
// ========================
/// Looks up graph in the module.
pub fn get_definition(
ast: &known::Module,
id: &definition::Id,
) -> FallibleResult<definition::DefinitionInfo> {
Ok(locate(ast, id)?.item)
}
/// Locate the line with given definition and return crumb that denotes it.
///
/// Fails if there is no matching definition being a direct child of the module.
pub fn locate_line_with(
ast: &known::Module,
crumb: &definition::Crumb,
) -> FallibleResult<ModuleCrumb> {
locate_child(ast, crumb).map(|child| child.crumb())
}
/// Locate the definition being the module's direct child.
pub fn locate_child(
ast: &known::Module,
crumb: &definition::Crumb,
) -> FallibleResult<ChildDefinition> {
let child = ast.def_iter().find_by_name(crumb)?;
Ok(ChildDefinition::try_from(child)?)
}
/// Traverses the module's definition tree following the given Id crumbs, looking up the definition.
pub fn locate(
ast: &known::Module,
id: &definition::Id,
) -> FallibleResult<definition::ChildDefinition> {
let mut crumbs_iter = id.crumbs.iter();
// Not exactly regular - we need special case for the first crumb as it is not a definition nor
// a children. After this we can go just from one definition to another.
let first_crumb = crumbs_iter.next().ok_or(EmptyDefinitionId)?;
let mut child = ast.def_iter().find_by_name(first_crumb)?;
for crumb in crumbs_iter {
child = definition::resolve_single_name(child, crumb)?;
}
Ok(child)
}
/// Get a definition ID that points to a method matching given pointer.
///
/// The module is assumed to be in the file identified by the `method.file` (for the purpose of
/// desugaring implicit extensions methods for modules).
///
/// The `module_name` parameter is the name of the module that contains `ast`.
pub fn lookup_method(
module_name: &QualifiedName,
ast: &known::Module,
method: &language_server::MethodPointer,
) -> FallibleResult<definition::Id> {
let qualified_typename = QualifiedName::from_text(&method.defined_on_type)?;
let defined_in_this_module = module_name == &qualified_typename;
let method_module_name = QualifiedName::from_text(&method.module)?;
let implicit_extension_allowed = method.defined_on_type == method_module_name.to_string();
for child in ast.def_iter() {
let child_name = &child.name.item;
let name_matches = child_name.name.item == method.name;
let type_matches = match child_name.extended_target.as_slice() {
[] => implicit_extension_allowed || defined_in_this_module,
[typename] => typename.item == qualified_typename.name(),
_ => child_name.explicitly_extends_type(&method.defined_on_type),
};
if name_matches && type_matches {
return Ok(definition::Id::new_single_crumb(child_name.clone()));
}
}
Err(CannotFindMethod(method.clone()).into())
}
/// Get a span in module's text representation where the given definition is located.
pub fn definition_span(
ast: &known::Module,
id: &definition::Id,
) -> FallibleResult<enso_text::Range<Byte>> {
let location = locate(ast, id)?;
ast.range_of_descendant_at(&location.crumbs)
}
impl DefinitionProvider for known::Module {
fn indent(&self) -> usize {
0
}
fn scope_kind(&self) -> definition::ScopeKind {
definition::ScopeKind::Root
}
fn enumerate_asts<'a>(&'a self) -> Box<dyn Iterator<Item = ChildAst<'a>> + 'a> {
self.ast().children()
}
}
// ================
// === MethodId ===
// ================
/// A structure identifying a method.
///
/// It is very similar to MethodPointer from language_server API, however it may point to the method
/// outside the currently opened project.
#[derive(Clone, Debug, serde::Deserialize, Eq, Hash, PartialEq, serde::Serialize)]
#[allow(missing_docs)]
pub struct MethodId {
pub module: QualifiedName,
pub defined_on_type: QualifiedName,
pub name: String,
}
// ============
// === Test ===
// ============
#[cfg(test)]
mod tests {
use super::*;
use crate::definition::DefinitionName;
use engine_protocol::language_server::MethodPointer;
#[test]
fn import_listing() {
let parser = parser::Parser::new();
let expect_imports = |code: &str, expected: &[&[&str]]| {
let ast = parser.parse_module(code, default()).unwrap();
let info = Info { ast };
let imports = info.iter_imports().collect_vec();
assert_eq!(imports.len(), expected.len());
for (import, expected_segments) in imports.iter().zip(expected) {
itertools::assert_equal(import.module.iter(), expected_segments.iter());
}
};
// TODO [mwu] waiting for fix https://github.com/enso-org/enso/issues/1016
// expect_imports("import", &[&[]]);
expect_imports("import Foo", &[&["Foo"]]);
expect_imports("import Foo.Bar", &[&["Foo", "Bar"]]);
expect_imports("foo = bar\nimport Foo.Bar", &[&["Foo", "Bar"]]);
expect_imports("import Foo.Bar\nfoo=bar\nimport Foo.Bar", &[&["Foo", "Bar"], &[
"Foo", "Bar",
]]);
}
#[test]
fn import_adding_and_removing() {
let parser = parser::Parser::new();
let code = "import Foo.Bar.Baz";
let ast = parser.parse_module(code, default()).unwrap();
let mut info = Info { ast };
let import = |code| {
let ast = parser.parse_line_ast(code).unwrap();
import::Info::from_ast(&ast).unwrap()
};
info.add_import(&parser, import("import Bar.Gar"));
info.expect_code("import Bar.Gar\nimport Foo.Bar.Baz");
info.add_import(&parser, import("import Gar.Bar"));
info.expect_code("import Bar.Gar\nimport Foo.Bar.Baz\nimport Gar.Bar");
info.remove_import(&import("import Foo.Bar.Baz")).unwrap();
info.expect_code("import Bar.Gar\nimport Gar.Bar");
info.remove_import(&import("import Foo.Bar.Baz")).unwrap_err();
info.expect_code("import Bar.Gar\nimport Gar.Bar");
info.remove_import(&import("import Gar.Bar")).unwrap();
info.expect_code("import Bar.Gar");
info.remove_import(&import("import Bar.Gar")).unwrap();
info.expect_code("");
info.add_import(&parser, import("import Bar.Gar"));
info.expect_code("import Bar.Gar");
}
#[test]
fn implicit_method_resolution() {
let parser = parser::Parser::new();
let module_name =
QualifiedName::from_all_segments(["local", "ProjectName", "Main"]).unwrap();
let expect_find = |method: &MethodPointer, code, expected: &definition::Id| {
let module = parser.parse_module(code, default()).unwrap();
let result = lookup_method(&module_name, &module, method);
assert_eq!(result.unwrap().to_string(), expected.to_string());
// TODO [mwu]
// We should be able to use `assert_eq!(result.unwrap(),expected);`
// But we can't, because definition::Id uses located fields and crumbs won't match.
// Eventually we'll likely need to split definition names into located and unlocated
// ones. Definition ID should not require any location info.
};
let expect_not_found = |method: &MethodPointer, code| {
let module = parser.parse_module(code, default()).unwrap();
lookup_method(&module_name, &module, method).expect_err("expected method not found");
};
// === Lookup the Main (local module type) extension method ===
let ptr = MethodPointer {
defined_on_type: "local.ProjectName.Main".into(),
module: "local.ProjectName.Main".into(),
name: "foo".into(),
};
// Implicit module extension method.
let id = definition::Id::new_plain_name("foo");
expect_find(&ptr, "foo a b = a + b", &id);
// Explicit module extension method.
let id = definition::Id::new_single_crumb(DefinitionName::new_method("Main", "foo"));
expect_find(&ptr, "Main.foo a b = a + b", &id);
// Matching name but extending wrong type.
expect_not_found(&ptr, "Number.foo a b = a + b");
// Mismatched name.
expect_not_found(&ptr, "bar a b = a + b");
// === Lookup the Int (non-local type) extension method ===
let ptr = MethodPointer {
defined_on_type: "std.Base.Main.Number".into(),
module: "local.ProjectName.Main".into(),
name: "foo".into(),
};
expect_not_found(&ptr, "foo a b = a + b");
let id = definition::Id::new_single_crumb(DefinitionName::new_method("Number", "foo"));
expect_find(&ptr, "Number.foo a b = a + b", &id);
expect_not_found(&ptr, "Text.foo a b = a + b");
expect_not_found(&ptr, "bar a b = a + b");
}
#[test]
fn test_definition_location() {
let code = r"
some def =
first line
second line
other def =
first line
second line
nested def =
nested body
last line of other def
last def = inline expression";
let parser = parser::Parser::new();
let module = parser.parse_module(code, default()).unwrap();
let module = Info { ast: module };
let id = definition::Id::new_plain_name("other");
let span = definition_span(&module.ast, &id).unwrap();
assert!(code[span].ends_with("last line of other def"));
let id = definition::Id::new_plain_name("last");
let span = definition_span(&module.ast, &id).unwrap();
assert!(code[span].ends_with("inline expression"));
let id = definition::Id::new_plain_names(["other", "nested"]);
let span = definition_span(&module.ast, &id).unwrap();
assert!(code[span].ends_with("nested body"));
}
#[test]
fn add_method() {
let parser = parser::Parser::new();
let module = r#"Main.method1 arg = body
main = Main.method1 10"#;
let module = Info::from(parser.parse_module(module, default()).unwrap());
let method1_id = DefinitionName::new_method("Main", "method1");
let main_id = DefinitionName::new_plain("main");
let to_add = definition::ToAdd {
name: DefinitionName::new_method("Main", "add"),
explicit_parameter_names: vec!["arg1".into(), "arg2".into()],
body_head: Ast::infix_var("arg1", "+", "arg2"),
body_tail: default(),
};
let repr_after_insertion = |location| {
let mut module = module.clone();
module.add_method(to_add.clone(), location, &parser).unwrap();
module.ast.repr()
};
let expected = r#"Main.add arg1 arg2 = arg1 + arg2
Main.method1 arg = body
main = Main.method1 10"#;
assert_eq!(repr_after_insertion(Placement::Begin), expected);
let expected = r#"Main.method1 arg = body
main = Main.method1 10
Main.add arg1 arg2 = arg1 + arg2"#;
assert_eq!(repr_after_insertion(Placement::End), expected);
let expected = r#"Main.method1 arg = body
Main.add arg1 arg2 = arg1 + arg2
main = Main.method1 10"#;
assert_eq!(repr_after_insertion(Placement::After(method1_id.clone())), expected);
assert_eq!(
repr_after_insertion(Placement::Before(method1_id.clone())),
repr_after_insertion(Placement::Begin)
);
assert_eq!(
repr_after_insertion(Placement::After(method1_id)),
repr_after_insertion(Placement::Before(main_id.clone()))
);
assert_eq!(
repr_after_insertion(Placement::After(main_id)),
repr_after_insertion(Placement::End)
);
// TODO [mwu]
// This test doesn't include multi-lines functions, as the result may seem somewhat
// unexpected due to the way that parser assigns blank lines to the former block
// rather than module. If anyone will care, we might revisit this after the parser
// 2.0 rewrite.
}
} | pub struct Id {
/// The last segment being a module name. For project's main module it should be equal
/// to [`PROJECTS_MAIN_MODULE`].
pub name: ImString, | random_line_split |
init.rs | // Copyright (c) 2021 ESRLabs
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use super::{
clone::clone, fs::Mount, io::Fd, seccomp::AllowList, Checkpoint, Container, PipeRead, Start,
SIGNAL_OFFSET,
};
use nix::{
errno::Errno,
libc::{self, c_int, c_ulong},
sched,
sys::{
self,
signal::{signal, sigprocmask, SigHandler, SigSet, SigmaskHow, Signal, SIGCHLD, SIGKILL},
},
unistd::{self, Uid},
};
use sched::CloneFlags;
use std::{
collections::HashSet, env, ffi::CString, io::Read, os::unix::prelude::RawFd, process::exit,
};
use sys::wait::{waitpid, WaitStatus};
// Init function. Pid 1.
#[allow(clippy::too_many_arguments)]
pub(super) fn init(
container: &Container,
init: &CString,
argv: &[CString],
env: &[CString],
mounts: &[Mount],
fds: &[(RawFd, Fd)],
groups: &[u32],
seccomp: Option<AllowList>,
mut checkpoint: Checkpoint,
tripwire: PipeRead,
) -> ! {
// Install a "default signal handler" that exits on any signal. This process is the "init"
// process of this pid ns and therefore doesn't have any own signal handlers. This handler that just exits
// is needed in case the container is signaled *before* the child is spawned that would otherwise receive the signal.
// If the child is spawn when the signal is sent to this group it shall exit and the init returns from waitpid.
set_init_signal_handlers();
// Become a session group leader
setsid();
// Sync with parent
checkpoint.wait(Start::Start);
checkpoint.send(Start::Started);
drop(checkpoint);
pr_set_name_init();
// Become a subreaper for orphans in this namespace
set_child_subreaper(true);
let manifest = &container.manifest;
let root = container
.root
.canonicalize()
.expect("Failed to canonicalize root");
// Mount
mount(&mounts);
// Chroot
unistd::chroot(&root).expect("Failed to chroot");
// Pwd
env::set_current_dir("/").expect("Failed to set cwd to /");
// UID / GID
setid(manifest.uid, manifest.gid);
// Supplementary groups | set_no_new_privs(true);
// Capabilities
drop_capabilities(manifest.capabilities.as_ref());
// Close and dup fds
file_descriptors(fds);
// Clone
match clone(CloneFlags::empty(), Some(SIGCHLD as i32)) {
Ok(result) => match result {
unistd::ForkResult::Parent { child } => {
wait_for_parent_death(tripwire);
reset_signal_handlers();
// Wait for the child to exit
loop {
match waitpid(Some(child), None) {
Ok(WaitStatus::Exited(_pid, status)) => exit(status),
Ok(WaitStatus::Signaled(_pid, status, _)) => {
// Encode the signal number in the process exit status. It's not possible to raise a
// a signal in this "init" process that is received by our parent
let code = SIGNAL_OFFSET + status as i32;
//debug!("Exiting with {} (signaled {})", code, status);
exit(code);
}
Err(e) if e == nix::Error::Sys(Errno::EINTR) => continue,
e => panic!("Failed to waitpid on {}: {:?}", child, e),
}
}
}
unistd::ForkResult::Child => {
drop(tripwire);
set_parent_death_signal(SIGKILL);
// TODO: Post Linux 5.5 there's a nice clone flag that allows to reset the signal handler during the clone.
reset_signal_handlers();
reset_signal_mask();
// Set seccomp filter
if let Some(mut filter) = seccomp {
filter.apply().expect("Failed to apply seccomp filter.");
}
panic!(
"Execve: {:?} {:?}: {:?}",
&init,
&argv,
unistd::execve(&init, &argv, &env)
)
}
},
Err(e) => panic!("Clone error: {}", e),
}
}
/// Execute list of mount calls
fn mount(mounts: &[Mount]) {
for mount in mounts {
mount.mount();
}
}
/// Apply file descriptor configuration
fn file_descriptors(map: &[(RawFd, Fd)]) {
for (fd, value) in map {
match value {
Fd::Close => {
// Ignore close errors because the fd list contains the ReadDir fd and fds from other tasks.
unistd::close(*fd).ok();
}
Fd::Dup(n) => {
unistd::dup2(*n, *fd).expect("Failed to dup2");
unistd::close(*n).expect("Failed to close");
}
}
}
}
fn set_child_subreaper(value: bool) {
#[cfg(target_os = "android")]
const PR_SET_CHILD_SUBREAPER: c_int = 36;
#[cfg(not(target_os = "android"))]
use libc::PR_SET_CHILD_SUBREAPER;
let value = if value { 1u64 } else { 0u64 };
let result = unsafe { nix::libc::prctl(PR_SET_CHILD_SUBREAPER, value, 0, 0, 0) };
Errno::result(result)
.map(drop)
.expect("Failed to set PR_SET_CHILD_SUBREAPER");
}
fn set_parent_death_signal(signal: Signal) {
#[cfg(target_os = "android")]
const PR_SET_PDEATHSIG: c_int = 1;
#[cfg(not(target_os = "android"))]
use libc::PR_SET_PDEATHSIG;
let result = unsafe { nix::libc::prctl(PR_SET_PDEATHSIG, signal, 0, 0, 0) };
Errno::result(result)
.map(drop)
.expect("Failed to set PR_SET_PDEATHSIG");
}
/// Wait in a separate thread for the parent (runtime) process to terminate. This should normally
/// not happen. If it does, we (init) need to terminate ourselves or we will be adopted by system
/// init. Setting PR_SET_PDEATHSIG is not an option here as we were spawned from a short lived tokio
/// thread (not process) that would trigger the signal once the thread terminates.
/// Performing this step before calling setgroups results in a SIGABRT.
fn wait_for_parent_death(mut tripwire: PipeRead) {
std::thread::spawn(move || {
tripwire.read_exact(&mut [0u8, 1]).ok();
panic!("Runtime died");
});
}
fn set_no_new_privs(value: bool) {
#[cfg(target_os = "android")]
pub const PR_SET_NO_NEW_PRIVS: c_int = 38;
#[cfg(not(target_os = "android"))]
use libc::PR_SET_NO_NEW_PRIVS;
let result = unsafe { nix::libc::prctl(PR_SET_NO_NEW_PRIVS, value as c_ulong, 0, 0, 0) };
Errno::result(result)
.map(drop)
.expect("Failed to set PR_SET_NO_NEW_PRIVS")
}
#[cfg(target_os = "android")]
pub const PR_SET_NAME: c_int = 15;
#[cfg(not(target_os = "android"))]
use libc::PR_SET_NAME;
/// Set the name of the current process to "init"
fn pr_set_name_init() {
let cname = "init\0";
let result = unsafe { libc::prctl(PR_SET_NAME, cname.as_ptr() as c_ulong, 0, 0, 0) };
Errno::result(result)
.map(drop)
.expect("Failed to set PR_SET_NAME");
}
/// Install default signal handler
fn reset_signal_handlers() {
Signal::iterator()
.filter(|s| *s != Signal::SIGCHLD)
.filter(|s| *s != Signal::SIGKILL)
.filter(|s| *s != Signal::SIGSTOP)
.try_for_each(|s| unsafe { signal(s, SigHandler::SigDfl) }.map(drop))
.expect("failed to signal");
}
fn reset_signal_mask() {
sigprocmask(SigmaskHow::SIG_UNBLOCK, Some(&SigSet::all()), None)
.expect("Failed to reset signal maks")
}
/// Install a signal handler that terminates the init process if the signal
/// is received before the clone of the child. If this handler would not be
/// installed the signal would be ignored (and not sent to the group) because
/// the init processes in PID namespace do not have default signal handlers.
fn set_init_signal_handlers() {
extern "C" fn init_signal_handler(signal: c_int) {
exit(SIGNAL_OFFSET + signal);
}
Signal::iterator()
.filter(|s| *s != Signal::SIGCHLD)
.filter(|s| *s != Signal::SIGKILL)
.filter(|s| *s != Signal::SIGSTOP)
.try_for_each(|s| unsafe { signal(s, SigHandler::Handler(init_signal_handler)) }.map(drop))
.expect("Failed to set signal handler");
}
// Reset effective caps to the most possible set
fn reset_effective_caps() {
caps::set(None, caps::CapSet::Effective, &caps::all()).expect("Failed to reset effective caps");
}
/// Set uid/gid
fn setid(uid: u16, gid: u16) {
let rt_privileged = unistd::geteuid() == Uid::from_raw(0);
// If running as uid 0 save our caps across the uid/gid drop
if rt_privileged {
caps::securebits::set_keepcaps(true).expect("Failed to set keep caps");
}
let gid = unistd::Gid::from_raw(gid.into());
unistd::setresgid(gid, gid, gid).expect("Failed to set resgid");
let uid = unistd::Uid::from_raw(uid.into());
unistd::setresuid(uid, uid, uid).expect("Failed to set resuid");
if rt_privileged {
reset_effective_caps();
caps::securebits::set_keepcaps(false).expect("Failed to set keep caps");
}
}
/// Become a session group leader
fn setsid() {
unistd::setsid().expect("Failed to call setsid");
}
fn setgroups(groups: &[u32]) {
let result = unsafe { nix::libc::setgroups(groups.len(), groups.as_ptr()) };
Errno::result(result)
.map(drop)
.expect("Failed to set supplementary groups");
}
/// Drop capabilities
fn drop_capabilities(cs: Option<&HashSet<caps::Capability>>) {
let mut bounded =
caps::read(None, caps::CapSet::Bounding).expect("Failed to read bounding caps");
if let Some(caps) = cs {
bounded.retain(|c| !caps.contains(c));
}
for cap in bounded {
// caps::set cannot be called for bounded
caps::drop(None, caps::CapSet::Bounding, cap).expect("Failed to drop bounding cap");
}
if let Some(caps) = cs {
caps::set(None, caps::CapSet::Effective, caps).expect("Failed to set effective caps");
caps::set(None, caps::CapSet::Permitted, caps).expect("Failed to set permitted caps");
caps::set(None, caps::CapSet::Inheritable, caps).expect("Failed to set inheritable caps");
caps::set(None, caps::CapSet::Ambient, caps).expect("Failed to set ambient caps");
}
} | setgroups(groups);
// No new privileges | random_line_split |
init.rs | // Copyright (c) 2021 ESRLabs
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use super::{
clone::clone, fs::Mount, io::Fd, seccomp::AllowList, Checkpoint, Container, PipeRead, Start,
SIGNAL_OFFSET,
};
use nix::{
errno::Errno,
libc::{self, c_int, c_ulong},
sched,
sys::{
self,
signal::{signal, sigprocmask, SigHandler, SigSet, SigmaskHow, Signal, SIGCHLD, SIGKILL},
},
unistd::{self, Uid},
};
use sched::CloneFlags;
use std::{
collections::HashSet, env, ffi::CString, io::Read, os::unix::prelude::RawFd, process::exit,
};
use sys::wait::{waitpid, WaitStatus};
// Init function. Pid 1.
#[allow(clippy::too_many_arguments)]
pub(super) fn init(
container: &Container,
init: &CString,
argv: &[CString],
env: &[CString],
mounts: &[Mount],
fds: &[(RawFd, Fd)],
groups: &[u32],
seccomp: Option<AllowList>,
mut checkpoint: Checkpoint,
tripwire: PipeRead,
) -> ! {
// Install a "default signal handler" that exits on any signal. This process is the "init"
// process of this pid ns and therefore doesn't have any own signal handlers. This handler that just exits
// is needed in case the container is signaled *before* the child is spawned that would otherwise receive the signal.
// If the child is spawn when the signal is sent to this group it shall exit and the init returns from waitpid.
set_init_signal_handlers();
// Become a session group leader
setsid();
// Sync with parent
checkpoint.wait(Start::Start);
checkpoint.send(Start::Started);
drop(checkpoint);
pr_set_name_init();
// Become a subreaper for orphans in this namespace
set_child_subreaper(true);
let manifest = &container.manifest;
let root = container
.root
.canonicalize()
.expect("Failed to canonicalize root");
// Mount
mount(&mounts);
// Chroot
unistd::chroot(&root).expect("Failed to chroot");
// Pwd
env::set_current_dir("/").expect("Failed to set cwd to /");
// UID / GID
setid(manifest.uid, manifest.gid);
// Supplementary groups
setgroups(groups);
// No new privileges
set_no_new_privs(true);
// Capabilities
drop_capabilities(manifest.capabilities.as_ref());
// Close and dup fds
file_descriptors(fds);
// Clone
match clone(CloneFlags::empty(), Some(SIGCHLD as i32)) {
Ok(result) => match result {
unistd::ForkResult::Parent { child } => {
wait_for_parent_death(tripwire);
reset_signal_handlers();
// Wait for the child to exit
loop {
match waitpid(Some(child), None) {
Ok(WaitStatus::Exited(_pid, status)) => exit(status),
Ok(WaitStatus::Signaled(_pid, status, _)) => {
// Encode the signal number in the process exit status. It's not possible to raise a
// a signal in this "init" process that is received by our parent
let code = SIGNAL_OFFSET + status as i32;
//debug!("Exiting with {} (signaled {})", code, status);
exit(code);
}
Err(e) if e == nix::Error::Sys(Errno::EINTR) => continue,
e => panic!("Failed to waitpid on {}: {:?}", child, e),
}
}
}
unistd::ForkResult::Child => {
drop(tripwire);
set_parent_death_signal(SIGKILL);
// TODO: Post Linux 5.5 there's a nice clone flag that allows to reset the signal handler during the clone.
reset_signal_handlers();
reset_signal_mask();
// Set seccomp filter
if let Some(mut filter) = seccomp {
filter.apply().expect("Failed to apply seccomp filter.");
}
panic!(
"Execve: {:?} {:?}: {:?}",
&init,
&argv,
unistd::execve(&init, &argv, &env)
)
}
},
Err(e) => panic!("Clone error: {}", e),
}
}
/// Execute list of mount calls
fn mount(mounts: &[Mount]) {
for mount in mounts {
mount.mount();
}
}
/// Apply file descriptor configuration
fn file_descriptors(map: &[(RawFd, Fd)]) {
for (fd, value) in map {
match value {
Fd::Close => {
// Ignore close errors because the fd list contains the ReadDir fd and fds from other tasks.
unistd::close(*fd).ok();
}
Fd::Dup(n) => {
unistd::dup2(*n, *fd).expect("Failed to dup2");
unistd::close(*n).expect("Failed to close");
}
}
}
}
fn | (value: bool) {
#[cfg(target_os = "android")]
const PR_SET_CHILD_SUBREAPER: c_int = 36;
#[cfg(not(target_os = "android"))]
use libc::PR_SET_CHILD_SUBREAPER;
let value = if value { 1u64 } else { 0u64 };
let result = unsafe { nix::libc::prctl(PR_SET_CHILD_SUBREAPER, value, 0, 0, 0) };
Errno::result(result)
.map(drop)
.expect("Failed to set PR_SET_CHILD_SUBREAPER");
}
fn set_parent_death_signal(signal: Signal) {
#[cfg(target_os = "android")]
const PR_SET_PDEATHSIG: c_int = 1;
#[cfg(not(target_os = "android"))]
use libc::PR_SET_PDEATHSIG;
let result = unsafe { nix::libc::prctl(PR_SET_PDEATHSIG, signal, 0, 0, 0) };
Errno::result(result)
.map(drop)
.expect("Failed to set PR_SET_PDEATHSIG");
}
/// Wait in a separate thread for the parent (runtime) process to terminate. This should normally
/// not happen. If it does, we (init) need to terminate ourselves or we will be adopted by system
/// init. Setting PR_SET_PDEATHSIG is not an option here as we were spawned from a short lived tokio
/// thread (not process) that would trigger the signal once the thread terminates.
/// Performing this step before calling setgroups results in a SIGABRT.
fn wait_for_parent_death(mut tripwire: PipeRead) {
std::thread::spawn(move || {
tripwire.read_exact(&mut [0u8, 1]).ok();
panic!("Runtime died");
});
}
fn set_no_new_privs(value: bool) {
#[cfg(target_os = "android")]
pub const PR_SET_NO_NEW_PRIVS: c_int = 38;
#[cfg(not(target_os = "android"))]
use libc::PR_SET_NO_NEW_PRIVS;
let result = unsafe { nix::libc::prctl(PR_SET_NO_NEW_PRIVS, value as c_ulong, 0, 0, 0) };
Errno::result(result)
.map(drop)
.expect("Failed to set PR_SET_NO_NEW_PRIVS")
}
#[cfg(target_os = "android")]
pub const PR_SET_NAME: c_int = 15;
#[cfg(not(target_os = "android"))]
use libc::PR_SET_NAME;
/// Set the name of the current process to "init"
fn pr_set_name_init() {
let cname = "init\0";
let result = unsafe { libc::prctl(PR_SET_NAME, cname.as_ptr() as c_ulong, 0, 0, 0) };
Errno::result(result)
.map(drop)
.expect("Failed to set PR_SET_NAME");
}
/// Install default signal handler
fn reset_signal_handlers() {
Signal::iterator()
.filter(|s| *s != Signal::SIGCHLD)
.filter(|s| *s != Signal::SIGKILL)
.filter(|s| *s != Signal::SIGSTOP)
.try_for_each(|s| unsafe { signal(s, SigHandler::SigDfl) }.map(drop))
.expect("failed to signal");
}
fn reset_signal_mask() {
sigprocmask(SigmaskHow::SIG_UNBLOCK, Some(&SigSet::all()), None)
.expect("Failed to reset signal maks")
}
/// Install a signal handler that terminates the init process if the signal
/// is received before the clone of the child. If this handler would not be
/// installed the signal would be ignored (and not sent to the group) because
/// the init processes in PID namespace do not have default signal handlers.
fn set_init_signal_handlers() {
extern "C" fn init_signal_handler(signal: c_int) {
exit(SIGNAL_OFFSET + signal);
}
Signal::iterator()
.filter(|s| *s != Signal::SIGCHLD)
.filter(|s| *s != Signal::SIGKILL)
.filter(|s| *s != Signal::SIGSTOP)
.try_for_each(|s| unsafe { signal(s, SigHandler::Handler(init_signal_handler)) }.map(drop))
.expect("Failed to set signal handler");
}
// Reset effective caps to the most possible set
fn reset_effective_caps() {
caps::set(None, caps::CapSet::Effective, &caps::all()).expect("Failed to reset effective caps");
}
/// Set uid/gid
fn setid(uid: u16, gid: u16) {
let rt_privileged = unistd::geteuid() == Uid::from_raw(0);
// If running as uid 0 save our caps across the uid/gid drop
if rt_privileged {
caps::securebits::set_keepcaps(true).expect("Failed to set keep caps");
}
let gid = unistd::Gid::from_raw(gid.into());
unistd::setresgid(gid, gid, gid).expect("Failed to set resgid");
let uid = unistd::Uid::from_raw(uid.into());
unistd::setresuid(uid, uid, uid).expect("Failed to set resuid");
if rt_privileged {
reset_effective_caps();
caps::securebits::set_keepcaps(false).expect("Failed to set keep caps");
}
}
/// Become a session group leader
fn setsid() {
unistd::setsid().expect("Failed to call setsid");
}
fn setgroups(groups: &[u32]) {
let result = unsafe { nix::libc::setgroups(groups.len(), groups.as_ptr()) };
Errno::result(result)
.map(drop)
.expect("Failed to set supplementary groups");
}
/// Drop capabilities
fn drop_capabilities(cs: Option<&HashSet<caps::Capability>>) {
let mut bounded =
caps::read(None, caps::CapSet::Bounding).expect("Failed to read bounding caps");
if let Some(caps) = cs {
bounded.retain(|c| !caps.contains(c));
}
for cap in bounded {
// caps::set cannot be called for bounded
caps::drop(None, caps::CapSet::Bounding, cap).expect("Failed to drop bounding cap");
}
if let Some(caps) = cs {
caps::set(None, caps::CapSet::Effective, caps).expect("Failed to set effective caps");
caps::set(None, caps::CapSet::Permitted, caps).expect("Failed to set permitted caps");
caps::set(None, caps::CapSet::Inheritable, caps).expect("Failed to set inheritable caps");
caps::set(None, caps::CapSet::Ambient, caps).expect("Failed to set ambient caps");
}
}
| set_child_subreaper | identifier_name |
init.rs | // Copyright (c) 2021 ESRLabs
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use super::{
clone::clone, fs::Mount, io::Fd, seccomp::AllowList, Checkpoint, Container, PipeRead, Start,
SIGNAL_OFFSET,
};
use nix::{
errno::Errno,
libc::{self, c_int, c_ulong},
sched,
sys::{
self,
signal::{signal, sigprocmask, SigHandler, SigSet, SigmaskHow, Signal, SIGCHLD, SIGKILL},
},
unistd::{self, Uid},
};
use sched::CloneFlags;
use std::{
collections::HashSet, env, ffi::CString, io::Read, os::unix::prelude::RawFd, process::exit,
};
use sys::wait::{waitpid, WaitStatus};
// Init function. Pid 1.
#[allow(clippy::too_many_arguments)]
pub(super) fn init(
container: &Container,
init: &CString,
argv: &[CString],
env: &[CString],
mounts: &[Mount],
fds: &[(RawFd, Fd)],
groups: &[u32],
seccomp: Option<AllowList>,
mut checkpoint: Checkpoint,
tripwire: PipeRead,
) -> ! {
// Install a "default signal handler" that exits on any signal. This process is the "init"
// process of this pid ns and therefore doesn't have any own signal handlers. This handler that just exits
// is needed in case the container is signaled *before* the child is spawned that would otherwise receive the signal.
// If the child is spawn when the signal is sent to this group it shall exit and the init returns from waitpid.
set_init_signal_handlers();
// Become a session group leader
setsid();
// Sync with parent
checkpoint.wait(Start::Start);
checkpoint.send(Start::Started);
drop(checkpoint);
pr_set_name_init();
// Become a subreaper for orphans in this namespace
set_child_subreaper(true);
let manifest = &container.manifest;
let root = container
.root
.canonicalize()
.expect("Failed to canonicalize root");
// Mount
mount(&mounts);
// Chroot
unistd::chroot(&root).expect("Failed to chroot");
// Pwd
env::set_current_dir("/").expect("Failed to set cwd to /");
// UID / GID
setid(manifest.uid, manifest.gid);
// Supplementary groups
setgroups(groups);
// No new privileges
set_no_new_privs(true);
// Capabilities
drop_capabilities(manifest.capabilities.as_ref());
// Close and dup fds
file_descriptors(fds);
// Clone
match clone(CloneFlags::empty(), Some(SIGCHLD as i32)) {
Ok(result) => match result {
unistd::ForkResult::Parent { child } => {
wait_for_parent_death(tripwire);
reset_signal_handlers();
// Wait for the child to exit
loop {
match waitpid(Some(child), None) {
Ok(WaitStatus::Exited(_pid, status)) => exit(status),
Ok(WaitStatus::Signaled(_pid, status, _)) => {
// Encode the signal number in the process exit status. It's not possible to raise a
// a signal in this "init" process that is received by our parent
let code = SIGNAL_OFFSET + status as i32;
//debug!("Exiting with {} (signaled {})", code, status);
exit(code);
}
Err(e) if e == nix::Error::Sys(Errno::EINTR) => continue,
e => panic!("Failed to waitpid on {}: {:?}", child, e),
}
}
}
unistd::ForkResult::Child => {
drop(tripwire);
set_parent_death_signal(SIGKILL);
// TODO: Post Linux 5.5 there's a nice clone flag that allows to reset the signal handler during the clone.
reset_signal_handlers();
reset_signal_mask();
// Set seccomp filter
if let Some(mut filter) = seccomp {
filter.apply().expect("Failed to apply seccomp filter.");
}
panic!(
"Execve: {:?} {:?}: {:?}",
&init,
&argv,
unistd::execve(&init, &argv, &env)
)
}
},
Err(e) => panic!("Clone error: {}", e),
}
}
/// Execute list of mount calls
fn mount(mounts: &[Mount]) {
for mount in mounts {
mount.mount();
}
}
/// Apply file descriptor configuration
fn file_descriptors(map: &[(RawFd, Fd)]) {
for (fd, value) in map {
match value {
Fd::Close => {
// Ignore close errors because the fd list contains the ReadDir fd and fds from other tasks.
unistd::close(*fd).ok();
}
Fd::Dup(n) => {
unistd::dup2(*n, *fd).expect("Failed to dup2");
unistd::close(*n).expect("Failed to close");
}
}
}
}
fn set_child_subreaper(value: bool) {
#[cfg(target_os = "android")]
const PR_SET_CHILD_SUBREAPER: c_int = 36;
#[cfg(not(target_os = "android"))]
use libc::PR_SET_CHILD_SUBREAPER;
let value = if value { 1u64 } else { 0u64 };
let result = unsafe { nix::libc::prctl(PR_SET_CHILD_SUBREAPER, value, 0, 0, 0) };
Errno::result(result)
.map(drop)
.expect("Failed to set PR_SET_CHILD_SUBREAPER");
}
fn set_parent_death_signal(signal: Signal) {
#[cfg(target_os = "android")]
const PR_SET_PDEATHSIG: c_int = 1;
#[cfg(not(target_os = "android"))]
use libc::PR_SET_PDEATHSIG;
let result = unsafe { nix::libc::prctl(PR_SET_PDEATHSIG, signal, 0, 0, 0) };
Errno::result(result)
.map(drop)
.expect("Failed to set PR_SET_PDEATHSIG");
}
/// Wait in a separate thread for the parent (runtime) process to terminate. This should normally
/// not happen. If it does, we (init) need to terminate ourselves or we will be adopted by system
/// init. Setting PR_SET_PDEATHSIG is not an option here as we were spawned from a short lived tokio
/// thread (not process) that would trigger the signal once the thread terminates.
/// Performing this step before calling setgroups results in a SIGABRT.
fn wait_for_parent_death(mut tripwire: PipeRead) {
std::thread::spawn(move || {
tripwire.read_exact(&mut [0u8, 1]).ok();
panic!("Runtime died");
});
}
fn set_no_new_privs(value: bool) {
#[cfg(target_os = "android")]
pub const PR_SET_NO_NEW_PRIVS: c_int = 38;
#[cfg(not(target_os = "android"))]
use libc::PR_SET_NO_NEW_PRIVS;
let result = unsafe { nix::libc::prctl(PR_SET_NO_NEW_PRIVS, value as c_ulong, 0, 0, 0) };
Errno::result(result)
.map(drop)
.expect("Failed to set PR_SET_NO_NEW_PRIVS")
}
#[cfg(target_os = "android")]
pub const PR_SET_NAME: c_int = 15;
#[cfg(not(target_os = "android"))]
use libc::PR_SET_NAME;
/// Set the name of the current process to "init"
fn pr_set_name_init() {
let cname = "init\0";
let result = unsafe { libc::prctl(PR_SET_NAME, cname.as_ptr() as c_ulong, 0, 0, 0) };
Errno::result(result)
.map(drop)
.expect("Failed to set PR_SET_NAME");
}
/// Install default signal handler
fn reset_signal_handlers() {
Signal::iterator()
.filter(|s| *s != Signal::SIGCHLD)
.filter(|s| *s != Signal::SIGKILL)
.filter(|s| *s != Signal::SIGSTOP)
.try_for_each(|s| unsafe { signal(s, SigHandler::SigDfl) }.map(drop))
.expect("failed to signal");
}
fn reset_signal_mask() {
sigprocmask(SigmaskHow::SIG_UNBLOCK, Some(&SigSet::all()), None)
.expect("Failed to reset signal maks")
}
/// Install a signal handler that terminates the init process if the signal
/// is received before the clone of the child. If this handler would not be
/// installed the signal would be ignored (and not sent to the group) because
/// the init processes in PID namespace do not have default signal handlers.
fn set_init_signal_handlers() {
extern "C" fn init_signal_handler(signal: c_int) {
exit(SIGNAL_OFFSET + signal);
}
Signal::iterator()
.filter(|s| *s != Signal::SIGCHLD)
.filter(|s| *s != Signal::SIGKILL)
.filter(|s| *s != Signal::SIGSTOP)
.try_for_each(|s| unsafe { signal(s, SigHandler::Handler(init_signal_handler)) }.map(drop))
.expect("Failed to set signal handler");
}
// Reset effective caps to the most possible set
fn reset_effective_caps() {
caps::set(None, caps::CapSet::Effective, &caps::all()).expect("Failed to reset effective caps");
}
/// Set uid/gid
fn setid(uid: u16, gid: u16) {
let rt_privileged = unistd::geteuid() == Uid::from_raw(0);
// If running as uid 0 save our caps across the uid/gid drop
if rt_privileged {
caps::securebits::set_keepcaps(true).expect("Failed to set keep caps");
}
let gid = unistd::Gid::from_raw(gid.into());
unistd::setresgid(gid, gid, gid).expect("Failed to set resgid");
let uid = unistd::Uid::from_raw(uid.into());
unistd::setresuid(uid, uid, uid).expect("Failed to set resuid");
if rt_privileged {
reset_effective_caps();
caps::securebits::set_keepcaps(false).expect("Failed to set keep caps");
}
}
/// Become a session group leader
fn setsid() |
fn setgroups(groups: &[u32]) {
let result = unsafe { nix::libc::setgroups(groups.len(), groups.as_ptr()) };
Errno::result(result)
.map(drop)
.expect("Failed to set supplementary groups");
}
/// Drop capabilities
fn drop_capabilities(cs: Option<&HashSet<caps::Capability>>) {
let mut bounded =
caps::read(None, caps::CapSet::Bounding).expect("Failed to read bounding caps");
if let Some(caps) = cs {
bounded.retain(|c| !caps.contains(c));
}
for cap in bounded {
// caps::set cannot be called for bounded
caps::drop(None, caps::CapSet::Bounding, cap).expect("Failed to drop bounding cap");
}
if let Some(caps) = cs {
caps::set(None, caps::CapSet::Effective, caps).expect("Failed to set effective caps");
caps::set(None, caps::CapSet::Permitted, caps).expect("Failed to set permitted caps");
caps::set(None, caps::CapSet::Inheritable, caps).expect("Failed to set inheritable caps");
caps::set(None, caps::CapSet::Ambient, caps).expect("Failed to set ambient caps");
}
}
| {
unistd::setsid().expect("Failed to call setsid");
} | identifier_body |
tessbatman.py | """ tessbatman.py
This file contains helper functions for the tessbatman pipeline.
It is divided into Batman, TESS, and Convolve functions.
"""
from time import time
import glob
import os.path as p
import json
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import scipy.signal as sig
import scipy.stats as stat
import astropy as ast
import astropy.table as tbl
import batman
# Batman Functions
def make_batman_config(tmin, tmax, tstep, wmin, wmax, wnum, wlog=True, suffix="", path="."):
"""
Write batman parameters to a JSON param file used to generate batmanCurves.
Parameters
----------
tmin (num): minimum time
tmax (num): maximum time
tnum (num): time step
wmin (num): minimum width | suffix (str): append suffix to config and curve file names
"""
params = {}
params["curves_fname"] = p.join(path, 'batmanCurves{}.csv'.format(suffix))
params["params_fname"] = p.join(path, 'batmanParams{}.csv'.format(suffix))
params["tmin"] = tmin
params["tmax"] = tmax
params["tstep"] = tstep
params["wmin"] = wmin
params["wmax"] = wmax
params["wnum"] = wnum
params["wlog"] = wlog
outfile = p.join(path, 'batmanConfig{}.param'.format(suffix))
with open(outfile, "w+") as f:
json.dump(params, f)
print("Batman config written to {}".format(outfile))
def make_lightcurve(t0, r, i, p, width, u_type, u_param, t):
"""
Generate a batman lightcurve with the given parameters.
Parameters
----------
t0 (num): time of inferior conjunction
r (num): planet radius (in stellar radii)
i (num): orbital inclination (in degrees)
p (num): orbital period
width (num): width parameter (defined as a**3/p**2)
u_type (str): limb darkening model
u_param (list): parameters for limb darkening
t: timesteps that you want the fluxes at
assume circular orbit
"""
# Init batman model
params = batman.TransitParams()
params.rp = r
params.inc = i
params.w = 0 # longitude of periastron (degenerate with width)
params.ecc = 0 # eccentricity (0 for circular orbits)
params.per = p # orbital period
params.t0 = t0
params.a = (width * p ** 2) ** (1 / 3) # semi-major axis (stellar radii)
params.limb_dark = u_type
params.u = u_param
model = batman.TransitModel(params, t)
# Generate curve
flux = model.light_curve(params) # compute light curve
return flux
def make_batman(paramfile, outdir, norm=False, write=True, verbose=True):
"""
Return astropy tables of batman params and generated curves based on the
parameters given in paramfile.
Parameters
----------
paramfile (str): path to JSON param file written by make_batman_config
outdir (str): path to write output curve and param files
norm (bool): normalize curves to unit integrated area
write (bool): write param and curve tables to files
verbose (bool): print logging and timing info
"""
# read batman param file
if verbose:
print("Reading param file", flush=True)
with open(paramfile, "r") as f:
d = json.load(f)
# init time array and parameter ranges
if verbose:
print("Setting param ranges", flush=True)
t = np.arange(d['tmin'], d['tmax'], d['tstep'])
if d['wlog']:
widths = np.logspace(d['wmin'], d['wmax'], d['wnum'])
else:
widths = np.linspace(d['wmin'], d['wmax'], d['wnum'])
nparams = len(widths)
radii = 0.1 * np.ones(nparams)
incs = 90 * np.ones(nparams)
u = ['0.1 0.3'] * nparams
ld = ['quadratic'] * nparams
per = 100*np.ones(nparams)
t0 = np.zeros(nparams)
e = np.zeros(nparams)
w = np.zeros(nparams)
# Old
# radii = []
# widths = []
# incs = []
# widths_arr = np.logspace(d['wmin'], d['wmax'], d['wnum'])
# radii_arr = np.logspace(d['rmin'], d['rmax'], d['rnum'])
# for r in radii_arr:
# for w in widths_arr:
# a = (w * (100)**2)**(1.0/3.0)
# lim = np.arccos((1 + r)/(a))/(2 * np.pi) * 360
# inc = np.linspace(90, lim, 11)[:-1] # last inc always fails so exclude
# for i in inc:
# incs.append(i)
# radii.append(r)
# widths.append(w)
# add params to batman param table
curveID = ['curve{}'.format(i) for i in range(nparams)]
cols = [curveID, radii, incs, widths, per, u, ld, t0, e, w]
colnames = ['curveID', 'rp', 'i', 'width', 'per', 'u', 'ld', 't0', 'e', 'w']
batmanParams = tbl.Table(cols, names=colnames)
# generate curves
if verbose:
print("Generating curves", flush=True)
start = time()
batmanDict = {'times': t}
err = 0 # keep track of errored curves
for i in range(len(batmanParams)):
p = batmanParams[i]
cID = p['curveID']
c = make_lightcurve(p['t0'], p['rp'], p['i'], p['per'], p['width'], p['ld'],
[float(val) for val in p['u'].split()], t)
# normalize curve c
if norm:
cmax = np.max(c)
cmin = np.min(c)
c = (c-cmin)/(cmax-cmin) # scale to [0,1]
c = 1-c # flip
c = c / np.sum(c) # normalize area under curve to 1
c = 1-c # flip back
if np.isnan(c).any() or (sum(c==1) < 5):
print("Batman {} failed".format(cID), flush=True)
err += 1
continue
# Save curve to dict
batmanDict[cID] = c
# Progress report every 100
if verbose and (i % 100 == 0):
elapsed = time() - start
print("Generated {}/{} curves in {} s".format(i+1-err, nparams,
elapsed), flush=True)
# add curves to table
batmanCurves = tbl.Table(batmanDict)
if verbose:
elapsed = time() - start
print("Generated {}/{} curves in {} s".format(nparams-err, nparams,
elapsed), flush=True)
# Write batman params and curves tables to files
if write:
if verbose:
start = time()
print("Writing files", flush=True)
ast.io.ascii.write(batmanParams, d['params_fname'], format='csv',
overwrite=True, comment='#', fast_writer=False)
if verbose:
print("Wrote params to {}".format(d['params_fname']))
ast.io.ascii.write(batmanCurves, d['curves_fname'], format='csv',
overwrite=True, comment='#', fast_writer=False)
if verbose:
print("Wrote curves to {}".format(d['curves_fname']))
elapsed = time() - start
print("Wrote files in {} s".format(elapsed), flush=True)
return(batmanParams, batmanCurves)
def read_batman(batmancurves_file):
"""
Return times, cureve name, and batman curves from a batmanCurves file.
Parameters
----------
batmancurves_file (str): Path to a batmanCurves file
Return
------
times (numpy Array): The times array (x axis) of all batmanCurves
curve_names (numpy Array): The name of each batmanCurve
batmanCurves (astropy Table): The table of batmanCurves
"""
# Read in Batman Curves
print("Reading batmanCurves from {}...".format(batmancurves_file))
batmanCurves = ast.io.ascii.read(batmancurves_file, data_start=1, format='csv')
times = np.array(batmanCurves['times'])
curve_names = np.array(batmanCurves.colnames[1:])
return times, curve_names, batmanCurves
# TESS Functions
def read_tess(tess_dir, sector_name, start=0, end=None):
"""
Return list of tess .fits files in tess_dir from [start:end]. Default
to all fits files in directory if start and end are not specified.
Parameters
----------
tess_dir (str): path to tess data directory
sector_name (str): name of sector subdirectory (e.g. Sector1)
start (int): (Optional) Index of file in directory to start at
end (int): (Optional) Index of file to end at
Return
------
tess_names (list): List of file paths to tess .fits data
"""
print("Reading TESS from {}, s:{}, e:{}...".format(sector_name, start, end))
sector_path = p.join(tess_dir, sector_name)
sector_files = glob.glob(p.join(sector_path,"*.fits"))
tess_names = sector_files[start:end]
return tess_names
def open_tess_fits(tess_fpath, norm=False):
try:
with ast.io.fits.open(tess_fpath, mode="readonly") as hdulist:
hdr = hdulist[0].header
tess_time = hdulist[1].data['TIME']
tess_flux = hdulist[1].data['PDCSAP_FLUX']
# set NaNs to median
med = np.nanmedian(tess_flux)
tess_flux[np.isnan(tess_flux)] = med
if norm:
# tess_flux[tess_flux > np.median(tess_flux)] = np.median(tess_flux)
tmin = np.min(tess_flux)
tmax = np.max(tess_flux)
tess_flux = (tess_flux - tmin)/(tmax-tmin)
except Exception as e:
print("ERROR reading file: ", tess_fpath, " with error: ", e,flush=True)
return None, None
return tess_time, tess_flux
# Convolve Fucntions
def convolve(tess_time, tess_flux, batmanCurves, curve_names, num_keep=10, plot=False):
conv_start = time()
curves = []
times = np.zeros(num_keep)
convs = np.zeros(num_keep)
print("Starting convolutions...",flush=True)
for i, curvename in enumerate(curve_names):
# do convolution
batman_curve = batmanCurves[curvename]
conv = np.abs(sig.fftconvolve(1-tess_flux, (1-batman_curve), 'same'))
ind_max = np.argmax(conv)
conv_max = conv[ind_max]
# if num_keep, save only the top num_keep curves
if num_keep < len(curve_names):
if conv_max > convs[-1]:
# insert in reverse sorted order
ind = np.searchsorted(-convs, -conv_max)
curves = curves[:ind] + [curvename] + curves[ind:-1]
times = np.insert(times, ind, tess_time[ind_max])[:-1]
convs = np.insert(convs, ind, conv_max)[:-1]
else:
curves.append(curvename)
times[i] = tess_time[ind_max]
convs[i] = conv_max
if plot:
plt.plot(tess_time, conv, label=curvename)
conv_time = time() - conv_start
print("Convolved {} curves in {:.3} s".format(len(curve_names), conv_time),flush=True)
return curves, times, convs
def tbconvolve(tess_dir, batman_dir, batman_suffix, sector, start, end, output_dir, num_keep=10, norm_tess=False, write=True, writechunk=10, verbosity=0):
"""
Parameters
----------
tess_dir(str): directory to TESS data
batman_dir (str): directory to model data
batman_suffix(str): suffix to append to barmanCurves file (e.g. _small)
sector (int): sector to pull data from
start (int): file to start at
end (int): file to end at
output_dir (str): directory to write candidates.csv
"""
tconv_start = time()
print("===START TCONVOLVE===",flush=True)
# Handle relative paths
tess_dir = p.abspath(tess_dir)
batman_dir = p.abspath(batman_dir)
output_dir = p.abspath(output_dir)
# Read in TESS Sector data
sector_name = "Sector{}".format(sector)
if sector == 0:
sector_name = "sample_"+sector_name
tess_names = read_tess(tess_dir, sector_name, start, end)
ntess = len(tess_names)
print("Found {} TESS files to process".format(ntess),flush=True)
if ntess < 1:
print("No tess curves found, quitting....")
return None
# Read in Batman Curves
batmanCurves_file = p.join(batman_dir,"batmanCurves{}.csv".format(batman_suffix))
times, curve_names, batmanCurves = read_batman(batmanCurves_file)
nbatman = len(curve_names)
print("Found {} Batman curves".format(nbatman),flush=True)
if ntess < 1:
print("No batman curves found, quitting....")
return None
# Read in Batman Params
params = pd.read_csv(p.join(batman_dir, "batmanParams{}.csv".format(batman_suffix)))
#Init dict for saving best batman curves
colnames = ['sector', 'tessFile', 'curveID', 'tcorr', 'correlation', 'chisq']
d = {key : [] for key in colnames}
s = 0
nerr = 0 # count number of failed files
# Do convolution on all tess files
for tind, tess_fpath in enumerate(tess_names):
tess_start = time()
tess_fname = p.basename(tess_fpath)
print("Starting TESS file: {}".format(tess_fname),flush=True)
# Read tess lightcurve
tess_time, tess_flux = open_tess_fits(tess_fpath, norm_tess)
if tess_time is None:
nerr += 1
continue # skip to next iter if read failed
# Do convolution and keep num_keep best curves
if num_keep < 1:
num_keep = len(curve_names)
curves, times, convs = convolve(tess_time, tess_flux, batmanCurves, curve_names, num_keep)
# Save this TESS curve's best batman curves to dict
d['sector'].extend([sector_name]*num_keep)
d['tessFile'].extend([tess_fname]*num_keep)
d['curveID'].extend(curves)
d['tcorr'].extend(times)
d['correlation'].extend(convs)
d['chisq'].extend(get_chi_sq(tess_time, tess_flux, times, params))
print(len(d['tcorr']), len(d['chisq']))
if write:
# Make table every writechunk tess curves
if (tind % writechunk == writechunk-1) or (tind == len(tess_names)-1):
e = start+tind
outname = 'candidates_sector{}_s{}_e{}.csv'.format(sector, s, e)
outpath = p.join(output_dir, outname)
# Convert to astropy table and write to csv
candidates = tbl.Table(d,names=colnames)
ast.io.ascii.write(candidates, outpath, format='csv', overwrite=True, comment='#', fast_writer=False)
print("Wrote file {} at {} s".format(outname,time()-tess_start),flush=True)
# reset dicts
# d = {key : [] for key in ['sector','tessFile','curveID','tcorr','correlation']}
s=e+1
candidates = tbl.Table(d,names=colnames)
# make merged table
cdf = pd.DataFrame.from_dict(d)
cdf = cdf[colnames]
df = pd.merge(cdf, params, on="curveID", how="left")
df.to_csv(p.join(output_dir, "chisq{}.csv".format(batman_suffix)))
tconv_time = time() - tconv_start
print("Convolved {}/{} tess files with {} curves in {:.3} s".format(ntess-nerr, ntess, nbatman, tconv_time),flush=True)
print("===END TCONVOLVE===",flush=True)
return candidates
def get_chi_sq(tess_time, tess_flux, tcorr, params):
current_fname = ""
chi_squared = []
#find the lightcurve minima to calculate the exoplanet period
arr = tess_flux / np.nanmedian(tess_flux)
arr[np.isnan(arr)] = np.nanmedian(arr)
arr[arr==0] = np.nanmedian(arr)
mu, std = stat.norm.fit(1 / arr)
peaks, _ = sig.find_peaks(1 / arr, height = mu + 4 * std, distance = 1000)
p = np.diff(tess_time[peaks])
#define parameters
PER = np.mean(p)
u_type = 'quadratic'
u_param = [0.1, 0.3]
t = tess_time - tess_time[0]
#normalize flux
outcounts = np.nan_to_num(tess_flux[tess_flux > np.nanmean(tess_flux)])
mu, sigma = stat.norm.fit(outcounts)
normalized_fluxes = tess_flux / mu
normalized_sigma = np.sqrt(tess_flux)/mu
for i, row in params.iterrows():
#get params for this row
T0 = tcorr[i]- tess_time[0]
RP = row["rp"]
INC = row["i"]
width = row["width"]
#calculate reduced chi-squared
chi_squared.append(np.nansum(((normalized_fluxes - make_lightcurve(T0, RP, INC, PER, width, u_type, u_param, t)) ** 2 / normalized_sigma ** 2) / 8))
return chi_squared
def main():
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("tess_dir", type=str)
parser.add_argument("batman_dir", type=str)
parser.add_argument("sector", type=int)
parser.add_argument("start", type=int)
parser.add_argument("end", type=int)
parser.add_argument("output_dir", type=str)
parser.add_argument("batman_suffix",type=str,default="")
parser.add_argument("-v", "--verbosity", default=False,
action="store_true", help="Print console output")
args = parser.parse_args()
tbconvolve(args.tess_dir, args.batman_dir, args.batman_suffix, args.sector, args.start,
args.end, args.output_dir, num_keep=-1, norm_tess=True, verbosity=args.verbosity)
if __name__ == '__main__':
main() | wmax (num): maximum width
wnum (num): number of widths to generate
wlog (bool): use logspace for widths if True, else use linspace | random_line_split |
tessbatman.py | """ tessbatman.py
This file contains helper functions for the tessbatman pipeline.
It is divided into Batman, TESS, and Convolve functions.
"""
from time import time
import glob
import os.path as p
import json
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import scipy.signal as sig
import scipy.stats as stat
import astropy as ast
import astropy.table as tbl
import batman
# Batman Functions
def make_batman_config(tmin, tmax, tstep, wmin, wmax, wnum, wlog=True, suffix="", path="."):
"""
Write batman parameters to a JSON param file used to generate batmanCurves.
Parameters
----------
tmin (num): minimum time
tmax (num): maximum time
tnum (num): time step
wmin (num): minimum width
wmax (num): maximum width
wnum (num): number of widths to generate
wlog (bool): use logspace for widths if True, else use linspace
suffix (str): append suffix to config and curve file names
"""
params = {}
params["curves_fname"] = p.join(path, 'batmanCurves{}.csv'.format(suffix))
params["params_fname"] = p.join(path, 'batmanParams{}.csv'.format(suffix))
params["tmin"] = tmin
params["tmax"] = tmax
params["tstep"] = tstep
params["wmin"] = wmin
params["wmax"] = wmax
params["wnum"] = wnum
params["wlog"] = wlog
outfile = p.join(path, 'batmanConfig{}.param'.format(suffix))
with open(outfile, "w+") as f:
json.dump(params, f)
print("Batman config written to {}".format(outfile))
def make_lightcurve(t0, r, i, p, width, u_type, u_param, t):
"""
Generate a batman lightcurve with the given parameters.
Parameters
----------
t0 (num): time of inferior conjunction
r (num): planet radius (in stellar radii)
i (num): orbital inclination (in degrees)
p (num): orbital period
width (num): width parameter (defined as a**3/p**2)
u_type (str): limb darkening model
u_param (list): parameters for limb darkening
t: timesteps that you want the fluxes at
assume circular orbit
"""
# Init batman model
params = batman.TransitParams()
params.rp = r
params.inc = i
params.w = 0 # longitude of periastron (degenerate with width)
params.ecc = 0 # eccentricity (0 for circular orbits)
params.per = p # orbital period
params.t0 = t0
params.a = (width * p ** 2) ** (1 / 3) # semi-major axis (stellar radii)
params.limb_dark = u_type
params.u = u_param
model = batman.TransitModel(params, t)
# Generate curve
flux = model.light_curve(params) # compute light curve
return flux
def make_batman(paramfile, outdir, norm=False, write=True, verbose=True):
"""
Return astropy tables of batman params and generated curves based on the
parameters given in paramfile.
Parameters
----------
paramfile (str): path to JSON param file written by make_batman_config
outdir (str): path to write output curve and param files
norm (bool): normalize curves to unit integrated area
write (bool): write param and curve tables to files
verbose (bool): print logging and timing info
"""
# read batman param file
if verbose:
print("Reading param file", flush=True)
with open(paramfile, "r") as f:
d = json.load(f)
# init time array and parameter ranges
if verbose:
print("Setting param ranges", flush=True)
t = np.arange(d['tmin'], d['tmax'], d['tstep'])
if d['wlog']:
widths = np.logspace(d['wmin'], d['wmax'], d['wnum'])
else:
widths = np.linspace(d['wmin'], d['wmax'], d['wnum'])
nparams = len(widths)
radii = 0.1 * np.ones(nparams)
incs = 90 * np.ones(nparams)
u = ['0.1 0.3'] * nparams
ld = ['quadratic'] * nparams
per = 100*np.ones(nparams)
t0 = np.zeros(nparams)
e = np.zeros(nparams)
w = np.zeros(nparams)
# Old
# radii = []
# widths = []
# incs = []
# widths_arr = np.logspace(d['wmin'], d['wmax'], d['wnum'])
# radii_arr = np.logspace(d['rmin'], d['rmax'], d['rnum'])
# for r in radii_arr:
# for w in widths_arr:
# a = (w * (100)**2)**(1.0/3.0)
# lim = np.arccos((1 + r)/(a))/(2 * np.pi) * 360
# inc = np.linspace(90, lim, 11)[:-1] # last inc always fails so exclude
# for i in inc:
# incs.append(i)
# radii.append(r)
# widths.append(w)
# add params to batman param table
curveID = ['curve{}'.format(i) for i in range(nparams)]
cols = [curveID, radii, incs, widths, per, u, ld, t0, e, w]
colnames = ['curveID', 'rp', 'i', 'width', 'per', 'u', 'ld', 't0', 'e', 'w']
batmanParams = tbl.Table(cols, names=colnames)
# generate curves
if verbose:
print("Generating curves", flush=True)
start = time()
batmanDict = {'times': t}
err = 0 # keep track of errored curves
for i in range(len(batmanParams)):
p = batmanParams[i]
cID = p['curveID']
c = make_lightcurve(p['t0'], p['rp'], p['i'], p['per'], p['width'], p['ld'],
[float(val) for val in p['u'].split()], t)
# normalize curve c
if norm:
cmax = np.max(c)
cmin = np.min(c)
c = (c-cmin)/(cmax-cmin) # scale to [0,1]
c = 1-c # flip
c = c / np.sum(c) # normalize area under curve to 1
c = 1-c # flip back
if np.isnan(c).any() or (sum(c==1) < 5):
print("Batman {} failed".format(cID), flush=True)
err += 1
continue
# Save curve to dict
batmanDict[cID] = c
# Progress report every 100
if verbose and (i % 100 == 0):
elapsed = time() - start
print("Generated {}/{} curves in {} s".format(i+1-err, nparams,
elapsed), flush=True)
# add curves to table
batmanCurves = tbl.Table(batmanDict)
if verbose:
elapsed = time() - start
print("Generated {}/{} curves in {} s".format(nparams-err, nparams,
elapsed), flush=True)
# Write batman params and curves tables to files
if write:
if verbose:
start = time()
print("Writing files", flush=True)
ast.io.ascii.write(batmanParams, d['params_fname'], format='csv',
overwrite=True, comment='#', fast_writer=False)
if verbose:
print("Wrote params to {}".format(d['params_fname']))
ast.io.ascii.write(batmanCurves, d['curves_fname'], format='csv',
overwrite=True, comment='#', fast_writer=False)
if verbose:
print("Wrote curves to {}".format(d['curves_fname']))
elapsed = time() - start
print("Wrote files in {} s".format(elapsed), flush=True)
return(batmanParams, batmanCurves)
def read_batman(batmancurves_file):
"""
Return times, cureve name, and batman curves from a batmanCurves file.
Parameters
----------
batmancurves_file (str): Path to a batmanCurves file
Return
------
times (numpy Array): The times array (x axis) of all batmanCurves
curve_names (numpy Array): The name of each batmanCurve
batmanCurves (astropy Table): The table of batmanCurves
"""
# Read in Batman Curves
print("Reading batmanCurves from {}...".format(batmancurves_file))
batmanCurves = ast.io.ascii.read(batmancurves_file, data_start=1, format='csv')
times = np.array(batmanCurves['times'])
curve_names = np.array(batmanCurves.colnames[1:])
return times, curve_names, batmanCurves
# TESS Functions
def read_tess(tess_dir, sector_name, start=0, end=None):
"""
Return list of tess .fits files in tess_dir from [start:end]. Default
to all fits files in directory if start and end are not specified.
Parameters
----------
tess_dir (str): path to tess data directory
sector_name (str): name of sector subdirectory (e.g. Sector1)
start (int): (Optional) Index of file in directory to start at
end (int): (Optional) Index of file to end at
Return
------
tess_names (list): List of file paths to tess .fits data
"""
print("Reading TESS from {}, s:{}, e:{}...".format(sector_name, start, end))
sector_path = p.join(tess_dir, sector_name)
sector_files = glob.glob(p.join(sector_path,"*.fits"))
tess_names = sector_files[start:end]
return tess_names
def open_tess_fits(tess_fpath, norm=False):
try:
with ast.io.fits.open(tess_fpath, mode="readonly") as hdulist:
hdr = hdulist[0].header
tess_time = hdulist[1].data['TIME']
tess_flux = hdulist[1].data['PDCSAP_FLUX']
# set NaNs to median
med = np.nanmedian(tess_flux)
tess_flux[np.isnan(tess_flux)] = med
if norm:
# tess_flux[tess_flux > np.median(tess_flux)] = np.median(tess_flux)
tmin = np.min(tess_flux)
tmax = np.max(tess_flux)
tess_flux = (tess_flux - tmin)/(tmax-tmin)
except Exception as e:
print("ERROR reading file: ", tess_fpath, " with error: ", e,flush=True)
return None, None
return tess_time, tess_flux
# Convolve Fucntions
def convolve(tess_time, tess_flux, batmanCurves, curve_names, num_keep=10, plot=False):
|
def tbconvolve(tess_dir, batman_dir, batman_suffix, sector, start, end, output_dir, num_keep=10, norm_tess=False, write=True, writechunk=10, verbosity=0):
"""
Parameters
----------
tess_dir(str): directory to TESS data
batman_dir (str): directory to model data
batman_suffix(str): suffix to append to barmanCurves file (e.g. _small)
sector (int): sector to pull data from
start (int): file to start at
end (int): file to end at
output_dir (str): directory to write candidates.csv
"""
tconv_start = time()
print("===START TCONVOLVE===",flush=True)
# Handle relative paths
tess_dir = p.abspath(tess_dir)
batman_dir = p.abspath(batman_dir)
output_dir = p.abspath(output_dir)
# Read in TESS Sector data
sector_name = "Sector{}".format(sector)
if sector == 0:
sector_name = "sample_"+sector_name
tess_names = read_tess(tess_dir, sector_name, start, end)
ntess = len(tess_names)
print("Found {} TESS files to process".format(ntess),flush=True)
if ntess < 1:
print("No tess curves found, quitting....")
return None
# Read in Batman Curves
batmanCurves_file = p.join(batman_dir,"batmanCurves{}.csv".format(batman_suffix))
times, curve_names, batmanCurves = read_batman(batmanCurves_file)
nbatman = len(curve_names)
print("Found {} Batman curves".format(nbatman),flush=True)
if ntess < 1:
print("No batman curves found, quitting....")
return None
# Read in Batman Params
params = pd.read_csv(p.join(batman_dir, "batmanParams{}.csv".format(batman_suffix)))
#Init dict for saving best batman curves
colnames = ['sector', 'tessFile', 'curveID', 'tcorr', 'correlation', 'chisq']
d = {key : [] for key in colnames}
s = 0
nerr = 0 # count number of failed files
# Do convolution on all tess files
for tind, tess_fpath in enumerate(tess_names):
tess_start = time()
tess_fname = p.basename(tess_fpath)
print("Starting TESS file: {}".format(tess_fname),flush=True)
# Read tess lightcurve
tess_time, tess_flux = open_tess_fits(tess_fpath, norm_tess)
if tess_time is None:
nerr += 1
continue # skip to next iter if read failed
# Do convolution and keep num_keep best curves
if num_keep < 1:
num_keep = len(curve_names)
curves, times, convs = convolve(tess_time, tess_flux, batmanCurves, curve_names, num_keep)
# Save this TESS curve's best batman curves to dict
d['sector'].extend([sector_name]*num_keep)
d['tessFile'].extend([tess_fname]*num_keep)
d['curveID'].extend(curves)
d['tcorr'].extend(times)
d['correlation'].extend(convs)
d['chisq'].extend(get_chi_sq(tess_time, tess_flux, times, params))
print(len(d['tcorr']), len(d['chisq']))
if write:
# Make table every writechunk tess curves
if (tind % writechunk == writechunk-1) or (tind == len(tess_names)-1):
e = start+tind
outname = 'candidates_sector{}_s{}_e{}.csv'.format(sector, s, e)
outpath = p.join(output_dir, outname)
# Convert to astropy table and write to csv
candidates = tbl.Table(d,names=colnames)
ast.io.ascii.write(candidates, outpath, format='csv', overwrite=True, comment='#', fast_writer=False)
print("Wrote file {} at {} s".format(outname,time()-tess_start),flush=True)
# reset dicts
# d = {key : [] for key in ['sector','tessFile','curveID','tcorr','correlation']}
s=e+1
candidates = tbl.Table(d,names=colnames)
# make merged table
cdf = pd.DataFrame.from_dict(d)
cdf = cdf[colnames]
df = pd.merge(cdf, params, on="curveID", how="left")
df.to_csv(p.join(output_dir, "chisq{}.csv".format(batman_suffix)))
tconv_time = time() - tconv_start
print("Convolved {}/{} tess files with {} curves in {:.3} s".format(ntess-nerr, ntess, nbatman, tconv_time),flush=True)
print("===END TCONVOLVE===",flush=True)
return candidates
def get_chi_sq(tess_time, tess_flux, tcorr, params):
current_fname = ""
chi_squared = []
#find the lightcurve minima to calculate the exoplanet period
arr = tess_flux / np.nanmedian(tess_flux)
arr[np.isnan(arr)] = np.nanmedian(arr)
arr[arr==0] = np.nanmedian(arr)
mu, std = stat.norm.fit(1 / arr)
peaks, _ = sig.find_peaks(1 / arr, height = mu + 4 * std, distance = 1000)
p = np.diff(tess_time[peaks])
#define parameters
PER = np.mean(p)
u_type = 'quadratic'
u_param = [0.1, 0.3]
t = tess_time - tess_time[0]
#normalize flux
outcounts = np.nan_to_num(tess_flux[tess_flux > np.nanmean(tess_flux)])
mu, sigma = stat.norm.fit(outcounts)
normalized_fluxes = tess_flux / mu
normalized_sigma = np.sqrt(tess_flux)/mu
for i, row in params.iterrows():
#get params for this row
T0 = tcorr[i]- tess_time[0]
RP = row["rp"]
INC = row["i"]
width = row["width"]
#calculate reduced chi-squared
chi_squared.append(np.nansum(((normalized_fluxes - make_lightcurve(T0, RP, INC, PER, width, u_type, u_param, t)) ** 2 / normalized_sigma ** 2) / 8))
return chi_squared
def main():
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("tess_dir", type=str)
parser.add_argument("batman_dir", type=str)
parser.add_argument("sector", type=int)
parser.add_argument("start", type=int)
parser.add_argument("end", type=int)
parser.add_argument("output_dir", type=str)
parser.add_argument("batman_suffix",type=str,default="")
parser.add_argument("-v", "--verbosity", default=False,
action="store_true", help="Print console output")
args = parser.parse_args()
tbconvolve(args.tess_dir, args.batman_dir, args.batman_suffix, args.sector, args.start,
args.end, args.output_dir, num_keep=-1, norm_tess=True, verbosity=args.verbosity)
if __name__ == '__main__':
main()
| conv_start = time()
curves = []
times = np.zeros(num_keep)
convs = np.zeros(num_keep)
print("Starting convolutions...",flush=True)
for i, curvename in enumerate(curve_names):
# do convolution
batman_curve = batmanCurves[curvename]
conv = np.abs(sig.fftconvolve(1-tess_flux, (1-batman_curve), 'same'))
ind_max = np.argmax(conv)
conv_max = conv[ind_max]
# if num_keep, save only the top num_keep curves
if num_keep < len(curve_names):
if conv_max > convs[-1]:
# insert in reverse sorted order
ind = np.searchsorted(-convs, -conv_max)
curves = curves[:ind] + [curvename] + curves[ind:-1]
times = np.insert(times, ind, tess_time[ind_max])[:-1]
convs = np.insert(convs, ind, conv_max)[:-1]
else:
curves.append(curvename)
times[i] = tess_time[ind_max]
convs[i] = conv_max
if plot:
plt.plot(tess_time, conv, label=curvename)
conv_time = time() - conv_start
print("Convolved {} curves in {:.3} s".format(len(curve_names), conv_time),flush=True)
return curves, times, convs | identifier_body |
tessbatman.py | """ tessbatman.py
This file contains helper functions for the tessbatman pipeline.
It is divided into Batman, TESS, and Convolve functions.
"""
from time import time
import glob
import os.path as p
import json
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import scipy.signal as sig
import scipy.stats as stat
import astropy as ast
import astropy.table as tbl
import batman
# Batman Functions
def | (tmin, tmax, tstep, wmin, wmax, wnum, wlog=True, suffix="", path="."):
"""
Write batman parameters to a JSON param file used to generate batmanCurves.
Parameters
----------
tmin (num): minimum time
tmax (num): maximum time
tnum (num): time step
wmin (num): minimum width
wmax (num): maximum width
wnum (num): number of widths to generate
wlog (bool): use logspace for widths if True, else use linspace
suffix (str): append suffix to config and curve file names
"""
params = {}
params["curves_fname"] = p.join(path, 'batmanCurves{}.csv'.format(suffix))
params["params_fname"] = p.join(path, 'batmanParams{}.csv'.format(suffix))
params["tmin"] = tmin
params["tmax"] = tmax
params["tstep"] = tstep
params["wmin"] = wmin
params["wmax"] = wmax
params["wnum"] = wnum
params["wlog"] = wlog
outfile = p.join(path, 'batmanConfig{}.param'.format(suffix))
with open(outfile, "w+") as f:
json.dump(params, f)
print("Batman config written to {}".format(outfile))
def make_lightcurve(t0, r, i, p, width, u_type, u_param, t):
"""
Generate a batman lightcurve with the given parameters.
Parameters
----------
t0 (num): time of inferior conjunction
r (num): planet radius (in stellar radii)
i (num): orbital inclination (in degrees)
p (num): orbital period
width (num): width parameter (defined as a**3/p**2)
u_type (str): limb darkening model
u_param (list): parameters for limb darkening
t: timesteps that you want the fluxes at
assume circular orbit
"""
# Init batman model
params = batman.TransitParams()
params.rp = r
params.inc = i
params.w = 0 # longitude of periastron (degenerate with width)
params.ecc = 0 # eccentricity (0 for circular orbits)
params.per = p # orbital period
params.t0 = t0
params.a = (width * p ** 2) ** (1 / 3) # semi-major axis (stellar radii)
params.limb_dark = u_type
params.u = u_param
model = batman.TransitModel(params, t)
# Generate curve
flux = model.light_curve(params) # compute light curve
return flux
def make_batman(paramfile, outdir, norm=False, write=True, verbose=True):
"""
Return astropy tables of batman params and generated curves based on the
parameters given in paramfile.
Parameters
----------
paramfile (str): path to JSON param file written by make_batman_config
outdir (str): path to write output curve and param files
norm (bool): normalize curves to unit integrated area
write (bool): write param and curve tables to files
verbose (bool): print logging and timing info
"""
# read batman param file
if verbose:
print("Reading param file", flush=True)
with open(paramfile, "r") as f:
d = json.load(f)
# init time array and parameter ranges
if verbose:
print("Setting param ranges", flush=True)
t = np.arange(d['tmin'], d['tmax'], d['tstep'])
if d['wlog']:
widths = np.logspace(d['wmin'], d['wmax'], d['wnum'])
else:
widths = np.linspace(d['wmin'], d['wmax'], d['wnum'])
nparams = len(widths)
radii = 0.1 * np.ones(nparams)
incs = 90 * np.ones(nparams)
u = ['0.1 0.3'] * nparams
ld = ['quadratic'] * nparams
per = 100*np.ones(nparams)
t0 = np.zeros(nparams)
e = np.zeros(nparams)
w = np.zeros(nparams)
# Old
# radii = []
# widths = []
# incs = []
# widths_arr = np.logspace(d['wmin'], d['wmax'], d['wnum'])
# radii_arr = np.logspace(d['rmin'], d['rmax'], d['rnum'])
# for r in radii_arr:
# for w in widths_arr:
# a = (w * (100)**2)**(1.0/3.0)
# lim = np.arccos((1 + r)/(a))/(2 * np.pi) * 360
# inc = np.linspace(90, lim, 11)[:-1] # last inc always fails so exclude
# for i in inc:
# incs.append(i)
# radii.append(r)
# widths.append(w)
# add params to batman param table
curveID = ['curve{}'.format(i) for i in range(nparams)]
cols = [curveID, radii, incs, widths, per, u, ld, t0, e, w]
colnames = ['curveID', 'rp', 'i', 'width', 'per', 'u', 'ld', 't0', 'e', 'w']
batmanParams = tbl.Table(cols, names=colnames)
# generate curves
if verbose:
print("Generating curves", flush=True)
start = time()
batmanDict = {'times': t}
err = 0 # keep track of errored curves
for i in range(len(batmanParams)):
p = batmanParams[i]
cID = p['curveID']
c = make_lightcurve(p['t0'], p['rp'], p['i'], p['per'], p['width'], p['ld'],
[float(val) for val in p['u'].split()], t)
# normalize curve c
if norm:
cmax = np.max(c)
cmin = np.min(c)
c = (c-cmin)/(cmax-cmin) # scale to [0,1]
c = 1-c # flip
c = c / np.sum(c) # normalize area under curve to 1
c = 1-c # flip back
if np.isnan(c).any() or (sum(c==1) < 5):
print("Batman {} failed".format(cID), flush=True)
err += 1
continue
# Save curve to dict
batmanDict[cID] = c
# Progress report every 100
if verbose and (i % 100 == 0):
elapsed = time() - start
print("Generated {}/{} curves in {} s".format(i+1-err, nparams,
elapsed), flush=True)
# add curves to table
batmanCurves = tbl.Table(batmanDict)
if verbose:
elapsed = time() - start
print("Generated {}/{} curves in {} s".format(nparams-err, nparams,
elapsed), flush=True)
# Write batman params and curves tables to files
if write:
if verbose:
start = time()
print("Writing files", flush=True)
ast.io.ascii.write(batmanParams, d['params_fname'], format='csv',
overwrite=True, comment='#', fast_writer=False)
if verbose:
print("Wrote params to {}".format(d['params_fname']))
ast.io.ascii.write(batmanCurves, d['curves_fname'], format='csv',
overwrite=True, comment='#', fast_writer=False)
if verbose:
print("Wrote curves to {}".format(d['curves_fname']))
elapsed = time() - start
print("Wrote files in {} s".format(elapsed), flush=True)
return(batmanParams, batmanCurves)
def read_batman(batmancurves_file):
"""
Return times, cureve name, and batman curves from a batmanCurves file.
Parameters
----------
batmancurves_file (str): Path to a batmanCurves file
Return
------
times (numpy Array): The times array (x axis) of all batmanCurves
curve_names (numpy Array): The name of each batmanCurve
batmanCurves (astropy Table): The table of batmanCurves
"""
# Read in Batman Curves
print("Reading batmanCurves from {}...".format(batmancurves_file))
batmanCurves = ast.io.ascii.read(batmancurves_file, data_start=1, format='csv')
times = np.array(batmanCurves['times'])
curve_names = np.array(batmanCurves.colnames[1:])
return times, curve_names, batmanCurves
# TESS Functions
def read_tess(tess_dir, sector_name, start=0, end=None):
"""
Return list of tess .fits files in tess_dir from [start:end]. Default
to all fits files in directory if start and end are not specified.
Parameters
----------
tess_dir (str): path to tess data directory
sector_name (str): name of sector subdirectory (e.g. Sector1)
start (int): (Optional) Index of file in directory to start at
end (int): (Optional) Index of file to end at
Return
------
tess_names (list): List of file paths to tess .fits data
"""
print("Reading TESS from {}, s:{}, e:{}...".format(sector_name, start, end))
sector_path = p.join(tess_dir, sector_name)
sector_files = glob.glob(p.join(sector_path,"*.fits"))
tess_names = sector_files[start:end]
return tess_names
def open_tess_fits(tess_fpath, norm=False):
try:
with ast.io.fits.open(tess_fpath, mode="readonly") as hdulist:
hdr = hdulist[0].header
tess_time = hdulist[1].data['TIME']
tess_flux = hdulist[1].data['PDCSAP_FLUX']
# set NaNs to median
med = np.nanmedian(tess_flux)
tess_flux[np.isnan(tess_flux)] = med
if norm:
# tess_flux[tess_flux > np.median(tess_flux)] = np.median(tess_flux)
tmin = np.min(tess_flux)
tmax = np.max(tess_flux)
tess_flux = (tess_flux - tmin)/(tmax-tmin)
except Exception as e:
print("ERROR reading file: ", tess_fpath, " with error: ", e,flush=True)
return None, None
return tess_time, tess_flux
# Convolve Fucntions
def convolve(tess_time, tess_flux, batmanCurves, curve_names, num_keep=10, plot=False):
conv_start = time()
curves = []
times = np.zeros(num_keep)
convs = np.zeros(num_keep)
print("Starting convolutions...",flush=True)
for i, curvename in enumerate(curve_names):
# do convolution
batman_curve = batmanCurves[curvename]
conv = np.abs(sig.fftconvolve(1-tess_flux, (1-batman_curve), 'same'))
ind_max = np.argmax(conv)
conv_max = conv[ind_max]
# if num_keep, save only the top num_keep curves
if num_keep < len(curve_names):
if conv_max > convs[-1]:
# insert in reverse sorted order
ind = np.searchsorted(-convs, -conv_max)
curves = curves[:ind] + [curvename] + curves[ind:-1]
times = np.insert(times, ind, tess_time[ind_max])[:-1]
convs = np.insert(convs, ind, conv_max)[:-1]
else:
curves.append(curvename)
times[i] = tess_time[ind_max]
convs[i] = conv_max
if plot:
plt.plot(tess_time, conv, label=curvename)
conv_time = time() - conv_start
print("Convolved {} curves in {:.3} s".format(len(curve_names), conv_time),flush=True)
return curves, times, convs
def tbconvolve(tess_dir, batman_dir, batman_suffix, sector, start, end, output_dir, num_keep=10, norm_tess=False, write=True, writechunk=10, verbosity=0):
"""
Parameters
----------
tess_dir(str): directory to TESS data
batman_dir (str): directory to model data
batman_suffix(str): suffix to append to barmanCurves file (e.g. _small)
sector (int): sector to pull data from
start (int): file to start at
end (int): file to end at
output_dir (str): directory to write candidates.csv
"""
tconv_start = time()
print("===START TCONVOLVE===",flush=True)
# Handle relative paths
tess_dir = p.abspath(tess_dir)
batman_dir = p.abspath(batman_dir)
output_dir = p.abspath(output_dir)
# Read in TESS Sector data
sector_name = "Sector{}".format(sector)
if sector == 0:
sector_name = "sample_"+sector_name
tess_names = read_tess(tess_dir, sector_name, start, end)
ntess = len(tess_names)
print("Found {} TESS files to process".format(ntess),flush=True)
if ntess < 1:
print("No tess curves found, quitting....")
return None
# Read in Batman Curves
batmanCurves_file = p.join(batman_dir,"batmanCurves{}.csv".format(batman_suffix))
times, curve_names, batmanCurves = read_batman(batmanCurves_file)
nbatman = len(curve_names)
print("Found {} Batman curves".format(nbatman),flush=True)
if ntess < 1:
print("No batman curves found, quitting....")
return None
# Read in Batman Params
params = pd.read_csv(p.join(batman_dir, "batmanParams{}.csv".format(batman_suffix)))
#Init dict for saving best batman curves
colnames = ['sector', 'tessFile', 'curveID', 'tcorr', 'correlation', 'chisq']
d = {key : [] for key in colnames}
s = 0
nerr = 0 # count number of failed files
# Do convolution on all tess files
for tind, tess_fpath in enumerate(tess_names):
tess_start = time()
tess_fname = p.basename(tess_fpath)
print("Starting TESS file: {}".format(tess_fname),flush=True)
# Read tess lightcurve
tess_time, tess_flux = open_tess_fits(tess_fpath, norm_tess)
if tess_time is None:
nerr += 1
continue # skip to next iter if read failed
# Do convolution and keep num_keep best curves
if num_keep < 1:
num_keep = len(curve_names)
curves, times, convs = convolve(tess_time, tess_flux, batmanCurves, curve_names, num_keep)
# Save this TESS curve's best batman curves to dict
d['sector'].extend([sector_name]*num_keep)
d['tessFile'].extend([tess_fname]*num_keep)
d['curveID'].extend(curves)
d['tcorr'].extend(times)
d['correlation'].extend(convs)
d['chisq'].extend(get_chi_sq(tess_time, tess_flux, times, params))
print(len(d['tcorr']), len(d['chisq']))
if write:
# Make table every writechunk tess curves
if (tind % writechunk == writechunk-1) or (tind == len(tess_names)-1):
e = start+tind
outname = 'candidates_sector{}_s{}_e{}.csv'.format(sector, s, e)
outpath = p.join(output_dir, outname)
# Convert to astropy table and write to csv
candidates = tbl.Table(d,names=colnames)
ast.io.ascii.write(candidates, outpath, format='csv', overwrite=True, comment='#', fast_writer=False)
print("Wrote file {} at {} s".format(outname,time()-tess_start),flush=True)
# reset dicts
# d = {key : [] for key in ['sector','tessFile','curveID','tcorr','correlation']}
s=e+1
candidates = tbl.Table(d,names=colnames)
# make merged table
cdf = pd.DataFrame.from_dict(d)
cdf = cdf[colnames]
df = pd.merge(cdf, params, on="curveID", how="left")
df.to_csv(p.join(output_dir, "chisq{}.csv".format(batman_suffix)))
tconv_time = time() - tconv_start
print("Convolved {}/{} tess files with {} curves in {:.3} s".format(ntess-nerr, ntess, nbatman, tconv_time),flush=True)
print("===END TCONVOLVE===",flush=True)
return candidates
def get_chi_sq(tess_time, tess_flux, tcorr, params):
current_fname = ""
chi_squared = []
#find the lightcurve minima to calculate the exoplanet period
arr = tess_flux / np.nanmedian(tess_flux)
arr[np.isnan(arr)] = np.nanmedian(arr)
arr[arr==0] = np.nanmedian(arr)
mu, std = stat.norm.fit(1 / arr)
peaks, _ = sig.find_peaks(1 / arr, height = mu + 4 * std, distance = 1000)
p = np.diff(tess_time[peaks])
#define parameters
PER = np.mean(p)
u_type = 'quadratic'
u_param = [0.1, 0.3]
t = tess_time - tess_time[0]
#normalize flux
outcounts = np.nan_to_num(tess_flux[tess_flux > np.nanmean(tess_flux)])
mu, sigma = stat.norm.fit(outcounts)
normalized_fluxes = tess_flux / mu
normalized_sigma = np.sqrt(tess_flux)/mu
for i, row in params.iterrows():
#get params for this row
T0 = tcorr[i]- tess_time[0]
RP = row["rp"]
INC = row["i"]
width = row["width"]
#calculate reduced chi-squared
chi_squared.append(np.nansum(((normalized_fluxes - make_lightcurve(T0, RP, INC, PER, width, u_type, u_param, t)) ** 2 / normalized_sigma ** 2) / 8))
return chi_squared
def main():
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("tess_dir", type=str)
parser.add_argument("batman_dir", type=str)
parser.add_argument("sector", type=int)
parser.add_argument("start", type=int)
parser.add_argument("end", type=int)
parser.add_argument("output_dir", type=str)
parser.add_argument("batman_suffix",type=str,default="")
parser.add_argument("-v", "--verbosity", default=False,
action="store_true", help="Print console output")
args = parser.parse_args()
tbconvolve(args.tess_dir, args.batman_dir, args.batman_suffix, args.sector, args.start,
args.end, args.output_dir, num_keep=-1, norm_tess=True, verbosity=args.verbosity)
if __name__ == '__main__':
main()
| make_batman_config | identifier_name |
tessbatman.py | """ tessbatman.py
This file contains helper functions for the tessbatman pipeline.
It is divided into Batman, TESS, and Convolve functions.
"""
from time import time
import glob
import os.path as p
import json
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import scipy.signal as sig
import scipy.stats as stat
import astropy as ast
import astropy.table as tbl
import batman
# Batman Functions
def make_batman_config(tmin, tmax, tstep, wmin, wmax, wnum, wlog=True, suffix="", path="."):
"""
Write batman parameters to a JSON param file used to generate batmanCurves.
Parameters
----------
tmin (num): minimum time
tmax (num): maximum time
tnum (num): time step
wmin (num): minimum width
wmax (num): maximum width
wnum (num): number of widths to generate
wlog (bool): use logspace for widths if True, else use linspace
suffix (str): append suffix to config and curve file names
"""
params = {}
params["curves_fname"] = p.join(path, 'batmanCurves{}.csv'.format(suffix))
params["params_fname"] = p.join(path, 'batmanParams{}.csv'.format(suffix))
params["tmin"] = tmin
params["tmax"] = tmax
params["tstep"] = tstep
params["wmin"] = wmin
params["wmax"] = wmax
params["wnum"] = wnum
params["wlog"] = wlog
outfile = p.join(path, 'batmanConfig{}.param'.format(suffix))
with open(outfile, "w+") as f:
json.dump(params, f)
print("Batman config written to {}".format(outfile))
def make_lightcurve(t0, r, i, p, width, u_type, u_param, t):
"""
Generate a batman lightcurve with the given parameters.
Parameters
----------
t0 (num): time of inferior conjunction
r (num): planet radius (in stellar radii)
i (num): orbital inclination (in degrees)
p (num): orbital period
width (num): width parameter (defined as a**3/p**2)
u_type (str): limb darkening model
u_param (list): parameters for limb darkening
t: timesteps that you want the fluxes at
assume circular orbit
"""
# Init batman model
params = batman.TransitParams()
params.rp = r
params.inc = i
params.w = 0 # longitude of periastron (degenerate with width)
params.ecc = 0 # eccentricity (0 for circular orbits)
params.per = p # orbital period
params.t0 = t0
params.a = (width * p ** 2) ** (1 / 3) # semi-major axis (stellar radii)
params.limb_dark = u_type
params.u = u_param
model = batman.TransitModel(params, t)
# Generate curve
flux = model.light_curve(params) # compute light curve
return flux
def make_batman(paramfile, outdir, norm=False, write=True, verbose=True):
"""
Return astropy tables of batman params and generated curves based on the
parameters given in paramfile.
Parameters
----------
paramfile (str): path to JSON param file written by make_batman_config
outdir (str): path to write output curve and param files
norm (bool): normalize curves to unit integrated area
write (bool): write param and curve tables to files
verbose (bool): print logging and timing info
"""
# read batman param file
if verbose:
print("Reading param file", flush=True)
with open(paramfile, "r") as f:
d = json.load(f)
# init time array and parameter ranges
if verbose:
print("Setting param ranges", flush=True)
t = np.arange(d['tmin'], d['tmax'], d['tstep'])
if d['wlog']:
widths = np.logspace(d['wmin'], d['wmax'], d['wnum'])
else:
widths = np.linspace(d['wmin'], d['wmax'], d['wnum'])
nparams = len(widths)
radii = 0.1 * np.ones(nparams)
incs = 90 * np.ones(nparams)
u = ['0.1 0.3'] * nparams
ld = ['quadratic'] * nparams
per = 100*np.ones(nparams)
t0 = np.zeros(nparams)
e = np.zeros(nparams)
w = np.zeros(nparams)
# Old
# radii = []
# widths = []
# incs = []
# widths_arr = np.logspace(d['wmin'], d['wmax'], d['wnum'])
# radii_arr = np.logspace(d['rmin'], d['rmax'], d['rnum'])
# for r in radii_arr:
# for w in widths_arr:
# a = (w * (100)**2)**(1.0/3.0)
# lim = np.arccos((1 + r)/(a))/(2 * np.pi) * 360
# inc = np.linspace(90, lim, 11)[:-1] # last inc always fails so exclude
# for i in inc:
# incs.append(i)
# radii.append(r)
# widths.append(w)
# add params to batman param table
curveID = ['curve{}'.format(i) for i in range(nparams)]
cols = [curveID, radii, incs, widths, per, u, ld, t0, e, w]
colnames = ['curveID', 'rp', 'i', 'width', 'per', 'u', 'ld', 't0', 'e', 'w']
batmanParams = tbl.Table(cols, names=colnames)
# generate curves
if verbose:
print("Generating curves", flush=True)
start = time()
batmanDict = {'times': t}
err = 0 # keep track of errored curves
for i in range(len(batmanParams)):
p = batmanParams[i]
cID = p['curveID']
c = make_lightcurve(p['t0'], p['rp'], p['i'], p['per'], p['width'], p['ld'],
[float(val) for val in p['u'].split()], t)
# normalize curve c
if norm:
cmax = np.max(c)
cmin = np.min(c)
c = (c-cmin)/(cmax-cmin) # scale to [0,1]
c = 1-c # flip
c = c / np.sum(c) # normalize area under curve to 1
c = 1-c # flip back
if np.isnan(c).any() or (sum(c==1) < 5):
print("Batman {} failed".format(cID), flush=True)
err += 1
continue
# Save curve to dict
batmanDict[cID] = c
# Progress report every 100
if verbose and (i % 100 == 0):
elapsed = time() - start
print("Generated {}/{} curves in {} s".format(i+1-err, nparams,
elapsed), flush=True)
# add curves to table
batmanCurves = tbl.Table(batmanDict)
if verbose:
elapsed = time() - start
print("Generated {}/{} curves in {} s".format(nparams-err, nparams,
elapsed), flush=True)
# Write batman params and curves tables to files
if write:
if verbose:
start = time()
print("Writing files", flush=True)
ast.io.ascii.write(batmanParams, d['params_fname'], format='csv',
overwrite=True, comment='#', fast_writer=False)
if verbose:
print("Wrote params to {}".format(d['params_fname']))
ast.io.ascii.write(batmanCurves, d['curves_fname'], format='csv',
overwrite=True, comment='#', fast_writer=False)
if verbose:
print("Wrote curves to {}".format(d['curves_fname']))
elapsed = time() - start
print("Wrote files in {} s".format(elapsed), flush=True)
return(batmanParams, batmanCurves)
def read_batman(batmancurves_file):
"""
Return times, cureve name, and batman curves from a batmanCurves file.
Parameters
----------
batmancurves_file (str): Path to a batmanCurves file
Return
------
times (numpy Array): The times array (x axis) of all batmanCurves
curve_names (numpy Array): The name of each batmanCurve
batmanCurves (astropy Table): The table of batmanCurves
"""
# Read in Batman Curves
print("Reading batmanCurves from {}...".format(batmancurves_file))
batmanCurves = ast.io.ascii.read(batmancurves_file, data_start=1, format='csv')
times = np.array(batmanCurves['times'])
curve_names = np.array(batmanCurves.colnames[1:])
return times, curve_names, batmanCurves
# TESS Functions
def read_tess(tess_dir, sector_name, start=0, end=None):
"""
Return list of tess .fits files in tess_dir from [start:end]. Default
to all fits files in directory if start and end are not specified.
Parameters
----------
tess_dir (str): path to tess data directory
sector_name (str): name of sector subdirectory (e.g. Sector1)
start (int): (Optional) Index of file in directory to start at
end (int): (Optional) Index of file to end at
Return
------
tess_names (list): List of file paths to tess .fits data
"""
print("Reading TESS from {}, s:{}, e:{}...".format(sector_name, start, end))
sector_path = p.join(tess_dir, sector_name)
sector_files = glob.glob(p.join(sector_path,"*.fits"))
tess_names = sector_files[start:end]
return tess_names
def open_tess_fits(tess_fpath, norm=False):
try:
with ast.io.fits.open(tess_fpath, mode="readonly") as hdulist:
hdr = hdulist[0].header
tess_time = hdulist[1].data['TIME']
tess_flux = hdulist[1].data['PDCSAP_FLUX']
# set NaNs to median
med = np.nanmedian(tess_flux)
tess_flux[np.isnan(tess_flux)] = med
if norm:
# tess_flux[tess_flux > np.median(tess_flux)] = np.median(tess_flux)
tmin = np.min(tess_flux)
tmax = np.max(tess_flux)
tess_flux = (tess_flux - tmin)/(tmax-tmin)
except Exception as e:
print("ERROR reading file: ", tess_fpath, " with error: ", e,flush=True)
return None, None
return tess_time, tess_flux
# Convolve Fucntions
def convolve(tess_time, tess_flux, batmanCurves, curve_names, num_keep=10, plot=False):
conv_start = time()
curves = []
times = np.zeros(num_keep)
convs = np.zeros(num_keep)
print("Starting convolutions...",flush=True)
for i, curvename in enumerate(curve_names):
# do convolution
batman_curve = batmanCurves[curvename]
conv = np.abs(sig.fftconvolve(1-tess_flux, (1-batman_curve), 'same'))
ind_max = np.argmax(conv)
conv_max = conv[ind_max]
# if num_keep, save only the top num_keep curves
if num_keep < len(curve_names):
if conv_max > convs[-1]:
# insert in reverse sorted order
ind = np.searchsorted(-convs, -conv_max)
curves = curves[:ind] + [curvename] + curves[ind:-1]
times = np.insert(times, ind, tess_time[ind_max])[:-1]
convs = np.insert(convs, ind, conv_max)[:-1]
else:
curves.append(curvename)
times[i] = tess_time[ind_max]
convs[i] = conv_max
if plot:
plt.plot(tess_time, conv, label=curvename)
conv_time = time() - conv_start
print("Convolved {} curves in {:.3} s".format(len(curve_names), conv_time),flush=True)
return curves, times, convs
def tbconvolve(tess_dir, batman_dir, batman_suffix, sector, start, end, output_dir, num_keep=10, norm_tess=False, write=True, writechunk=10, verbosity=0):
"""
Parameters
----------
tess_dir(str): directory to TESS data
batman_dir (str): directory to model data
batman_suffix(str): suffix to append to barmanCurves file (e.g. _small)
sector (int): sector to pull data from
start (int): file to start at
end (int): file to end at
output_dir (str): directory to write candidates.csv
"""
tconv_start = time()
print("===START TCONVOLVE===",flush=True)
# Handle relative paths
tess_dir = p.abspath(tess_dir)
batman_dir = p.abspath(batman_dir)
output_dir = p.abspath(output_dir)
# Read in TESS Sector data
sector_name = "Sector{}".format(sector)
if sector == 0:
sector_name = "sample_"+sector_name
tess_names = read_tess(tess_dir, sector_name, start, end)
ntess = len(tess_names)
print("Found {} TESS files to process".format(ntess),flush=True)
if ntess < 1:
print("No tess curves found, quitting....")
return None
# Read in Batman Curves
batmanCurves_file = p.join(batman_dir,"batmanCurves{}.csv".format(batman_suffix))
times, curve_names, batmanCurves = read_batman(batmanCurves_file)
nbatman = len(curve_names)
print("Found {} Batman curves".format(nbatman),flush=True)
if ntess < 1:
|
# Read in Batman Params
params = pd.read_csv(p.join(batman_dir, "batmanParams{}.csv".format(batman_suffix)))
#Init dict for saving best batman curves
colnames = ['sector', 'tessFile', 'curveID', 'tcorr', 'correlation', 'chisq']
d = {key : [] for key in colnames}
s = 0
nerr = 0 # count number of failed files
# Do convolution on all tess files
for tind, tess_fpath in enumerate(tess_names):
tess_start = time()
tess_fname = p.basename(tess_fpath)
print("Starting TESS file: {}".format(tess_fname),flush=True)
# Read tess lightcurve
tess_time, tess_flux = open_tess_fits(tess_fpath, norm_tess)
if tess_time is None:
nerr += 1
continue # skip to next iter if read failed
# Do convolution and keep num_keep best curves
if num_keep < 1:
num_keep = len(curve_names)
curves, times, convs = convolve(tess_time, tess_flux, batmanCurves, curve_names, num_keep)
# Save this TESS curve's best batman curves to dict
d['sector'].extend([sector_name]*num_keep)
d['tessFile'].extend([tess_fname]*num_keep)
d['curveID'].extend(curves)
d['tcorr'].extend(times)
d['correlation'].extend(convs)
d['chisq'].extend(get_chi_sq(tess_time, tess_flux, times, params))
print(len(d['tcorr']), len(d['chisq']))
if write:
# Make table every writechunk tess curves
if (tind % writechunk == writechunk-1) or (tind == len(tess_names)-1):
e = start+tind
outname = 'candidates_sector{}_s{}_e{}.csv'.format(sector, s, e)
outpath = p.join(output_dir, outname)
# Convert to astropy table and write to csv
candidates = tbl.Table(d,names=colnames)
ast.io.ascii.write(candidates, outpath, format='csv', overwrite=True, comment='#', fast_writer=False)
print("Wrote file {} at {} s".format(outname,time()-tess_start),flush=True)
# reset dicts
# d = {key : [] for key in ['sector','tessFile','curveID','tcorr','correlation']}
s=e+1
candidates = tbl.Table(d,names=colnames)
# make merged table
cdf = pd.DataFrame.from_dict(d)
cdf = cdf[colnames]
df = pd.merge(cdf, params, on="curveID", how="left")
df.to_csv(p.join(output_dir, "chisq{}.csv".format(batman_suffix)))
tconv_time = time() - tconv_start
print("Convolved {}/{} tess files with {} curves in {:.3} s".format(ntess-nerr, ntess, nbatman, tconv_time),flush=True)
print("===END TCONVOLVE===",flush=True)
return candidates
def get_chi_sq(tess_time, tess_flux, tcorr, params):
current_fname = ""
chi_squared = []
#find the lightcurve minima to calculate the exoplanet period
arr = tess_flux / np.nanmedian(tess_flux)
arr[np.isnan(arr)] = np.nanmedian(arr)
arr[arr==0] = np.nanmedian(arr)
mu, std = stat.norm.fit(1 / arr)
peaks, _ = sig.find_peaks(1 / arr, height = mu + 4 * std, distance = 1000)
p = np.diff(tess_time[peaks])
#define parameters
PER = np.mean(p)
u_type = 'quadratic'
u_param = [0.1, 0.3]
t = tess_time - tess_time[0]
#normalize flux
outcounts = np.nan_to_num(tess_flux[tess_flux > np.nanmean(tess_flux)])
mu, sigma = stat.norm.fit(outcounts)
normalized_fluxes = tess_flux / mu
normalized_sigma = np.sqrt(tess_flux)/mu
for i, row in params.iterrows():
#get params for this row
T0 = tcorr[i]- tess_time[0]
RP = row["rp"]
INC = row["i"]
width = row["width"]
#calculate reduced chi-squared
chi_squared.append(np.nansum(((normalized_fluxes - make_lightcurve(T0, RP, INC, PER, width, u_type, u_param, t)) ** 2 / normalized_sigma ** 2) / 8))
return chi_squared
def main():
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("tess_dir", type=str)
parser.add_argument("batman_dir", type=str)
parser.add_argument("sector", type=int)
parser.add_argument("start", type=int)
parser.add_argument("end", type=int)
parser.add_argument("output_dir", type=str)
parser.add_argument("batman_suffix",type=str,default="")
parser.add_argument("-v", "--verbosity", default=False,
action="store_true", help="Print console output")
args = parser.parse_args()
tbconvolve(args.tess_dir, args.batman_dir, args.batman_suffix, args.sector, args.start,
args.end, args.output_dir, num_keep=-1, norm_tess=True, verbosity=args.verbosity)
if __name__ == '__main__':
main()
| print("No batman curves found, quitting....")
return None | conditional_block |
topic.go | package config
import (
"errors"
"fmt"
"github.com/ghodss/yaml"
"github.com/hashicorp/go-multierror"
"github.com/segmentio/kafka-go"
"github.com/segmentio/topicctl/pkg/admin"
log "github.com/sirupsen/logrus"
)
// PlacementStrategy is a string type that stores a replica placement strategy for a topic.
type PlacementStrategy string
const (
// PlacementStrategyAny allows any partition placement.
PlacementStrategyAny PlacementStrategy = "any"
// PlacementStrategyBalancedLeaders is a strategy that ensures the leaders of
// each partition are balanced by rack, but does not care about the placements
// of the non-leader replicas.
PlacementStrategyBalancedLeaders PlacementStrategy = "balanced-leaders"
// PlacementStrategyInRack is a strategy in which the leaders are balanced
// and the replicas for each partition are in the same rack as the leader.
PlacementStrategyInRack PlacementStrategy = "in-rack"
// PlacementStrategyCrossRack is a strategy in which the leaders are balanced
// and the replicas in each partition are spread to separate racks.
PlacementStrategyCrossRack PlacementStrategy = "cross-rack"
// PlacementStrategyStatic uses a static placement defined in the config. This is for
// testing only and should generally not be used in production.
PlacementStrategyStatic PlacementStrategy = "static"
// PlacementStrategyStaticInRack is a strategy in which the replicas in each partition
// are chosen from the rack in a static list, but the specific replicas within each partition
// aren't specified.
PlacementStrategyStaticInRack PlacementStrategy = "static-in-rack"
)
var allPlacementStrategies = []PlacementStrategy{
PlacementStrategyAny,
PlacementStrategyBalancedLeaders,
PlacementStrategyInRack,
PlacementStrategyCrossRack,
PlacementStrategyStatic,
PlacementStrategyStaticInRack,
}
// PickerMethod is a string type that stores a picker method for breaking ties when choosing
// the replica placements for a topic.
type PickerMethod string
const (
// PickerMethodClusterUse uses broker frequency in the topic, breaking ties by
// looking at the total number of replicas across the entire cluster that each broker
// appears in.
PickerMethodClusterUse PickerMethod = "cluster-use"
// PickerMethodLowestIndex uses broker frequency in the topic, breaking ties by
// choosing the broker with the lowest index.
PickerMethodLowestIndex PickerMethod = "lowest-index"
// PickerMethodRandomized uses broker frequency in the topic, breaking ties by
// using a repeatably random choice from the options.
PickerMethodRandomized PickerMethod = "randomized"
)
var allPickerMethods = []PickerMethod{
PickerMethodClusterUse,
PickerMethodLowestIndex,
PickerMethodRandomized,
}
// TopicConfig represents the desired configuration of a topic.
type TopicConfig struct {
Meta TopicMeta `json:"meta"`
Spec TopicSpec `json:"spec"`
}
// TopicMeta stores the (mostly immutable) metadata associated with a topic.
// Inspired by the meta structs in Kubernetes objects.
type TopicMeta struct {
Name string `json:"name"`
Cluster string `json:"cluster"`
Region string `json:"region"`
Environment string `json:"environment"`
Description string `json:"description"`
Labels map[string]string `json:"labels"`
// Consumers is a list of consumers who are expected to consume from this
// topic.
Consumers []string `json:"consumers,omitempty"`
}
// TopicSpec stores the (mutable) specification for a topic.
type TopicSpec struct {
Partitions int `json:"partitions"`
ReplicationFactor int `json:"replicationFactor"`
RetentionMinutes int `json:"retentionMinutes,omitempty"`
Settings TopicSettings `json:"settings,omitempty"`
PlacementConfig TopicPlacementConfig `json:"placement"`
MigrationConfig *TopicMigrationConfig `json:"migration,omitempty"`
}
// TopicPlacementConfig describes how the partition replicas in a topic
// should be chosen.
type TopicPlacementConfig struct {
Strategy PlacementStrategy `json:"strategy"`
Picker PickerMethod `json:"picker,omitempty"`
// StaticAssignments is a list of lists of desired replica assignments. It's used
// for the "static" strategy only.
StaticAssignments [][]int `json:"staticAssignments,omitempty"`
// StaticRackAssignments is a list of list of desired replica assignments. It's used
// for the "static-in-rack" strategy only.
StaticRackAssignments []string `json:"staticRackAssignments,omitempty"`
}
// TopicMigrationConfig configures the throttles and batch sizes used when
// running a partition migration. If these are left unset, resonable defaults
// will be used instead.
type TopicMigrationConfig struct {
ThrottleMB int64 `json:"throttleMB"`
PartitionBatchSize int `json:"partitionBatchSize"`
}
// ToNewTopicConfig converts a TopicConfig to a kafka.TopicConfig that can be
// used by kafka-go to create a new topic.
func (t TopicConfig) ToNewTopicConfig() (kafka.TopicConfig, error) {
config := kafka.TopicConfig{
Topic: t.Meta.Name,
NumPartitions: t.Spec.Partitions,
ReplicationFactor: t.Spec.ReplicationFactor,
}
if len(t.Spec.Settings) > 0 {
entries, err := t.Spec.Settings.ToConfigEntries(nil)
if err != nil {
return config, err
}
config.ConfigEntries = entries
}
if t.Spec.RetentionMinutes > 0 {
config.ConfigEntries = append(
config.ConfigEntries,
kafka.ConfigEntry{
ConfigName: admin.RetentionKey,
ConfigValue: fmt.Sprintf("%d", t.Spec.RetentionMinutes*60*1000),
},
)
}
return config, nil
}
// SetDefaults sets the default migration and placement settings in a topic config
// if these aren't set.
func (t *TopicConfig) SetDefaults() {
if t.Spec.MigrationConfig == nil {
t.Spec.MigrationConfig = &TopicMigrationConfig{}
}
if t.Spec.MigrationConfig.PartitionBatchSize == 0 {
// Migration partitions one at a time
t.Spec.MigrationConfig.PartitionBatchSize = 1
}
if t.Spec.PlacementConfig.Picker == "" {
t.Spec.PlacementConfig.Picker = PickerMethodRandomized
}
}
// Validate evaluates whether the topic config is valid.
func (t TopicConfig) Validate(numRacks int) error {
var err error
if t.Meta.Name == "" {
err = multierror.Append(err, errors.New("Name must be set"))
}
if t.Meta.Cluster == "" {
err = multierror.Append(err, errors.New("Cluster must be set"))
}
if t.Meta.Region == "" {
err = multierror.Append(err, errors.New("Region must be set"))
}
if t.Meta.Environment == "" {
err = multierror.Append(err, errors.New("Environment must be set"))
}
if t.Spec.Partitions <= 0 {
err = multierror.Append(err, errors.New("Partitions must be a positive number"))
}
if t.Spec.ReplicationFactor <= 0 {
err = multierror.Append(err, errors.New("ReplicationFactor must be > 0"))
}
if settingsErr := t.Spec.Settings.Validate(); settingsErr != nil {
err = multierror.Append(err, settingsErr)
}
if t.Spec.RetentionMinutes < 0 {
err = multierror.Append(err, errors.New("RetentionMinutes must be >= 0"))
}
if t.Spec.RetentionMinutes > 0 && t.Spec.Settings["retention.ms"] != nil {
err = multierror.Append(
err,
errors.New("Cannot set both RetentionMinutes and retention.ms in settings"),
)
}
if (t.Spec.Settings["local.retention.bytes"] != nil || t.Spec.Settings["local.retention.ms"] != nil) && t.Spec.Settings["remote.storage.enable"] == nil {
err = multierror.Append(
err,
errors.New("Setting local retention parameters requires remote.storage.enable to be set in settings"),
)
}
placement := t.Spec.PlacementConfig
strategyIndex := -1
for s, strategy := range allPlacementStrategies {
if strategy == placement.Strategy {
strategyIndex = s
break
}
}
if strategyIndex == -1 {
err = multierror.Append(
err,
fmt.Errorf(
"PlacementStrategy must in %+v",
allPlacementStrategies,
),
)
}
pickerIndex := -1
for p, pickerMethod := range allPickerMethods {
if pickerMethod == placement.Picker {
pickerIndex = p
break
}
}
if pickerIndex == -1 |
switch placement.Strategy {
case PlacementStrategyBalancedLeaders:
if numRacks > 0 && t.Spec.Partitions%numRacks != 0 {
// The balanced-leaders strategy requires that the
// partitions be a multiple of the number of racks, otherwise it's impossible
// to find a placement that satisfies the strategy.
err = multierror.Append(
err,
fmt.Errorf(
"Number of partitions (%d) is not a multiple of the number of racks (%d)",
t.Spec.Partitions,
numRacks,
),
)
}
case PlacementStrategyCrossRack:
if numRacks > 0 && t.Spec.ReplicationFactor > numRacks {
err = multierror.Append(
err,
fmt.Errorf(
"Replication factor (%d) cannot be larger than the number of racks (%d)",
t.Spec.ReplicationFactor,
numRacks,
),
)
}
case PlacementStrategyInRack:
case PlacementStrategyStatic:
if len(placement.StaticAssignments) != t.Spec.Partitions {
err = multierror.Append(
err,
errors.New("Static assignments must be same length as partitions"),
)
} else {
for _, replicas := range placement.StaticAssignments {
if len(replicas) != t.Spec.ReplicationFactor {
err = multierror.Append(
err,
errors.New("Static assignment rows must match replication factor"),
)
break
}
}
}
case PlacementStrategyStaticInRack:
if len(placement.StaticRackAssignments) != t.Spec.Partitions {
err = multierror.Append(
err,
errors.New("Static rack assignments must be same length as partitions"),
)
}
}
// Warn about the partition count in the non-balanced-leaders case
if numRacks > 0 &&
placement.Strategy != PlacementStrategyBalancedLeaders &&
t.Spec.Partitions%numRacks != 0 {
log.Warnf("Number of partitions (%d) is not a multiple of the number of racks (%d)",
t.Spec.Partitions,
numRacks,
)
}
return err
}
// ToYAML converts the current TopicConfig to a YAML string.
func (t TopicConfig) ToYAML() (string, error) {
outBytes, err := yaml.Marshal(t)
if err != nil {
return "", err
}
return string(outBytes), nil
}
// TopicConfigFromTopicInfo generates a TopicConfig from a ClusterConfig and admin.TopicInfo
// struct generated from the cluster state.
func TopicConfigFromTopicInfo(
clusterConfig ClusterConfig,
topicInfo admin.TopicInfo,
) TopicConfig {
topicConfig := TopicConfig{
Meta: TopicMeta{
Name: topicInfo.Name,
Cluster: clusterConfig.Meta.Name,
Region: clusterConfig.Meta.Region,
Environment: clusterConfig.Meta.Environment,
Description: "Bootstrapped via topicctl bootstrap",
},
Spec: TopicSpec{
Partitions: len(topicInfo.Partitions),
ReplicationFactor: len(topicInfo.Partitions[0].Replicas),
PlacementConfig: TopicPlacementConfig{
Strategy: PlacementStrategyAny,
},
},
}
topicConfig.Spec.Settings = FromConfigMap(topicInfo.Config)
retentionMinutes := topicInfo.Retention().Minutes()
if retentionMinutes >= 1.0 && float64(int(retentionMinutes)) == retentionMinutes {
topicConfig.Spec.RetentionMinutes = int(retentionMinutes)
delete(topicConfig.Spec.Settings, admin.RetentionKey)
}
return topicConfig
}
| {
err = multierror.Append(
err,
fmt.Errorf(
"PickerMethod must in %+v",
allPickerMethods,
),
)
} | conditional_block |
topic.go | package config
import (
"errors"
"fmt"
"github.com/ghodss/yaml"
"github.com/hashicorp/go-multierror"
"github.com/segmentio/kafka-go"
"github.com/segmentio/topicctl/pkg/admin"
log "github.com/sirupsen/logrus"
)
// PlacementStrategy is a string type that stores a replica placement strategy for a topic.
type PlacementStrategy string
const (
// PlacementStrategyAny allows any partition placement.
PlacementStrategyAny PlacementStrategy = "any"
// PlacementStrategyBalancedLeaders is a strategy that ensures the leaders of
// each partition are balanced by rack, but does not care about the placements
// of the non-leader replicas.
PlacementStrategyBalancedLeaders PlacementStrategy = "balanced-leaders"
// PlacementStrategyInRack is a strategy in which the leaders are balanced
// and the replicas for each partition are in the same rack as the leader.
PlacementStrategyInRack PlacementStrategy = "in-rack"
// PlacementStrategyCrossRack is a strategy in which the leaders are balanced
// and the replicas in each partition are spread to separate racks.
PlacementStrategyCrossRack PlacementStrategy = "cross-rack"
// PlacementStrategyStatic uses a static placement defined in the config. This is for
// testing only and should generally not be used in production.
PlacementStrategyStatic PlacementStrategy = "static"
// PlacementStrategyStaticInRack is a strategy in which the replicas in each partition
// are chosen from the rack in a static list, but the specific replicas within each partition
// aren't specified.
PlacementStrategyStaticInRack PlacementStrategy = "static-in-rack"
)
var allPlacementStrategies = []PlacementStrategy{
PlacementStrategyAny,
PlacementStrategyBalancedLeaders,
PlacementStrategyInRack,
PlacementStrategyCrossRack,
PlacementStrategyStatic,
PlacementStrategyStaticInRack,
}
// PickerMethod is a string type that stores a picker method for breaking ties when choosing
// the replica placements for a topic.
type PickerMethod string
const (
// PickerMethodClusterUse uses broker frequency in the topic, breaking ties by
// looking at the total number of replicas across the entire cluster that each broker
// appears in.
PickerMethodClusterUse PickerMethod = "cluster-use"
// PickerMethodLowestIndex uses broker frequency in the topic, breaking ties by
// choosing the broker with the lowest index.
PickerMethodLowestIndex PickerMethod = "lowest-index"
// PickerMethodRandomized uses broker frequency in the topic, breaking ties by
// using a repeatably random choice from the options.
PickerMethodRandomized PickerMethod = "randomized"
)
var allPickerMethods = []PickerMethod{
PickerMethodClusterUse,
PickerMethodLowestIndex,
PickerMethodRandomized,
}
// TopicConfig represents the desired configuration of a topic.
type TopicConfig struct {
Meta TopicMeta `json:"meta"`
Spec TopicSpec `json:"spec"`
}
// TopicMeta stores the (mostly immutable) metadata associated with a topic.
// Inspired by the meta structs in Kubernetes objects.
type TopicMeta struct {
Name string `json:"name"`
Cluster string `json:"cluster"`
Region string `json:"region"`
Environment string `json:"environment"`
Description string `json:"description"`
Labels map[string]string `json:"labels"`
// Consumers is a list of consumers who are expected to consume from this
// topic.
Consumers []string `json:"consumers,omitempty"`
}
// TopicSpec stores the (mutable) specification for a topic.
type TopicSpec struct {
Partitions int `json:"partitions"`
ReplicationFactor int `json:"replicationFactor"`
RetentionMinutes int `json:"retentionMinutes,omitempty"`
Settings TopicSettings `json:"settings,omitempty"`
PlacementConfig TopicPlacementConfig `json:"placement"`
MigrationConfig *TopicMigrationConfig `json:"migration,omitempty"`
}
// TopicPlacementConfig describes how the partition replicas in a topic
// should be chosen.
type TopicPlacementConfig struct {
Strategy PlacementStrategy `json:"strategy"`
Picker PickerMethod `json:"picker,omitempty"`
// StaticAssignments is a list of lists of desired replica assignments. It's used
// for the "static" strategy only.
StaticAssignments [][]int `json:"staticAssignments,omitempty"`
// StaticRackAssignments is a list of list of desired replica assignments. It's used
// for the "static-in-rack" strategy only.
StaticRackAssignments []string `json:"staticRackAssignments,omitempty"`
}
// TopicMigrationConfig configures the throttles and batch sizes used when
// running a partition migration. If these are left unset, resonable defaults
// will be used instead.
type TopicMigrationConfig struct {
ThrottleMB int64 `json:"throttleMB"`
PartitionBatchSize int `json:"partitionBatchSize"`
}
// ToNewTopicConfig converts a TopicConfig to a kafka.TopicConfig that can be
// used by kafka-go to create a new topic.
func (t TopicConfig) ToNewTopicConfig() (kafka.TopicConfig, error) {
config := kafka.TopicConfig{
Topic: t.Meta.Name,
NumPartitions: t.Spec.Partitions,
ReplicationFactor: t.Spec.ReplicationFactor,
}
if len(t.Spec.Settings) > 0 {
entries, err := t.Spec.Settings.ToConfigEntries(nil)
if err != nil {
return config, err
}
config.ConfigEntries = entries
}
if t.Spec.RetentionMinutes > 0 {
config.ConfigEntries = append(
config.ConfigEntries,
kafka.ConfigEntry{
ConfigName: admin.RetentionKey,
ConfigValue: fmt.Sprintf("%d", t.Spec.RetentionMinutes*60*1000),
},
)
}
return config, nil
}
// SetDefaults sets the default migration and placement settings in a topic config
// if these aren't set.
func (t *TopicConfig) SetDefaults() {
if t.Spec.MigrationConfig == nil {
t.Spec.MigrationConfig = &TopicMigrationConfig{}
}
if t.Spec.MigrationConfig.PartitionBatchSize == 0 {
// Migration partitions one at a time
t.Spec.MigrationConfig.PartitionBatchSize = 1
}
if t.Spec.PlacementConfig.Picker == "" {
t.Spec.PlacementConfig.Picker = PickerMethodRandomized
}
}
// Validate evaluates whether the topic config is valid.
func (t TopicConfig) Validate(numRacks int) error {
var err error
if t.Meta.Name == "" {
err = multierror.Append(err, errors.New("Name must be set"))
}
if t.Meta.Cluster == "" {
err = multierror.Append(err, errors.New("Cluster must be set"))
}
if t.Meta.Region == "" {
err = multierror.Append(err, errors.New("Region must be set"))
}
if t.Meta.Environment == "" {
err = multierror.Append(err, errors.New("Environment must be set"))
}
if t.Spec.Partitions <= 0 {
err = multierror.Append(err, errors.New("Partitions must be a positive number"))
}
if t.Spec.ReplicationFactor <= 0 {
err = multierror.Append(err, errors.New("ReplicationFactor must be > 0"))
}
if settingsErr := t.Spec.Settings.Validate(); settingsErr != nil {
err = multierror.Append(err, settingsErr)
}
if t.Spec.RetentionMinutes < 0 {
err = multierror.Append(err, errors.New("RetentionMinutes must be >= 0"))
}
if t.Spec.RetentionMinutes > 0 && t.Spec.Settings["retention.ms"] != nil {
err = multierror.Append(
err,
errors.New("Cannot set both RetentionMinutes and retention.ms in settings"),
)
}
if (t.Spec.Settings["local.retention.bytes"] != nil || t.Spec.Settings["local.retention.ms"] != nil) && t.Spec.Settings["remote.storage.enable"] == nil {
err = multierror.Append(
err,
errors.New("Setting local retention parameters requires remote.storage.enable to be set in settings"),
)
}
placement := t.Spec.PlacementConfig
strategyIndex := -1
for s, strategy := range allPlacementStrategies {
if strategy == placement.Strategy {
strategyIndex = s
break
}
} | "PlacementStrategy must in %+v",
allPlacementStrategies,
),
)
}
pickerIndex := -1
for p, pickerMethod := range allPickerMethods {
if pickerMethod == placement.Picker {
pickerIndex = p
break
}
}
if pickerIndex == -1 {
err = multierror.Append(
err,
fmt.Errorf(
"PickerMethod must in %+v",
allPickerMethods,
),
)
}
switch placement.Strategy {
case PlacementStrategyBalancedLeaders:
if numRacks > 0 && t.Spec.Partitions%numRacks != 0 {
// The balanced-leaders strategy requires that the
// partitions be a multiple of the number of racks, otherwise it's impossible
// to find a placement that satisfies the strategy.
err = multierror.Append(
err,
fmt.Errorf(
"Number of partitions (%d) is not a multiple of the number of racks (%d)",
t.Spec.Partitions,
numRacks,
),
)
}
case PlacementStrategyCrossRack:
if numRacks > 0 && t.Spec.ReplicationFactor > numRacks {
err = multierror.Append(
err,
fmt.Errorf(
"Replication factor (%d) cannot be larger than the number of racks (%d)",
t.Spec.ReplicationFactor,
numRacks,
),
)
}
case PlacementStrategyInRack:
case PlacementStrategyStatic:
if len(placement.StaticAssignments) != t.Spec.Partitions {
err = multierror.Append(
err,
errors.New("Static assignments must be same length as partitions"),
)
} else {
for _, replicas := range placement.StaticAssignments {
if len(replicas) != t.Spec.ReplicationFactor {
err = multierror.Append(
err,
errors.New("Static assignment rows must match replication factor"),
)
break
}
}
}
case PlacementStrategyStaticInRack:
if len(placement.StaticRackAssignments) != t.Spec.Partitions {
err = multierror.Append(
err,
errors.New("Static rack assignments must be same length as partitions"),
)
}
}
// Warn about the partition count in the non-balanced-leaders case
if numRacks > 0 &&
placement.Strategy != PlacementStrategyBalancedLeaders &&
t.Spec.Partitions%numRacks != 0 {
log.Warnf("Number of partitions (%d) is not a multiple of the number of racks (%d)",
t.Spec.Partitions,
numRacks,
)
}
return err
}
// ToYAML converts the current TopicConfig to a YAML string.
func (t TopicConfig) ToYAML() (string, error) {
outBytes, err := yaml.Marshal(t)
if err != nil {
return "", err
}
return string(outBytes), nil
}
// TopicConfigFromTopicInfo generates a TopicConfig from a ClusterConfig and admin.TopicInfo
// struct generated from the cluster state.
func TopicConfigFromTopicInfo(
clusterConfig ClusterConfig,
topicInfo admin.TopicInfo,
) TopicConfig {
topicConfig := TopicConfig{
Meta: TopicMeta{
Name: topicInfo.Name,
Cluster: clusterConfig.Meta.Name,
Region: clusterConfig.Meta.Region,
Environment: clusterConfig.Meta.Environment,
Description: "Bootstrapped via topicctl bootstrap",
},
Spec: TopicSpec{
Partitions: len(topicInfo.Partitions),
ReplicationFactor: len(topicInfo.Partitions[0].Replicas),
PlacementConfig: TopicPlacementConfig{
Strategy: PlacementStrategyAny,
},
},
}
topicConfig.Spec.Settings = FromConfigMap(topicInfo.Config)
retentionMinutes := topicInfo.Retention().Minutes()
if retentionMinutes >= 1.0 && float64(int(retentionMinutes)) == retentionMinutes {
topicConfig.Spec.RetentionMinutes = int(retentionMinutes)
delete(topicConfig.Spec.Settings, admin.RetentionKey)
}
return topicConfig
} |
if strategyIndex == -1 {
err = multierror.Append(
err,
fmt.Errorf( | random_line_split |
topic.go | package config
import (
"errors"
"fmt"
"github.com/ghodss/yaml"
"github.com/hashicorp/go-multierror"
"github.com/segmentio/kafka-go"
"github.com/segmentio/topicctl/pkg/admin"
log "github.com/sirupsen/logrus"
)
// PlacementStrategy is a string type that stores a replica placement strategy for a topic.
type PlacementStrategy string
const (
// PlacementStrategyAny allows any partition placement.
PlacementStrategyAny PlacementStrategy = "any"
// PlacementStrategyBalancedLeaders is a strategy that ensures the leaders of
// each partition are balanced by rack, but does not care about the placements
// of the non-leader replicas.
PlacementStrategyBalancedLeaders PlacementStrategy = "balanced-leaders"
// PlacementStrategyInRack is a strategy in which the leaders are balanced
// and the replicas for each partition are in the same rack as the leader.
PlacementStrategyInRack PlacementStrategy = "in-rack"
// PlacementStrategyCrossRack is a strategy in which the leaders are balanced
// and the replicas in each partition are spread to separate racks.
PlacementStrategyCrossRack PlacementStrategy = "cross-rack"
// PlacementStrategyStatic uses a static placement defined in the config. This is for
// testing only and should generally not be used in production.
PlacementStrategyStatic PlacementStrategy = "static"
// PlacementStrategyStaticInRack is a strategy in which the replicas in each partition
// are chosen from the rack in a static list, but the specific replicas within each partition
// aren't specified.
PlacementStrategyStaticInRack PlacementStrategy = "static-in-rack"
)
var allPlacementStrategies = []PlacementStrategy{
PlacementStrategyAny,
PlacementStrategyBalancedLeaders,
PlacementStrategyInRack,
PlacementStrategyCrossRack,
PlacementStrategyStatic,
PlacementStrategyStaticInRack,
}
// PickerMethod is a string type that stores a picker method for breaking ties when choosing
// the replica placements for a topic.
type PickerMethod string
const (
// PickerMethodClusterUse uses broker frequency in the topic, breaking ties by
// looking at the total number of replicas across the entire cluster that each broker
// appears in.
PickerMethodClusterUse PickerMethod = "cluster-use"
// PickerMethodLowestIndex uses broker frequency in the topic, breaking ties by
// choosing the broker with the lowest index.
PickerMethodLowestIndex PickerMethod = "lowest-index"
// PickerMethodRandomized uses broker frequency in the topic, breaking ties by
// using a repeatably random choice from the options.
PickerMethodRandomized PickerMethod = "randomized"
)
var allPickerMethods = []PickerMethod{
PickerMethodClusterUse,
PickerMethodLowestIndex,
PickerMethodRandomized,
}
// TopicConfig represents the desired configuration of a topic.
type TopicConfig struct {
Meta TopicMeta `json:"meta"`
Spec TopicSpec `json:"spec"`
}
// TopicMeta stores the (mostly immutable) metadata associated with a topic.
// Inspired by the meta structs in Kubernetes objects.
type TopicMeta struct {
Name string `json:"name"`
Cluster string `json:"cluster"`
Region string `json:"region"`
Environment string `json:"environment"`
Description string `json:"description"`
Labels map[string]string `json:"labels"`
// Consumers is a list of consumers who are expected to consume from this
// topic.
Consumers []string `json:"consumers,omitempty"`
}
// TopicSpec stores the (mutable) specification for a topic.
type TopicSpec struct {
Partitions int `json:"partitions"`
ReplicationFactor int `json:"replicationFactor"`
RetentionMinutes int `json:"retentionMinutes,omitempty"`
Settings TopicSettings `json:"settings,omitempty"`
PlacementConfig TopicPlacementConfig `json:"placement"`
MigrationConfig *TopicMigrationConfig `json:"migration,omitempty"`
}
// TopicPlacementConfig describes how the partition replicas in a topic
// should be chosen.
type TopicPlacementConfig struct {
Strategy PlacementStrategy `json:"strategy"`
Picker PickerMethod `json:"picker,omitempty"`
// StaticAssignments is a list of lists of desired replica assignments. It's used
// for the "static" strategy only.
StaticAssignments [][]int `json:"staticAssignments,omitempty"`
// StaticRackAssignments is a list of list of desired replica assignments. It's used
// for the "static-in-rack" strategy only.
StaticRackAssignments []string `json:"staticRackAssignments,omitempty"`
}
// TopicMigrationConfig configures the throttles and batch sizes used when
// running a partition migration. If these are left unset, resonable defaults
// will be used instead.
type TopicMigrationConfig struct {
ThrottleMB int64 `json:"throttleMB"`
PartitionBatchSize int `json:"partitionBatchSize"`
}
// ToNewTopicConfig converts a TopicConfig to a kafka.TopicConfig that can be
// used by kafka-go to create a new topic.
func (t TopicConfig) ToNewTopicConfig() (kafka.TopicConfig, error) {
config := kafka.TopicConfig{
Topic: t.Meta.Name,
NumPartitions: t.Spec.Partitions,
ReplicationFactor: t.Spec.ReplicationFactor,
}
if len(t.Spec.Settings) > 0 {
entries, err := t.Spec.Settings.ToConfigEntries(nil)
if err != nil {
return config, err
}
config.ConfigEntries = entries
}
if t.Spec.RetentionMinutes > 0 {
config.ConfigEntries = append(
config.ConfigEntries,
kafka.ConfigEntry{
ConfigName: admin.RetentionKey,
ConfigValue: fmt.Sprintf("%d", t.Spec.RetentionMinutes*60*1000),
},
)
}
return config, nil
}
// SetDefaults sets the default migration and placement settings in a topic config
// if these aren't set.
func (t *TopicConfig) SetDefaults() {
if t.Spec.MigrationConfig == nil {
t.Spec.MigrationConfig = &TopicMigrationConfig{}
}
if t.Spec.MigrationConfig.PartitionBatchSize == 0 {
// Migration partitions one at a time
t.Spec.MigrationConfig.PartitionBatchSize = 1
}
if t.Spec.PlacementConfig.Picker == "" {
t.Spec.PlacementConfig.Picker = PickerMethodRandomized
}
}
// Validate evaluates whether the topic config is valid.
func (t TopicConfig) Validate(numRacks int) error |
// ToYAML converts the current TopicConfig to a YAML string.
func (t TopicConfig) ToYAML() (string, error) {
outBytes, err := yaml.Marshal(t)
if err != nil {
return "", err
}
return string(outBytes), nil
}
// TopicConfigFromTopicInfo generates a TopicConfig from a ClusterConfig and admin.TopicInfo
// struct generated from the cluster state.
func TopicConfigFromTopicInfo(
clusterConfig ClusterConfig,
topicInfo admin.TopicInfo,
) TopicConfig {
topicConfig := TopicConfig{
Meta: TopicMeta{
Name: topicInfo.Name,
Cluster: clusterConfig.Meta.Name,
Region: clusterConfig.Meta.Region,
Environment: clusterConfig.Meta.Environment,
Description: "Bootstrapped via topicctl bootstrap",
},
Spec: TopicSpec{
Partitions: len(topicInfo.Partitions),
ReplicationFactor: len(topicInfo.Partitions[0].Replicas),
PlacementConfig: TopicPlacementConfig{
Strategy: PlacementStrategyAny,
},
},
}
topicConfig.Spec.Settings = FromConfigMap(topicInfo.Config)
retentionMinutes := topicInfo.Retention().Minutes()
if retentionMinutes >= 1.0 && float64(int(retentionMinutes)) == retentionMinutes {
topicConfig.Spec.RetentionMinutes = int(retentionMinutes)
delete(topicConfig.Spec.Settings, admin.RetentionKey)
}
return topicConfig
}
| {
var err error
if t.Meta.Name == "" {
err = multierror.Append(err, errors.New("Name must be set"))
}
if t.Meta.Cluster == "" {
err = multierror.Append(err, errors.New("Cluster must be set"))
}
if t.Meta.Region == "" {
err = multierror.Append(err, errors.New("Region must be set"))
}
if t.Meta.Environment == "" {
err = multierror.Append(err, errors.New("Environment must be set"))
}
if t.Spec.Partitions <= 0 {
err = multierror.Append(err, errors.New("Partitions must be a positive number"))
}
if t.Spec.ReplicationFactor <= 0 {
err = multierror.Append(err, errors.New("ReplicationFactor must be > 0"))
}
if settingsErr := t.Spec.Settings.Validate(); settingsErr != nil {
err = multierror.Append(err, settingsErr)
}
if t.Spec.RetentionMinutes < 0 {
err = multierror.Append(err, errors.New("RetentionMinutes must be >= 0"))
}
if t.Spec.RetentionMinutes > 0 && t.Spec.Settings["retention.ms"] != nil {
err = multierror.Append(
err,
errors.New("Cannot set both RetentionMinutes and retention.ms in settings"),
)
}
if (t.Spec.Settings["local.retention.bytes"] != nil || t.Spec.Settings["local.retention.ms"] != nil) && t.Spec.Settings["remote.storage.enable"] == nil {
err = multierror.Append(
err,
errors.New("Setting local retention parameters requires remote.storage.enable to be set in settings"),
)
}
placement := t.Spec.PlacementConfig
strategyIndex := -1
for s, strategy := range allPlacementStrategies {
if strategy == placement.Strategy {
strategyIndex = s
break
}
}
if strategyIndex == -1 {
err = multierror.Append(
err,
fmt.Errorf(
"PlacementStrategy must in %+v",
allPlacementStrategies,
),
)
}
pickerIndex := -1
for p, pickerMethod := range allPickerMethods {
if pickerMethod == placement.Picker {
pickerIndex = p
break
}
}
if pickerIndex == -1 {
err = multierror.Append(
err,
fmt.Errorf(
"PickerMethod must in %+v",
allPickerMethods,
),
)
}
switch placement.Strategy {
case PlacementStrategyBalancedLeaders:
if numRacks > 0 && t.Spec.Partitions%numRacks != 0 {
// The balanced-leaders strategy requires that the
// partitions be a multiple of the number of racks, otherwise it's impossible
// to find a placement that satisfies the strategy.
err = multierror.Append(
err,
fmt.Errorf(
"Number of partitions (%d) is not a multiple of the number of racks (%d)",
t.Spec.Partitions,
numRacks,
),
)
}
case PlacementStrategyCrossRack:
if numRacks > 0 && t.Spec.ReplicationFactor > numRacks {
err = multierror.Append(
err,
fmt.Errorf(
"Replication factor (%d) cannot be larger than the number of racks (%d)",
t.Spec.ReplicationFactor,
numRacks,
),
)
}
case PlacementStrategyInRack:
case PlacementStrategyStatic:
if len(placement.StaticAssignments) != t.Spec.Partitions {
err = multierror.Append(
err,
errors.New("Static assignments must be same length as partitions"),
)
} else {
for _, replicas := range placement.StaticAssignments {
if len(replicas) != t.Spec.ReplicationFactor {
err = multierror.Append(
err,
errors.New("Static assignment rows must match replication factor"),
)
break
}
}
}
case PlacementStrategyStaticInRack:
if len(placement.StaticRackAssignments) != t.Spec.Partitions {
err = multierror.Append(
err,
errors.New("Static rack assignments must be same length as partitions"),
)
}
}
// Warn about the partition count in the non-balanced-leaders case
if numRacks > 0 &&
placement.Strategy != PlacementStrategyBalancedLeaders &&
t.Spec.Partitions%numRacks != 0 {
log.Warnf("Number of partitions (%d) is not a multiple of the number of racks (%d)",
t.Spec.Partitions,
numRacks,
)
}
return err
} | identifier_body |
topic.go | package config
import (
"errors"
"fmt"
"github.com/ghodss/yaml"
"github.com/hashicorp/go-multierror"
"github.com/segmentio/kafka-go"
"github.com/segmentio/topicctl/pkg/admin"
log "github.com/sirupsen/logrus"
)
// PlacementStrategy is a string type that stores a replica placement strategy for a topic.
type PlacementStrategy string
const (
// PlacementStrategyAny allows any partition placement.
PlacementStrategyAny PlacementStrategy = "any"
// PlacementStrategyBalancedLeaders is a strategy that ensures the leaders of
// each partition are balanced by rack, but does not care about the placements
// of the non-leader replicas.
PlacementStrategyBalancedLeaders PlacementStrategy = "balanced-leaders"
// PlacementStrategyInRack is a strategy in which the leaders are balanced
// and the replicas for each partition are in the same rack as the leader.
PlacementStrategyInRack PlacementStrategy = "in-rack"
// PlacementStrategyCrossRack is a strategy in which the leaders are balanced
// and the replicas in each partition are spread to separate racks.
PlacementStrategyCrossRack PlacementStrategy = "cross-rack"
// PlacementStrategyStatic uses a static placement defined in the config. This is for
// testing only and should generally not be used in production.
PlacementStrategyStatic PlacementStrategy = "static"
// PlacementStrategyStaticInRack is a strategy in which the replicas in each partition
// are chosen from the rack in a static list, but the specific replicas within each partition
// aren't specified.
PlacementStrategyStaticInRack PlacementStrategy = "static-in-rack"
)
var allPlacementStrategies = []PlacementStrategy{
PlacementStrategyAny,
PlacementStrategyBalancedLeaders,
PlacementStrategyInRack,
PlacementStrategyCrossRack,
PlacementStrategyStatic,
PlacementStrategyStaticInRack,
}
// PickerMethod is a string type that stores a picker method for breaking ties when choosing
// the replica placements for a topic.
type PickerMethod string
const (
// PickerMethodClusterUse uses broker frequency in the topic, breaking ties by
// looking at the total number of replicas across the entire cluster that each broker
// appears in.
PickerMethodClusterUse PickerMethod = "cluster-use"
// PickerMethodLowestIndex uses broker frequency in the topic, breaking ties by
// choosing the broker with the lowest index.
PickerMethodLowestIndex PickerMethod = "lowest-index"
// PickerMethodRandomized uses broker frequency in the topic, breaking ties by
// using a repeatably random choice from the options.
PickerMethodRandomized PickerMethod = "randomized"
)
var allPickerMethods = []PickerMethod{
PickerMethodClusterUse,
PickerMethodLowestIndex,
PickerMethodRandomized,
}
// TopicConfig represents the desired configuration of a topic.
type TopicConfig struct {
Meta TopicMeta `json:"meta"`
Spec TopicSpec `json:"spec"`
}
// TopicMeta stores the (mostly immutable) metadata associated with a topic.
// Inspired by the meta structs in Kubernetes objects.
type TopicMeta struct {
Name string `json:"name"`
Cluster string `json:"cluster"`
Region string `json:"region"`
Environment string `json:"environment"`
Description string `json:"description"`
Labels map[string]string `json:"labels"`
// Consumers is a list of consumers who are expected to consume from this
// topic.
Consumers []string `json:"consumers,omitempty"`
}
// TopicSpec stores the (mutable) specification for a topic.
type TopicSpec struct {
Partitions int `json:"partitions"`
ReplicationFactor int `json:"replicationFactor"`
RetentionMinutes int `json:"retentionMinutes,omitempty"`
Settings TopicSettings `json:"settings,omitempty"`
PlacementConfig TopicPlacementConfig `json:"placement"`
MigrationConfig *TopicMigrationConfig `json:"migration,omitempty"`
}
// TopicPlacementConfig describes how the partition replicas in a topic
// should be chosen.
type TopicPlacementConfig struct {
Strategy PlacementStrategy `json:"strategy"`
Picker PickerMethod `json:"picker,omitempty"`
// StaticAssignments is a list of lists of desired replica assignments. It's used
// for the "static" strategy only.
StaticAssignments [][]int `json:"staticAssignments,omitempty"`
// StaticRackAssignments is a list of list of desired replica assignments. It's used
// for the "static-in-rack" strategy only.
StaticRackAssignments []string `json:"staticRackAssignments,omitempty"`
}
// TopicMigrationConfig configures the throttles and batch sizes used when
// running a partition migration. If these are left unset, resonable defaults
// will be used instead.
type TopicMigrationConfig struct {
ThrottleMB int64 `json:"throttleMB"`
PartitionBatchSize int `json:"partitionBatchSize"`
}
// ToNewTopicConfig converts a TopicConfig to a kafka.TopicConfig that can be
// used by kafka-go to create a new topic.
func (t TopicConfig) ToNewTopicConfig() (kafka.TopicConfig, error) {
config := kafka.TopicConfig{
Topic: t.Meta.Name,
NumPartitions: t.Spec.Partitions,
ReplicationFactor: t.Spec.ReplicationFactor,
}
if len(t.Spec.Settings) > 0 {
entries, err := t.Spec.Settings.ToConfigEntries(nil)
if err != nil {
return config, err
}
config.ConfigEntries = entries
}
if t.Spec.RetentionMinutes > 0 {
config.ConfigEntries = append(
config.ConfigEntries,
kafka.ConfigEntry{
ConfigName: admin.RetentionKey,
ConfigValue: fmt.Sprintf("%d", t.Spec.RetentionMinutes*60*1000),
},
)
}
return config, nil
}
// SetDefaults sets the default migration and placement settings in a topic config
// if these aren't set.
func (t *TopicConfig) SetDefaults() {
if t.Spec.MigrationConfig == nil {
t.Spec.MigrationConfig = &TopicMigrationConfig{}
}
if t.Spec.MigrationConfig.PartitionBatchSize == 0 {
// Migration partitions one at a time
t.Spec.MigrationConfig.PartitionBatchSize = 1
}
if t.Spec.PlacementConfig.Picker == "" {
t.Spec.PlacementConfig.Picker = PickerMethodRandomized
}
}
// Validate evaluates whether the topic config is valid.
func (t TopicConfig) Validate(numRacks int) error {
var err error
if t.Meta.Name == "" {
err = multierror.Append(err, errors.New("Name must be set"))
}
if t.Meta.Cluster == "" {
err = multierror.Append(err, errors.New("Cluster must be set"))
}
if t.Meta.Region == "" {
err = multierror.Append(err, errors.New("Region must be set"))
}
if t.Meta.Environment == "" {
err = multierror.Append(err, errors.New("Environment must be set"))
}
if t.Spec.Partitions <= 0 {
err = multierror.Append(err, errors.New("Partitions must be a positive number"))
}
if t.Spec.ReplicationFactor <= 0 {
err = multierror.Append(err, errors.New("ReplicationFactor must be > 0"))
}
if settingsErr := t.Spec.Settings.Validate(); settingsErr != nil {
err = multierror.Append(err, settingsErr)
}
if t.Spec.RetentionMinutes < 0 {
err = multierror.Append(err, errors.New("RetentionMinutes must be >= 0"))
}
if t.Spec.RetentionMinutes > 0 && t.Spec.Settings["retention.ms"] != nil {
err = multierror.Append(
err,
errors.New("Cannot set both RetentionMinutes and retention.ms in settings"),
)
}
if (t.Spec.Settings["local.retention.bytes"] != nil || t.Spec.Settings["local.retention.ms"] != nil) && t.Spec.Settings["remote.storage.enable"] == nil {
err = multierror.Append(
err,
errors.New("Setting local retention parameters requires remote.storage.enable to be set in settings"),
)
}
placement := t.Spec.PlacementConfig
strategyIndex := -1
for s, strategy := range allPlacementStrategies {
if strategy == placement.Strategy {
strategyIndex = s
break
}
}
if strategyIndex == -1 {
err = multierror.Append(
err,
fmt.Errorf(
"PlacementStrategy must in %+v",
allPlacementStrategies,
),
)
}
pickerIndex := -1
for p, pickerMethod := range allPickerMethods {
if pickerMethod == placement.Picker {
pickerIndex = p
break
}
}
if pickerIndex == -1 {
err = multierror.Append(
err,
fmt.Errorf(
"PickerMethod must in %+v",
allPickerMethods,
),
)
}
switch placement.Strategy {
case PlacementStrategyBalancedLeaders:
if numRacks > 0 && t.Spec.Partitions%numRacks != 0 {
// The balanced-leaders strategy requires that the
// partitions be a multiple of the number of racks, otherwise it's impossible
// to find a placement that satisfies the strategy.
err = multierror.Append(
err,
fmt.Errorf(
"Number of partitions (%d) is not a multiple of the number of racks (%d)",
t.Spec.Partitions,
numRacks,
),
)
}
case PlacementStrategyCrossRack:
if numRacks > 0 && t.Spec.ReplicationFactor > numRacks {
err = multierror.Append(
err,
fmt.Errorf(
"Replication factor (%d) cannot be larger than the number of racks (%d)",
t.Spec.ReplicationFactor,
numRacks,
),
)
}
case PlacementStrategyInRack:
case PlacementStrategyStatic:
if len(placement.StaticAssignments) != t.Spec.Partitions {
err = multierror.Append(
err,
errors.New("Static assignments must be same length as partitions"),
)
} else {
for _, replicas := range placement.StaticAssignments {
if len(replicas) != t.Spec.ReplicationFactor {
err = multierror.Append(
err,
errors.New("Static assignment rows must match replication factor"),
)
break
}
}
}
case PlacementStrategyStaticInRack:
if len(placement.StaticRackAssignments) != t.Spec.Partitions {
err = multierror.Append(
err,
errors.New("Static rack assignments must be same length as partitions"),
)
}
}
// Warn about the partition count in the non-balanced-leaders case
if numRacks > 0 &&
placement.Strategy != PlacementStrategyBalancedLeaders &&
t.Spec.Partitions%numRacks != 0 {
log.Warnf("Number of partitions (%d) is not a multiple of the number of racks (%d)",
t.Spec.Partitions,
numRacks,
)
}
return err
}
// ToYAML converts the current TopicConfig to a YAML string.
func (t TopicConfig) ToYAML() (string, error) {
outBytes, err := yaml.Marshal(t)
if err != nil {
return "", err
}
return string(outBytes), nil
}
// TopicConfigFromTopicInfo generates a TopicConfig from a ClusterConfig and admin.TopicInfo
// struct generated from the cluster state.
func | (
clusterConfig ClusterConfig,
topicInfo admin.TopicInfo,
) TopicConfig {
topicConfig := TopicConfig{
Meta: TopicMeta{
Name: topicInfo.Name,
Cluster: clusterConfig.Meta.Name,
Region: clusterConfig.Meta.Region,
Environment: clusterConfig.Meta.Environment,
Description: "Bootstrapped via topicctl bootstrap",
},
Spec: TopicSpec{
Partitions: len(topicInfo.Partitions),
ReplicationFactor: len(topicInfo.Partitions[0].Replicas),
PlacementConfig: TopicPlacementConfig{
Strategy: PlacementStrategyAny,
},
},
}
topicConfig.Spec.Settings = FromConfigMap(topicInfo.Config)
retentionMinutes := topicInfo.Retention().Minutes()
if retentionMinutes >= 1.0 && float64(int(retentionMinutes)) == retentionMinutes {
topicConfig.Spec.RetentionMinutes = int(retentionMinutes)
delete(topicConfig.Spec.Settings, admin.RetentionKey)
}
return topicConfig
}
| TopicConfigFromTopicInfo | identifier_name |
oid.rs | use crate::*;
use alloc::borrow::Cow;
#[cfg(not(feature = "std"))]
use alloc::format;
use alloc::string::{String, ToString};
use alloc::vec::Vec;
use core::{
convert::TryFrom, fmt, iter::FusedIterator, marker::PhantomData, ops::Shl, str::FromStr,
};
#[cfg(feature = "bigint")]
use num_bigint::BigUint;
use num_traits::Num;
/// An error for OID parsing functions.
#[derive(Debug)]
pub enum OidParseError {
TooShort,
/// Signalizes that the first or second component is too large.
/// The first must be within the range 0 to 6 (inclusive).
/// The second component must be less than 40.
FirstComponentsTooLarge,
ParseIntError,
}
/// Object ID (OID) representation which can be relative or non-relative.
/// An example for an OID in string representation is `"1.2.840.113549.1.1.5"`.
///
/// For non-relative OIDs restrictions apply to the first two components.
///
/// This library contains a procedural macro `oid` which can be used to
/// create oids. For example `oid!(1.2.44.233)` or `oid!(rel 44.233)`
/// for relative oids. See the [module documentation](index.html) for more information.
#[derive(Hash, PartialEq, Eq, Clone)]
pub struct Oid<'a> {
asn1: Cow<'a, [u8]>,
relative: bool,
}
impl<'a> TryFrom<Any<'a>> for Oid<'a> {
type Error = Error;
fn try_from(any: Any<'a>) -> Result<Self> {
TryFrom::try_from(&any)
}
}
impl<'a, 'b> TryFrom<&'b Any<'a>> for Oid<'a> {
type Error = Error;
fn try_from(any: &'b Any<'a>) -> Result<Self> {
// check that any.data.last().unwrap() >> 7 == 0u8
let asn1 = Cow::Borrowed(any.data);
Ok(Oid::new(asn1))
}
}
impl<'a> CheckDerConstraints for Oid<'a> {
fn check_constraints(any: &Any) -> Result<()> {
any.header.assert_primitive()?;
any.header.length.assert_definite()?;
Ok(())
}
}
| }
#[cfg(feature = "std")]
impl ToDer for Oid<'_> {
fn to_der_len(&self) -> Result<usize> {
// OID/REL-OID tag will not change header size, so we don't care here
let header = Header::new(
Class::Universal,
false,
Self::TAG,
Length::Definite(self.asn1.len()),
);
Ok(header.to_der_len()? + self.asn1.len())
}
fn write_der_header(&self, writer: &mut dyn std::io::Write) -> SerializeResult<usize> {
let tag = if self.relative {
Tag::RelativeOid
} else {
Tag::Oid
};
let header = Header::new(
Class::Universal,
false,
tag,
Length::Definite(self.asn1.len()),
);
header.write_der_header(writer).map_err(Into::into)
}
fn write_der_content(&self, writer: &mut dyn std::io::Write) -> SerializeResult<usize> {
writer.write(&self.asn1).map_err(Into::into)
}
}
fn encode_relative(ids: &'_ [u64]) -> impl Iterator<Item = u8> + '_ {
ids.iter().flat_map(|id| {
let bit_count = 64 - id.leading_zeros();
let octets_needed = ((bit_count + 6) / 7).max(1);
(0..octets_needed).map(move |i| {
let flag = if i == octets_needed - 1 { 0 } else { 1 << 7 };
((id >> (7 * (octets_needed - 1 - i))) & 0b111_1111) as u8 | flag
})
})
}
impl<'a> Oid<'a> {
/// Create an OID from the ASN.1 DER encoded form. See the [module documentation](index.html)
/// for other ways to create oids.
pub const fn new(asn1: Cow<'a, [u8]>) -> Oid {
Oid {
asn1,
relative: false,
}
}
/// Create a relative OID from the ASN.1 DER encoded form. See the [module documentation](index.html)
/// for other ways to create relative oids.
pub const fn new_relative(asn1: Cow<'a, [u8]>) -> Oid {
Oid {
asn1,
relative: true,
}
}
/// Build an OID from an array of object identifier components.
/// This method allocates memory on the heap.
pub fn from(s: &[u64]) -> core::result::Result<Oid<'static>, OidParseError> {
if s.len() < 2 {
if s.len() == 1 && s[0] == 0 {
return Ok(Oid {
asn1: Cow::Borrowed(&[0]),
relative: false,
});
}
return Err(OidParseError::TooShort);
}
if s[0] >= 7 || s[1] >= 40 {
return Err(OidParseError::FirstComponentsTooLarge);
}
let asn1_encoded: Vec<u8> = [(s[0] * 40 + s[1]) as u8]
.iter()
.copied()
.chain(encode_relative(&s[2..]))
.collect();
Ok(Oid {
asn1: Cow::from(asn1_encoded),
relative: false,
})
}
/// Build a relative OID from an array of object identifier components.
pub fn from_relative(s: &[u64]) -> core::result::Result<Oid<'static>, OidParseError> {
if s.is_empty() {
return Err(OidParseError::TooShort);
}
let asn1_encoded: Vec<u8> = encode_relative(s).collect();
Ok(Oid {
asn1: Cow::from(asn1_encoded),
relative: true,
})
}
/// Create a deep copy of the oid.
///
/// This method allocates data on the heap. The returned oid
/// can be used without keeping the ASN.1 representation around.
///
/// Cloning the returned oid does again allocate data.
pub fn to_owned(&self) -> Oid<'static> {
Oid {
asn1: Cow::from(self.asn1.to_vec()),
relative: self.relative,
}
}
/// Get the encoded oid without the header.
#[inline]
pub fn as_bytes(&self) -> &[u8] {
self.asn1.as_ref()
}
/// Get the encoded oid without the header.
#[deprecated(since = "0.2.0", note = "Use `as_bytes` instead")]
#[inline]
pub fn bytes(&self) -> &[u8] {
self.as_bytes()
}
/// Get the bytes representation of the encoded oid
pub fn into_cow(self) -> Cow<'a, [u8]> {
self.asn1
}
/// Convert the OID to a string representation.
/// The string contains the IDs separated by dots, for ex: "1.2.840.113549.1.1.5"
#[cfg(feature = "bigint")]
pub fn to_id_string(&self) -> String {
let ints: Vec<String> = self.iter_bigint().map(|i| i.to_string()).collect();
ints.join(".")
}
#[cfg(not(feature = "bigint"))]
/// Convert the OID to a string representation.
///
/// If every arc fits into a u64 a string like "1.2.840.113549.1.1.5"
/// is returned, otherwise a hex representation.
///
/// See also the "bigint" feature of this crate.
pub fn to_id_string(&self) -> String {
if let Some(arcs) = self.iter() {
let ints: Vec<String> = arcs.map(|i| i.to_string()).collect();
ints.join(".")
} else {
let mut ret = String::with_capacity(self.asn1.len() * 3);
for (i, o) in self.asn1.iter().enumerate() {
ret.push_str(&format!("{:02x}", o));
if i + 1 != self.asn1.len() {
ret.push(' ');
}
}
ret
}
}
/// Return an iterator over the sub-identifiers (arcs).
#[cfg(feature = "bigint")]
pub fn iter_bigint(
&'_ self,
) -> impl Iterator<Item = BigUint> + FusedIterator + ExactSizeIterator + '_ {
SubIdentifierIterator {
oid: self,
pos: 0,
first: false,
n: PhantomData,
}
}
/// Return an iterator over the sub-identifiers (arcs).
/// Returns `None` if at least one arc does not fit into `u64`.
pub fn iter(
&'_ self,
) -> Option<impl Iterator<Item = u64> + FusedIterator + ExactSizeIterator + '_> {
// Check that every arc fits into u64
let bytes = if self.relative {
&self.asn1
} else if self.asn1.is_empty() {
&[]
} else {
&self.asn1[1..]
};
let max_bits = bytes
.iter()
.fold((0usize, 0usize), |(max, cur), c| {
let is_end = (c >> 7) == 0u8;
if is_end {
(max.max(cur + 7), 0)
} else {
(max, cur + 7)
}
})
.0;
if max_bits > 64 {
return None;
}
Some(SubIdentifierIterator {
oid: self,
pos: 0,
first: false,
n: PhantomData,
})
}
pub fn from_ber_relative(bytes: &'a [u8]) -> ParseResult<'a, Self> {
let (rem, any) = Any::from_ber(bytes)?;
any.header.assert_primitive()?;
any.header.assert_tag(Tag::RelativeOid)?;
let asn1 = Cow::Borrowed(any.data);
Ok((rem, Oid::new_relative(asn1)))
}
pub fn from_der_relative(bytes: &'a [u8]) -> ParseResult<'a, Self> {
let (rem, any) = Any::from_der(bytes)?;
any.header.assert_tag(Tag::RelativeOid)?;
Self::check_constraints(&any)?;
let asn1 = Cow::Borrowed(any.data);
Ok((rem, Oid::new_relative(asn1)))
}
/// Returns true if `needle` is a prefix of the OID.
pub fn starts_with(&self, needle: &Oid) -> bool {
self.asn1.len() >= needle.asn1.len() && self.asn1.starts_with(needle.as_bytes())
}
}
trait Repr: Num + Shl<usize, Output = Self> + From<u8> {}
impl<N> Repr for N where N: Num + Shl<usize, Output = N> + From<u8> {}
struct SubIdentifierIterator<'a, N: Repr> {
oid: &'a Oid<'a>,
pos: usize,
first: bool,
n: PhantomData<&'a N>,
}
impl<'a, N: Repr> Iterator for SubIdentifierIterator<'a, N> {
type Item = N;
fn next(&mut self) -> Option<Self::Item> {
use num_traits::identities::Zero;
if self.pos == self.oid.asn1.len() {
return None;
}
if !self.oid.relative {
if !self.first {
debug_assert!(self.pos == 0);
self.first = true;
return Some((self.oid.asn1[0] / 40).into());
} else if self.pos == 0 {
self.pos += 1;
if self.oid.asn1[0] == 0 && self.oid.asn1.len() == 1 {
return None;
}
return Some((self.oid.asn1[0] % 40).into());
}
}
// decode objet sub-identifier according to the asn.1 standard
let mut res = <N as Zero>::zero();
for o in self.oid.asn1[self.pos..].iter() {
self.pos += 1;
res = (res << 7) + (o & 0b111_1111).into();
let flag = o >> 7;
if flag == 0u8 {
break;
}
}
Some(res)
}
}
impl<'a, N: Repr> FusedIterator for SubIdentifierIterator<'a, N> {}
impl<'a, N: Repr> ExactSizeIterator for SubIdentifierIterator<'a, N> {
fn len(&self) -> usize {
if self.oid.relative {
self.oid.asn1.iter().filter(|o| (*o >> 7) == 0u8).count()
} else if self.oid.asn1.len() == 0 {
0
} else if self.oid.asn1.len() == 1 {
if self.oid.asn1[0] == 0 {
1
} else {
2
}
} else {
2 + self.oid.asn1[2..]
.iter()
.filter(|o| (*o >> 7) == 0u8)
.count()
}
}
#[cfg(feature = "exact_size_is_empty")]
fn is_empty(&self) -> bool {
self.oid.asn1.is_empty()
}
}
impl<'a> fmt::Display for Oid<'a> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
if self.relative {
f.write_str("rel. ")?;
}
f.write_str(&self.to_id_string())
}
}
impl<'a> fmt::Debug for Oid<'a> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.write_str("OID(")?;
<Oid as fmt::Display>::fmt(self, f)?;
f.write_str(")")
}
}
impl<'a> FromStr for Oid<'a> {
type Err = OidParseError;
fn from_str(s: &str) -> core::result::Result<Self, Self::Err> {
let v: core::result::Result<Vec<_>, _> = s.split('.').map(|c| c.parse::<u64>()).collect();
v.map_err(|_| OidParseError::ParseIntError)
.and_then(|v| Oid::from(&v))
}
}
/// Helper macro to declare integers at compile-time
///
/// Since the DER encoded oids are not very readable we provide a
/// procedural macro `oid!`. The macro can be used the following ways:
///
/// - `oid!(1.4.42.23)`: Create a const expression for the corresponding `Oid<'static>`
/// - `oid!(rel 42.23)`: Create a const expression for the corresponding relative `Oid<'static>`
/// - `oid!(raw 1.4.42.23)`/`oid!(raw rel 42.23)`: Obtain the DER encoded form as a byte array.
///
/// # Comparing oids
///
/// Comparing a parsed oid to a static oid is probably the most common
/// thing done with oids in your code. The `oid!` macro can be used in expression positions for
/// this purpose. For example
/// ```
/// use asn1_rs::{oid, Oid};
///
/// # let some_oid: Oid<'static> = oid!(1.2.456);
/// const SOME_STATIC_OID: Oid<'static> = oid!(1.2.456);
/// assert_eq!(some_oid, SOME_STATIC_OID)
/// ```
/// To get a relative Oid use `oid!(rel 1.2)`.
///
/// Because of limitations for procedural macros ([rust issue](https://github.com/rust-lang/rust/issues/54727))
/// and constants used in patterns ([rust issue](https://github.com/rust-lang/rust/issues/31434))
/// the `oid` macro can not directly be used in patterns, also not through constants.
/// You can do this, though:
/// ```
/// # use asn1_rs::{oid, Oid};
/// # let some_oid: Oid<'static> = oid!(1.2.456);
/// const SOME_OID: Oid<'static> = oid!(1.2.456);
/// if some_oid == SOME_OID || some_oid == oid!(1.2.456) {
/// println!("match");
/// }
///
/// // Alternatively, compare the DER encoded form directly:
/// const SOME_OID_RAW: &[u8] = &oid!(raw 1.2.456);
/// match some_oid.as_bytes() {
/// SOME_OID_RAW => println!("match"),
/// _ => panic!("no match"),
/// }
/// ```
/// *Attention*, be aware that the latter version might not handle the case of a relative oid correctly. An
/// extra check might be necessary.
#[macro_export]
macro_rules! oid {
(raw $( $item:literal ).*) => {
$crate::exports::asn1_rs_impl::encode_oid!( $( $item ).* )
};
(raw $items:expr) => {
$crate::exports::asn1_rs_impl::encode_oid!($items)
};
(rel $($item:literal ).*) => {
$crate::Oid::new_relative($crate::exports::borrow::Cow::Borrowed(
&$crate::exports::asn1_rs_impl::encode_oid!(rel $( $item ).*),
))
};
($($item:literal ).*) => {
$crate::Oid::new($crate::exports::borrow::Cow::Borrowed(
&$crate::oid!(raw $( $item ).*),
))
};
}
#[cfg(test)]
mod tests {
use crate::{FromDer, Oid, ToDer};
use hex_literal::hex;
#[test]
fn declare_oid() {
let oid = super::oid! {1.2.840.113549.1};
assert_eq!(oid.to_string(), "1.2.840.113549.1");
}
const OID_RSA_ENCRYPTION: &[u8] = &oid! {raw 1.2.840.113549.1.1.1};
const OID_EC_PUBLIC_KEY: &[u8] = &oid! {raw 1.2.840.10045.2.1};
#[allow(clippy::match_like_matches_macro)]
fn compare_oid(oid: &Oid) -> bool {
match oid.as_bytes() {
OID_RSA_ENCRYPTION => true,
OID_EC_PUBLIC_KEY => true,
_ => false,
}
}
#[test]
fn test_compare_oid() {
let oid = Oid::from(&[1, 2, 840, 113_549, 1, 1, 1]).unwrap();
assert_eq!(oid, oid! {1.2.840.113549.1.1.1});
let oid = Oid::from(&[1, 2, 840, 113_549, 1, 1, 1]).unwrap();
assert!(compare_oid(&oid));
}
#[test]
fn oid_to_der() {
let oid = super::oid! {1.2.840.113549.1};
assert_eq!(oid.to_der_len(), Ok(9));
let v = oid.to_der_vec().expect("could not serialize");
assert_eq!(&v, &hex! {"06 07 2a 86 48 86 f7 0d 01"});
let (_, oid2) = Oid::from_der(&v).expect("could not re-parse");
assert_eq!(&oid, &oid2);
}
#[test]
fn oid_starts_with() {
const OID_RSA_ENCRYPTION: Oid = oid! {1.2.840.113549.1.1.1};
const OID_EC_PUBLIC_KEY: Oid = oid! {1.2.840.10045.2.1};
let oid = super::oid! {1.2.840.113549.1};
assert!(OID_RSA_ENCRYPTION.starts_with(&oid));
assert!(!OID_EC_PUBLIC_KEY.starts_with(&oid));
}
#[test]
fn oid_macro_parameters() {
// Code inspired from https://github.com/rusticata/der-parser/issues/68
macro_rules! foo {
($a:literal $b:literal $c:literal) => {
super::oid!($a.$b.$c)
};
}
let oid = foo!(1 2 3);
assert_eq!(oid, oid! {1.2.3});
}
} | impl DerAutoDerive for Oid<'_> {}
impl<'a> Tagged for Oid<'a> {
const TAG: Tag = Tag::Oid; | random_line_split |
oid.rs | use crate::*;
use alloc::borrow::Cow;
#[cfg(not(feature = "std"))]
use alloc::format;
use alloc::string::{String, ToString};
use alloc::vec::Vec;
use core::{
convert::TryFrom, fmt, iter::FusedIterator, marker::PhantomData, ops::Shl, str::FromStr,
};
#[cfg(feature = "bigint")]
use num_bigint::BigUint;
use num_traits::Num;
/// An error for OID parsing functions.
#[derive(Debug)]
pub enum OidParseError {
TooShort,
/// Signalizes that the first or second component is too large.
/// The first must be within the range 0 to 6 (inclusive).
/// The second component must be less than 40.
FirstComponentsTooLarge,
ParseIntError,
}
/// Object ID (OID) representation which can be relative or non-relative.
/// An example for an OID in string representation is `"1.2.840.113549.1.1.5"`.
///
/// For non-relative OIDs restrictions apply to the first two components.
///
/// This library contains a procedural macro `oid` which can be used to
/// create oids. For example `oid!(1.2.44.233)` or `oid!(rel 44.233)`
/// for relative oids. See the [module documentation](index.html) for more information.
#[derive(Hash, PartialEq, Eq, Clone)]
pub struct Oid<'a> {
asn1: Cow<'a, [u8]>,
relative: bool,
}
impl<'a> TryFrom<Any<'a>> for Oid<'a> {
type Error = Error;
fn try_from(any: Any<'a>) -> Result<Self> {
TryFrom::try_from(&any)
}
}
impl<'a, 'b> TryFrom<&'b Any<'a>> for Oid<'a> {
type Error = Error;
fn try_from(any: &'b Any<'a>) -> Result<Self> {
// check that any.data.last().unwrap() >> 7 == 0u8
let asn1 = Cow::Borrowed(any.data);
Ok(Oid::new(asn1))
}
}
impl<'a> CheckDerConstraints for Oid<'a> {
fn check_constraints(any: &Any) -> Result<()> {
any.header.assert_primitive()?;
any.header.length.assert_definite()?;
Ok(())
}
}
impl DerAutoDerive for Oid<'_> {}
impl<'a> Tagged for Oid<'a> {
const TAG: Tag = Tag::Oid;
}
#[cfg(feature = "std")]
impl ToDer for Oid<'_> {
fn to_der_len(&self) -> Result<usize> {
// OID/REL-OID tag will not change header size, so we don't care here
let header = Header::new(
Class::Universal,
false,
Self::TAG,
Length::Definite(self.asn1.len()),
);
Ok(header.to_der_len()? + self.asn1.len())
}
fn write_der_header(&self, writer: &mut dyn std::io::Write) -> SerializeResult<usize> {
let tag = if self.relative {
Tag::RelativeOid
} else {
Tag::Oid
};
let header = Header::new(
Class::Universal,
false,
tag,
Length::Definite(self.asn1.len()),
);
header.write_der_header(writer).map_err(Into::into)
}
fn write_der_content(&self, writer: &mut dyn std::io::Write) -> SerializeResult<usize> {
writer.write(&self.asn1).map_err(Into::into)
}
}
fn encode_relative(ids: &'_ [u64]) -> impl Iterator<Item = u8> + '_ {
ids.iter().flat_map(|id| {
let bit_count = 64 - id.leading_zeros();
let octets_needed = ((bit_count + 6) / 7).max(1);
(0..octets_needed).map(move |i| {
let flag = if i == octets_needed - 1 { 0 } else { 1 << 7 };
((id >> (7 * (octets_needed - 1 - i))) & 0b111_1111) as u8 | flag
})
})
}
impl<'a> Oid<'a> {
/// Create an OID from the ASN.1 DER encoded form. See the [module documentation](index.html)
/// for other ways to create oids.
pub const fn new(asn1: Cow<'a, [u8]>) -> Oid {
Oid {
asn1,
relative: false,
}
}
/// Create a relative OID from the ASN.1 DER encoded form. See the [module documentation](index.html)
/// for other ways to create relative oids.
pub const fn new_relative(asn1: Cow<'a, [u8]>) -> Oid {
Oid {
asn1,
relative: true,
}
}
/// Build an OID from an array of object identifier components.
/// This method allocates memory on the heap.
pub fn from(s: &[u64]) -> core::result::Result<Oid<'static>, OidParseError> {
if s.len() < 2 {
if s.len() == 1 && s[0] == 0 {
return Ok(Oid {
asn1: Cow::Borrowed(&[0]),
relative: false,
});
}
return Err(OidParseError::TooShort);
}
if s[0] >= 7 || s[1] >= 40 {
return Err(OidParseError::FirstComponentsTooLarge);
}
let asn1_encoded: Vec<u8> = [(s[0] * 40 + s[1]) as u8]
.iter()
.copied()
.chain(encode_relative(&s[2..]))
.collect();
Ok(Oid {
asn1: Cow::from(asn1_encoded),
relative: false,
})
}
/// Build a relative OID from an array of object identifier components.
pub fn from_relative(s: &[u64]) -> core::result::Result<Oid<'static>, OidParseError> {
if s.is_empty() {
return Err(OidParseError::TooShort);
}
let asn1_encoded: Vec<u8> = encode_relative(s).collect();
Ok(Oid {
asn1: Cow::from(asn1_encoded),
relative: true,
})
}
/// Create a deep copy of the oid.
///
/// This method allocates data on the heap. The returned oid
/// can be used without keeping the ASN.1 representation around.
///
/// Cloning the returned oid does again allocate data.
pub fn to_owned(&self) -> Oid<'static> {
Oid {
asn1: Cow::from(self.asn1.to_vec()),
relative: self.relative,
}
}
/// Get the encoded oid without the header.
#[inline]
pub fn as_bytes(&self) -> &[u8] {
self.asn1.as_ref()
}
/// Get the encoded oid without the header.
#[deprecated(since = "0.2.0", note = "Use `as_bytes` instead")]
#[inline]
pub fn bytes(&self) -> &[u8] {
self.as_bytes()
}
/// Get the bytes representation of the encoded oid
pub fn into_cow(self) -> Cow<'a, [u8]> {
self.asn1
}
/// Convert the OID to a string representation.
/// The string contains the IDs separated by dots, for ex: "1.2.840.113549.1.1.5"
#[cfg(feature = "bigint")]
pub fn to_id_string(&self) -> String {
let ints: Vec<String> = self.iter_bigint().map(|i| i.to_string()).collect();
ints.join(".")
}
#[cfg(not(feature = "bigint"))]
/// Convert the OID to a string representation.
///
/// If every arc fits into a u64 a string like "1.2.840.113549.1.1.5"
/// is returned, otherwise a hex representation.
///
/// See also the "bigint" feature of this crate.
pub fn to_id_string(&self) -> String {
if let Some(arcs) = self.iter() {
let ints: Vec<String> = arcs.map(|i| i.to_string()).collect();
ints.join(".")
} else {
let mut ret = String::with_capacity(self.asn1.len() * 3);
for (i, o) in self.asn1.iter().enumerate() {
ret.push_str(&format!("{:02x}", o));
if i + 1 != self.asn1.len() {
ret.push(' ');
}
}
ret
}
}
/// Return an iterator over the sub-identifiers (arcs).
#[cfg(feature = "bigint")]
pub fn iter_bigint(
&'_ self,
) -> impl Iterator<Item = BigUint> + FusedIterator + ExactSizeIterator + '_ {
SubIdentifierIterator {
oid: self,
pos: 0,
first: false,
n: PhantomData,
}
}
/// Return an iterator over the sub-identifiers (arcs).
/// Returns `None` if at least one arc does not fit into `u64`.
pub fn iter(
&'_ self,
) -> Option<impl Iterator<Item = u64> + FusedIterator + ExactSizeIterator + '_> |
pub fn from_ber_relative(bytes: &'a [u8]) -> ParseResult<'a, Self> {
let (rem, any) = Any::from_ber(bytes)?;
any.header.assert_primitive()?;
any.header.assert_tag(Tag::RelativeOid)?;
let asn1 = Cow::Borrowed(any.data);
Ok((rem, Oid::new_relative(asn1)))
}
pub fn from_der_relative(bytes: &'a [u8]) -> ParseResult<'a, Self> {
let (rem, any) = Any::from_der(bytes)?;
any.header.assert_tag(Tag::RelativeOid)?;
Self::check_constraints(&any)?;
let asn1 = Cow::Borrowed(any.data);
Ok((rem, Oid::new_relative(asn1)))
}
/// Returns true if `needle` is a prefix of the OID.
pub fn starts_with(&self, needle: &Oid) -> bool {
self.asn1.len() >= needle.asn1.len() && self.asn1.starts_with(needle.as_bytes())
}
}
trait Repr: Num + Shl<usize, Output = Self> + From<u8> {}
impl<N> Repr for N where N: Num + Shl<usize, Output = N> + From<u8> {}
struct SubIdentifierIterator<'a, N: Repr> {
oid: &'a Oid<'a>,
pos: usize,
first: bool,
n: PhantomData<&'a N>,
}
impl<'a, N: Repr> Iterator for SubIdentifierIterator<'a, N> {
type Item = N;
fn next(&mut self) -> Option<Self::Item> {
use num_traits::identities::Zero;
if self.pos == self.oid.asn1.len() {
return None;
}
if !self.oid.relative {
if !self.first {
debug_assert!(self.pos == 0);
self.first = true;
return Some((self.oid.asn1[0] / 40).into());
} else if self.pos == 0 {
self.pos += 1;
if self.oid.asn1[0] == 0 && self.oid.asn1.len() == 1 {
return None;
}
return Some((self.oid.asn1[0] % 40).into());
}
}
// decode objet sub-identifier according to the asn.1 standard
let mut res = <N as Zero>::zero();
for o in self.oid.asn1[self.pos..].iter() {
self.pos += 1;
res = (res << 7) + (o & 0b111_1111).into();
let flag = o >> 7;
if flag == 0u8 {
break;
}
}
Some(res)
}
}
impl<'a, N: Repr> FusedIterator for SubIdentifierIterator<'a, N> {}
impl<'a, N: Repr> ExactSizeIterator for SubIdentifierIterator<'a, N> {
fn len(&self) -> usize {
if self.oid.relative {
self.oid.asn1.iter().filter(|o| (*o >> 7) == 0u8).count()
} else if self.oid.asn1.len() == 0 {
0
} else if self.oid.asn1.len() == 1 {
if self.oid.asn1[0] == 0 {
1
} else {
2
}
} else {
2 + self.oid.asn1[2..]
.iter()
.filter(|o| (*o >> 7) == 0u8)
.count()
}
}
#[cfg(feature = "exact_size_is_empty")]
fn is_empty(&self) -> bool {
self.oid.asn1.is_empty()
}
}
impl<'a> fmt::Display for Oid<'a> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
if self.relative {
f.write_str("rel. ")?;
}
f.write_str(&self.to_id_string())
}
}
impl<'a> fmt::Debug for Oid<'a> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.write_str("OID(")?;
<Oid as fmt::Display>::fmt(self, f)?;
f.write_str(")")
}
}
impl<'a> FromStr for Oid<'a> {
type Err = OidParseError;
fn from_str(s: &str) -> core::result::Result<Self, Self::Err> {
let v: core::result::Result<Vec<_>, _> = s.split('.').map(|c| c.parse::<u64>()).collect();
v.map_err(|_| OidParseError::ParseIntError)
.and_then(|v| Oid::from(&v))
}
}
/// Helper macro to declare integers at compile-time
///
/// Since the DER encoded oids are not very readable we provide a
/// procedural macro `oid!`. The macro can be used the following ways:
///
/// - `oid!(1.4.42.23)`: Create a const expression for the corresponding `Oid<'static>`
/// - `oid!(rel 42.23)`: Create a const expression for the corresponding relative `Oid<'static>`
/// - `oid!(raw 1.4.42.23)`/`oid!(raw rel 42.23)`: Obtain the DER encoded form as a byte array.
///
/// # Comparing oids
///
/// Comparing a parsed oid to a static oid is probably the most common
/// thing done with oids in your code. The `oid!` macro can be used in expression positions for
/// this purpose. For example
/// ```
/// use asn1_rs::{oid, Oid};
///
/// # let some_oid: Oid<'static> = oid!(1.2.456);
/// const SOME_STATIC_OID: Oid<'static> = oid!(1.2.456);
/// assert_eq!(some_oid, SOME_STATIC_OID)
/// ```
/// To get a relative Oid use `oid!(rel 1.2)`.
///
/// Because of limitations for procedural macros ([rust issue](https://github.com/rust-lang/rust/issues/54727))
/// and constants used in patterns ([rust issue](https://github.com/rust-lang/rust/issues/31434))
/// the `oid` macro can not directly be used in patterns, also not through constants.
/// You can do this, though:
/// ```
/// # use asn1_rs::{oid, Oid};
/// # let some_oid: Oid<'static> = oid!(1.2.456);
/// const SOME_OID: Oid<'static> = oid!(1.2.456);
/// if some_oid == SOME_OID || some_oid == oid!(1.2.456) {
/// println!("match");
/// }
///
/// // Alternatively, compare the DER encoded form directly:
/// const SOME_OID_RAW: &[u8] = &oid!(raw 1.2.456);
/// match some_oid.as_bytes() {
/// SOME_OID_RAW => println!("match"),
/// _ => panic!("no match"),
/// }
/// ```
/// *Attention*, be aware that the latter version might not handle the case of a relative oid correctly. An
/// extra check might be necessary.
#[macro_export]
macro_rules! oid {
(raw $( $item:literal ).*) => {
$crate::exports::asn1_rs_impl::encode_oid!( $( $item ).* )
};
(raw $items:expr) => {
$crate::exports::asn1_rs_impl::encode_oid!($items)
};
(rel $($item:literal ).*) => {
$crate::Oid::new_relative($crate::exports::borrow::Cow::Borrowed(
&$crate::exports::asn1_rs_impl::encode_oid!(rel $( $item ).*),
))
};
($($item:literal ).*) => {
$crate::Oid::new($crate::exports::borrow::Cow::Borrowed(
&$crate::oid!(raw $( $item ).*),
))
};
}
#[cfg(test)]
mod tests {
use crate::{FromDer, Oid, ToDer};
use hex_literal::hex;
#[test]
fn declare_oid() {
let oid = super::oid! {1.2.840.113549.1};
assert_eq!(oid.to_string(), "1.2.840.113549.1");
}
const OID_RSA_ENCRYPTION: &[u8] = &oid! {raw 1.2.840.113549.1.1.1};
const OID_EC_PUBLIC_KEY: &[u8] = &oid! {raw 1.2.840.10045.2.1};
#[allow(clippy::match_like_matches_macro)]
fn compare_oid(oid: &Oid) -> bool {
match oid.as_bytes() {
OID_RSA_ENCRYPTION => true,
OID_EC_PUBLIC_KEY => true,
_ => false,
}
}
#[test]
fn test_compare_oid() {
let oid = Oid::from(&[1, 2, 840, 113_549, 1, 1, 1]).unwrap();
assert_eq!(oid, oid! {1.2.840.113549.1.1.1});
let oid = Oid::from(&[1, 2, 840, 113_549, 1, 1, 1]).unwrap();
assert!(compare_oid(&oid));
}
#[test]
fn oid_to_der() {
let oid = super::oid! {1.2.840.113549.1};
assert_eq!(oid.to_der_len(), Ok(9));
let v = oid.to_der_vec().expect("could not serialize");
assert_eq!(&v, &hex! {"06 07 2a 86 48 86 f7 0d 01"});
let (_, oid2) = Oid::from_der(&v).expect("could not re-parse");
assert_eq!(&oid, &oid2);
}
#[test]
fn oid_starts_with() {
const OID_RSA_ENCRYPTION: Oid = oid! {1.2.840.113549.1.1.1};
const OID_EC_PUBLIC_KEY: Oid = oid! {1.2.840.10045.2.1};
let oid = super::oid! {1.2.840.113549.1};
assert!(OID_RSA_ENCRYPTION.starts_with(&oid));
assert!(!OID_EC_PUBLIC_KEY.starts_with(&oid));
}
#[test]
fn oid_macro_parameters() {
// Code inspired from https://github.com/rusticata/der-parser/issues/68
macro_rules! foo {
($a:literal $b:literal $c:literal) => {
super::oid!($a.$b.$c)
};
}
let oid = foo!(1 2 3);
assert_eq!(oid, oid! {1.2.3});
}
}
| {
// Check that every arc fits into u64
let bytes = if self.relative {
&self.asn1
} else if self.asn1.is_empty() {
&[]
} else {
&self.asn1[1..]
};
let max_bits = bytes
.iter()
.fold((0usize, 0usize), |(max, cur), c| {
let is_end = (c >> 7) == 0u8;
if is_end {
(max.max(cur + 7), 0)
} else {
(max, cur + 7)
}
})
.0;
if max_bits > 64 {
return None;
}
Some(SubIdentifierIterator {
oid: self,
pos: 0,
first: false,
n: PhantomData,
})
} | identifier_body |
oid.rs | use crate::*;
use alloc::borrow::Cow;
#[cfg(not(feature = "std"))]
use alloc::format;
use alloc::string::{String, ToString};
use alloc::vec::Vec;
use core::{
convert::TryFrom, fmt, iter::FusedIterator, marker::PhantomData, ops::Shl, str::FromStr,
};
#[cfg(feature = "bigint")]
use num_bigint::BigUint;
use num_traits::Num;
/// An error for OID parsing functions.
#[derive(Debug)]
pub enum OidParseError {
TooShort,
/// Signalizes that the first or second component is too large.
/// The first must be within the range 0 to 6 (inclusive).
/// The second component must be less than 40.
FirstComponentsTooLarge,
ParseIntError,
}
/// Object ID (OID) representation which can be relative or non-relative.
/// An example for an OID in string representation is `"1.2.840.113549.1.1.5"`.
///
/// For non-relative OIDs restrictions apply to the first two components.
///
/// This library contains a procedural macro `oid` which can be used to
/// create oids. For example `oid!(1.2.44.233)` or `oid!(rel 44.233)`
/// for relative oids. See the [module documentation](index.html) for more information.
#[derive(Hash, PartialEq, Eq, Clone)]
pub struct Oid<'a> {
asn1: Cow<'a, [u8]>,
relative: bool,
}
impl<'a> TryFrom<Any<'a>> for Oid<'a> {
type Error = Error;
fn try_from(any: Any<'a>) -> Result<Self> {
TryFrom::try_from(&any)
}
}
impl<'a, 'b> TryFrom<&'b Any<'a>> for Oid<'a> {
type Error = Error;
fn try_from(any: &'b Any<'a>) -> Result<Self> {
// check that any.data.last().unwrap() >> 7 == 0u8
let asn1 = Cow::Borrowed(any.data);
Ok(Oid::new(asn1))
}
}
impl<'a> CheckDerConstraints for Oid<'a> {
fn check_constraints(any: &Any) -> Result<()> {
any.header.assert_primitive()?;
any.header.length.assert_definite()?;
Ok(())
}
}
impl DerAutoDerive for Oid<'_> {}
impl<'a> Tagged for Oid<'a> {
const TAG: Tag = Tag::Oid;
}
#[cfg(feature = "std")]
impl ToDer for Oid<'_> {
fn to_der_len(&self) -> Result<usize> {
// OID/REL-OID tag will not change header size, so we don't care here
let header = Header::new(
Class::Universal,
false,
Self::TAG,
Length::Definite(self.asn1.len()),
);
Ok(header.to_der_len()? + self.asn1.len())
}
fn write_der_header(&self, writer: &mut dyn std::io::Write) -> SerializeResult<usize> {
let tag = if self.relative {
Tag::RelativeOid
} else {
Tag::Oid
};
let header = Header::new(
Class::Universal,
false,
tag,
Length::Definite(self.asn1.len()),
);
header.write_der_header(writer).map_err(Into::into)
}
fn write_der_content(&self, writer: &mut dyn std::io::Write) -> SerializeResult<usize> {
writer.write(&self.asn1).map_err(Into::into)
}
}
fn encode_relative(ids: &'_ [u64]) -> impl Iterator<Item = u8> + '_ {
ids.iter().flat_map(|id| {
let bit_count = 64 - id.leading_zeros();
let octets_needed = ((bit_count + 6) / 7).max(1);
(0..octets_needed).map(move |i| {
let flag = if i == octets_needed - 1 { 0 } else { 1 << 7 };
((id >> (7 * (octets_needed - 1 - i))) & 0b111_1111) as u8 | flag
})
})
}
impl<'a> Oid<'a> {
/// Create an OID from the ASN.1 DER encoded form. See the [module documentation](index.html)
/// for other ways to create oids.
pub const fn new(asn1: Cow<'a, [u8]>) -> Oid {
Oid {
asn1,
relative: false,
}
}
/// Create a relative OID from the ASN.1 DER encoded form. See the [module documentation](index.html)
/// for other ways to create relative oids.
pub const fn new_relative(asn1: Cow<'a, [u8]>) -> Oid {
Oid {
asn1,
relative: true,
}
}
/// Build an OID from an array of object identifier components.
/// This method allocates memory on the heap.
pub fn from(s: &[u64]) -> core::result::Result<Oid<'static>, OidParseError> {
if s.len() < 2 {
if s.len() == 1 && s[0] == 0 {
return Ok(Oid {
asn1: Cow::Borrowed(&[0]),
relative: false,
});
}
return Err(OidParseError::TooShort);
}
if s[0] >= 7 || s[1] >= 40 {
return Err(OidParseError::FirstComponentsTooLarge);
}
let asn1_encoded: Vec<u8> = [(s[0] * 40 + s[1]) as u8]
.iter()
.copied()
.chain(encode_relative(&s[2..]))
.collect();
Ok(Oid {
asn1: Cow::from(asn1_encoded),
relative: false,
})
}
/// Build a relative OID from an array of object identifier components.
pub fn from_relative(s: &[u64]) -> core::result::Result<Oid<'static>, OidParseError> {
if s.is_empty() {
return Err(OidParseError::TooShort);
}
let asn1_encoded: Vec<u8> = encode_relative(s).collect();
Ok(Oid {
asn1: Cow::from(asn1_encoded),
relative: true,
})
}
/// Create a deep copy of the oid.
///
/// This method allocates data on the heap. The returned oid
/// can be used without keeping the ASN.1 representation around.
///
/// Cloning the returned oid does again allocate data.
pub fn to_owned(&self) -> Oid<'static> {
Oid {
asn1: Cow::from(self.asn1.to_vec()),
relative: self.relative,
}
}
/// Get the encoded oid without the header.
#[inline]
pub fn as_bytes(&self) -> &[u8] {
self.asn1.as_ref()
}
/// Get the encoded oid without the header.
#[deprecated(since = "0.2.0", note = "Use `as_bytes` instead")]
#[inline]
pub fn bytes(&self) -> &[u8] {
self.as_bytes()
}
/// Get the bytes representation of the encoded oid
pub fn into_cow(self) -> Cow<'a, [u8]> {
self.asn1
}
/// Convert the OID to a string representation.
/// The string contains the IDs separated by dots, for ex: "1.2.840.113549.1.1.5"
#[cfg(feature = "bigint")]
pub fn to_id_string(&self) -> String {
let ints: Vec<String> = self.iter_bigint().map(|i| i.to_string()).collect();
ints.join(".")
}
#[cfg(not(feature = "bigint"))]
/// Convert the OID to a string representation.
///
/// If every arc fits into a u64 a string like "1.2.840.113549.1.1.5"
/// is returned, otherwise a hex representation.
///
/// See also the "bigint" feature of this crate.
pub fn to_id_string(&self) -> String {
if let Some(arcs) = self.iter() {
let ints: Vec<String> = arcs.map(|i| i.to_string()).collect();
ints.join(".")
} else {
let mut ret = String::with_capacity(self.asn1.len() * 3);
for (i, o) in self.asn1.iter().enumerate() {
ret.push_str(&format!("{:02x}", o));
if i + 1 != self.asn1.len() {
ret.push(' ');
}
}
ret
}
}
/// Return an iterator over the sub-identifiers (arcs).
#[cfg(feature = "bigint")]
pub fn iter_bigint(
&'_ self,
) -> impl Iterator<Item = BigUint> + FusedIterator + ExactSizeIterator + '_ {
SubIdentifierIterator {
oid: self,
pos: 0,
first: false,
n: PhantomData,
}
}
/// Return an iterator over the sub-identifiers (arcs).
/// Returns `None` if at least one arc does not fit into `u64`.
pub fn iter(
&'_ self,
) -> Option<impl Iterator<Item = u64> + FusedIterator + ExactSizeIterator + '_> {
// Check that every arc fits into u64
let bytes = if self.relative {
&self.asn1
} else if self.asn1.is_empty() {
&[]
} else {
&self.asn1[1..]
};
let max_bits = bytes
.iter()
.fold((0usize, 0usize), |(max, cur), c| {
let is_end = (c >> 7) == 0u8;
if is_end {
(max.max(cur + 7), 0)
} else {
(max, cur + 7)
}
})
.0;
if max_bits > 64 {
return None;
}
Some(SubIdentifierIterator {
oid: self,
pos: 0,
first: false,
n: PhantomData,
})
}
pub fn from_ber_relative(bytes: &'a [u8]) -> ParseResult<'a, Self> {
let (rem, any) = Any::from_ber(bytes)?;
any.header.assert_primitive()?;
any.header.assert_tag(Tag::RelativeOid)?;
let asn1 = Cow::Borrowed(any.data);
Ok((rem, Oid::new_relative(asn1)))
}
pub fn from_der_relative(bytes: &'a [u8]) -> ParseResult<'a, Self> {
let (rem, any) = Any::from_der(bytes)?;
any.header.assert_tag(Tag::RelativeOid)?;
Self::check_constraints(&any)?;
let asn1 = Cow::Borrowed(any.data);
Ok((rem, Oid::new_relative(asn1)))
}
/// Returns true if `needle` is a prefix of the OID.
pub fn starts_with(&self, needle: &Oid) -> bool {
self.asn1.len() >= needle.asn1.len() && self.asn1.starts_with(needle.as_bytes())
}
}
trait Repr: Num + Shl<usize, Output = Self> + From<u8> {}
impl<N> Repr for N where N: Num + Shl<usize, Output = N> + From<u8> {}
struct SubIdentifierIterator<'a, N: Repr> {
oid: &'a Oid<'a>,
pos: usize,
first: bool,
n: PhantomData<&'a N>,
}
impl<'a, N: Repr> Iterator for SubIdentifierIterator<'a, N> {
type Item = N;
fn next(&mut self) -> Option<Self::Item> {
use num_traits::identities::Zero;
if self.pos == self.oid.asn1.len() {
return None;
}
if !self.oid.relative {
if !self.first {
debug_assert!(self.pos == 0);
self.first = true;
return Some((self.oid.asn1[0] / 40).into());
} else if self.pos == 0 {
self.pos += 1;
if self.oid.asn1[0] == 0 && self.oid.asn1.len() == 1 {
return None;
}
return Some((self.oid.asn1[0] % 40).into());
}
}
// decode objet sub-identifier according to the asn.1 standard
let mut res = <N as Zero>::zero();
for o in self.oid.asn1[self.pos..].iter() {
self.pos += 1;
res = (res << 7) + (o & 0b111_1111).into();
let flag = o >> 7;
if flag == 0u8 {
break;
}
}
Some(res)
}
}
impl<'a, N: Repr> FusedIterator for SubIdentifierIterator<'a, N> {}
impl<'a, N: Repr> ExactSizeIterator for SubIdentifierIterator<'a, N> {
fn len(&self) -> usize {
if self.oid.relative {
self.oid.asn1.iter().filter(|o| (*o >> 7) == 0u8).count()
} else if self.oid.asn1.len() == 0 {
0
} else if self.oid.asn1.len() == 1 {
if self.oid.asn1[0] == 0 {
1
} else {
2
}
} else {
2 + self.oid.asn1[2..]
.iter()
.filter(|o| (*o >> 7) == 0u8)
.count()
}
}
#[cfg(feature = "exact_size_is_empty")]
fn is_empty(&self) -> bool {
self.oid.asn1.is_empty()
}
}
impl<'a> fmt::Display for Oid<'a> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
if self.relative {
f.write_str("rel. ")?;
}
f.write_str(&self.to_id_string())
}
}
impl<'a> fmt::Debug for Oid<'a> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.write_str("OID(")?;
<Oid as fmt::Display>::fmt(self, f)?;
f.write_str(")")
}
}
impl<'a> FromStr for Oid<'a> {
type Err = OidParseError;
fn from_str(s: &str) -> core::result::Result<Self, Self::Err> {
let v: core::result::Result<Vec<_>, _> = s.split('.').map(|c| c.parse::<u64>()).collect();
v.map_err(|_| OidParseError::ParseIntError)
.and_then(|v| Oid::from(&v))
}
}
/// Helper macro to declare integers at compile-time
///
/// Since the DER encoded oids are not very readable we provide a
/// procedural macro `oid!`. The macro can be used the following ways:
///
/// - `oid!(1.4.42.23)`: Create a const expression for the corresponding `Oid<'static>`
/// - `oid!(rel 42.23)`: Create a const expression for the corresponding relative `Oid<'static>`
/// - `oid!(raw 1.4.42.23)`/`oid!(raw rel 42.23)`: Obtain the DER encoded form as a byte array.
///
/// # Comparing oids
///
/// Comparing a parsed oid to a static oid is probably the most common
/// thing done with oids in your code. The `oid!` macro can be used in expression positions for
/// this purpose. For example
/// ```
/// use asn1_rs::{oid, Oid};
///
/// # let some_oid: Oid<'static> = oid!(1.2.456);
/// const SOME_STATIC_OID: Oid<'static> = oid!(1.2.456);
/// assert_eq!(some_oid, SOME_STATIC_OID)
/// ```
/// To get a relative Oid use `oid!(rel 1.2)`.
///
/// Because of limitations for procedural macros ([rust issue](https://github.com/rust-lang/rust/issues/54727))
/// and constants used in patterns ([rust issue](https://github.com/rust-lang/rust/issues/31434))
/// the `oid` macro can not directly be used in patterns, also not through constants.
/// You can do this, though:
/// ```
/// # use asn1_rs::{oid, Oid};
/// # let some_oid: Oid<'static> = oid!(1.2.456);
/// const SOME_OID: Oid<'static> = oid!(1.2.456);
/// if some_oid == SOME_OID || some_oid == oid!(1.2.456) {
/// println!("match");
/// }
///
/// // Alternatively, compare the DER encoded form directly:
/// const SOME_OID_RAW: &[u8] = &oid!(raw 1.2.456);
/// match some_oid.as_bytes() {
/// SOME_OID_RAW => println!("match"),
/// _ => panic!("no match"),
/// }
/// ```
/// *Attention*, be aware that the latter version might not handle the case of a relative oid correctly. An
/// extra check might be necessary.
#[macro_export]
macro_rules! oid {
(raw $( $item:literal ).*) => {
$crate::exports::asn1_rs_impl::encode_oid!( $( $item ).* )
};
(raw $items:expr) => {
$crate::exports::asn1_rs_impl::encode_oid!($items)
};
(rel $($item:literal ).*) => {
$crate::Oid::new_relative($crate::exports::borrow::Cow::Borrowed(
&$crate::exports::asn1_rs_impl::encode_oid!(rel $( $item ).*),
))
};
($($item:literal ).*) => {
$crate::Oid::new($crate::exports::borrow::Cow::Borrowed(
&$crate::oid!(raw $( $item ).*),
))
};
}
#[cfg(test)]
mod tests {
use crate::{FromDer, Oid, ToDer};
use hex_literal::hex;
#[test]
fn declare_oid() {
let oid = super::oid! {1.2.840.113549.1};
assert_eq!(oid.to_string(), "1.2.840.113549.1");
}
const OID_RSA_ENCRYPTION: &[u8] = &oid! {raw 1.2.840.113549.1.1.1};
const OID_EC_PUBLIC_KEY: &[u8] = &oid! {raw 1.2.840.10045.2.1};
#[allow(clippy::match_like_matches_macro)]
fn compare_oid(oid: &Oid) -> bool {
match oid.as_bytes() {
OID_RSA_ENCRYPTION => true,
OID_EC_PUBLIC_KEY => true,
_ => false,
}
}
#[test]
fn | () {
let oid = Oid::from(&[1, 2, 840, 113_549, 1, 1, 1]).unwrap();
assert_eq!(oid, oid! {1.2.840.113549.1.1.1});
let oid = Oid::from(&[1, 2, 840, 113_549, 1, 1, 1]).unwrap();
assert!(compare_oid(&oid));
}
#[test]
fn oid_to_der() {
let oid = super::oid! {1.2.840.113549.1};
assert_eq!(oid.to_der_len(), Ok(9));
let v = oid.to_der_vec().expect("could not serialize");
assert_eq!(&v, &hex! {"06 07 2a 86 48 86 f7 0d 01"});
let (_, oid2) = Oid::from_der(&v).expect("could not re-parse");
assert_eq!(&oid, &oid2);
}
#[test]
fn oid_starts_with() {
const OID_RSA_ENCRYPTION: Oid = oid! {1.2.840.113549.1.1.1};
const OID_EC_PUBLIC_KEY: Oid = oid! {1.2.840.10045.2.1};
let oid = super::oid! {1.2.840.113549.1};
assert!(OID_RSA_ENCRYPTION.starts_with(&oid));
assert!(!OID_EC_PUBLIC_KEY.starts_with(&oid));
}
#[test]
fn oid_macro_parameters() {
// Code inspired from https://github.com/rusticata/der-parser/issues/68
macro_rules! foo {
($a:literal $b:literal $c:literal) => {
super::oid!($a.$b.$c)
};
}
let oid = foo!(1 2 3);
assert_eq!(oid, oid! {1.2.3});
}
}
| test_compare_oid | identifier_name |
oid.rs | use crate::*;
use alloc::borrow::Cow;
#[cfg(not(feature = "std"))]
use alloc::format;
use alloc::string::{String, ToString};
use alloc::vec::Vec;
use core::{
convert::TryFrom, fmt, iter::FusedIterator, marker::PhantomData, ops::Shl, str::FromStr,
};
#[cfg(feature = "bigint")]
use num_bigint::BigUint;
use num_traits::Num;
/// An error for OID parsing functions.
#[derive(Debug)]
pub enum OidParseError {
TooShort,
/// Signalizes that the first or second component is too large.
/// The first must be within the range 0 to 6 (inclusive).
/// The second component must be less than 40.
FirstComponentsTooLarge,
ParseIntError,
}
/// Object ID (OID) representation which can be relative or non-relative.
/// An example for an OID in string representation is `"1.2.840.113549.1.1.5"`.
///
/// For non-relative OIDs restrictions apply to the first two components.
///
/// This library contains a procedural macro `oid` which can be used to
/// create oids. For example `oid!(1.2.44.233)` or `oid!(rel 44.233)`
/// for relative oids. See the [module documentation](index.html) for more information.
#[derive(Hash, PartialEq, Eq, Clone)]
pub struct Oid<'a> {
asn1: Cow<'a, [u8]>,
relative: bool,
}
impl<'a> TryFrom<Any<'a>> for Oid<'a> {
type Error = Error;
fn try_from(any: Any<'a>) -> Result<Self> {
TryFrom::try_from(&any)
}
}
impl<'a, 'b> TryFrom<&'b Any<'a>> for Oid<'a> {
type Error = Error;
fn try_from(any: &'b Any<'a>) -> Result<Self> {
// check that any.data.last().unwrap() >> 7 == 0u8
let asn1 = Cow::Borrowed(any.data);
Ok(Oid::new(asn1))
}
}
impl<'a> CheckDerConstraints for Oid<'a> {
fn check_constraints(any: &Any) -> Result<()> {
any.header.assert_primitive()?;
any.header.length.assert_definite()?;
Ok(())
}
}
impl DerAutoDerive for Oid<'_> {}
impl<'a> Tagged for Oid<'a> {
const TAG: Tag = Tag::Oid;
}
#[cfg(feature = "std")]
impl ToDer for Oid<'_> {
fn to_der_len(&self) -> Result<usize> {
// OID/REL-OID tag will not change header size, so we don't care here
let header = Header::new(
Class::Universal,
false,
Self::TAG,
Length::Definite(self.asn1.len()),
);
Ok(header.to_der_len()? + self.asn1.len())
}
fn write_der_header(&self, writer: &mut dyn std::io::Write) -> SerializeResult<usize> {
let tag = if self.relative {
Tag::RelativeOid
} else {
Tag::Oid
};
let header = Header::new(
Class::Universal,
false,
tag,
Length::Definite(self.asn1.len()),
);
header.write_der_header(writer).map_err(Into::into)
}
fn write_der_content(&self, writer: &mut dyn std::io::Write) -> SerializeResult<usize> {
writer.write(&self.asn1).map_err(Into::into)
}
}
fn encode_relative(ids: &'_ [u64]) -> impl Iterator<Item = u8> + '_ {
ids.iter().flat_map(|id| {
let bit_count = 64 - id.leading_zeros();
let octets_needed = ((bit_count + 6) / 7).max(1);
(0..octets_needed).map(move |i| {
let flag = if i == octets_needed - 1 { 0 } else { 1 << 7 };
((id >> (7 * (octets_needed - 1 - i))) & 0b111_1111) as u8 | flag
})
})
}
impl<'a> Oid<'a> {
/// Create an OID from the ASN.1 DER encoded form. See the [module documentation](index.html)
/// for other ways to create oids.
pub const fn new(asn1: Cow<'a, [u8]>) -> Oid {
Oid {
asn1,
relative: false,
}
}
/// Create a relative OID from the ASN.1 DER encoded form. See the [module documentation](index.html)
/// for other ways to create relative oids.
pub const fn new_relative(asn1: Cow<'a, [u8]>) -> Oid {
Oid {
asn1,
relative: true,
}
}
/// Build an OID from an array of object identifier components.
/// This method allocates memory on the heap.
pub fn from(s: &[u64]) -> core::result::Result<Oid<'static>, OidParseError> {
if s.len() < 2 {
if s.len() == 1 && s[0] == 0 {
return Ok(Oid {
asn1: Cow::Borrowed(&[0]),
relative: false,
});
}
return Err(OidParseError::TooShort);
}
if s[0] >= 7 || s[1] >= 40 {
return Err(OidParseError::FirstComponentsTooLarge);
}
let asn1_encoded: Vec<u8> = [(s[0] * 40 + s[1]) as u8]
.iter()
.copied()
.chain(encode_relative(&s[2..]))
.collect();
Ok(Oid {
asn1: Cow::from(asn1_encoded),
relative: false,
})
}
/// Build a relative OID from an array of object identifier components.
pub fn from_relative(s: &[u64]) -> core::result::Result<Oid<'static>, OidParseError> {
if s.is_empty() {
return Err(OidParseError::TooShort);
}
let asn1_encoded: Vec<u8> = encode_relative(s).collect();
Ok(Oid {
asn1: Cow::from(asn1_encoded),
relative: true,
})
}
/// Create a deep copy of the oid.
///
/// This method allocates data on the heap. The returned oid
/// can be used without keeping the ASN.1 representation around.
///
/// Cloning the returned oid does again allocate data.
pub fn to_owned(&self) -> Oid<'static> {
Oid {
asn1: Cow::from(self.asn1.to_vec()),
relative: self.relative,
}
}
/// Get the encoded oid without the header.
#[inline]
pub fn as_bytes(&self) -> &[u8] {
self.asn1.as_ref()
}
/// Get the encoded oid without the header.
#[deprecated(since = "0.2.0", note = "Use `as_bytes` instead")]
#[inline]
pub fn bytes(&self) -> &[u8] {
self.as_bytes()
}
/// Get the bytes representation of the encoded oid
pub fn into_cow(self) -> Cow<'a, [u8]> {
self.asn1
}
/// Convert the OID to a string representation.
/// The string contains the IDs separated by dots, for ex: "1.2.840.113549.1.1.5"
#[cfg(feature = "bigint")]
pub fn to_id_string(&self) -> String {
let ints: Vec<String> = self.iter_bigint().map(|i| i.to_string()).collect();
ints.join(".")
}
#[cfg(not(feature = "bigint"))]
/// Convert the OID to a string representation.
///
/// If every arc fits into a u64 a string like "1.2.840.113549.1.1.5"
/// is returned, otherwise a hex representation.
///
/// See also the "bigint" feature of this crate.
pub fn to_id_string(&self) -> String {
if let Some(arcs) = self.iter() {
let ints: Vec<String> = arcs.map(|i| i.to_string()).collect();
ints.join(".")
} else {
let mut ret = String::with_capacity(self.asn1.len() * 3);
for (i, o) in self.asn1.iter().enumerate() {
ret.push_str(&format!("{:02x}", o));
if i + 1 != self.asn1.len() {
ret.push(' ');
}
}
ret
}
}
/// Return an iterator over the sub-identifiers (arcs).
#[cfg(feature = "bigint")]
pub fn iter_bigint(
&'_ self,
) -> impl Iterator<Item = BigUint> + FusedIterator + ExactSizeIterator + '_ {
SubIdentifierIterator {
oid: self,
pos: 0,
first: false,
n: PhantomData,
}
}
/// Return an iterator over the sub-identifiers (arcs).
/// Returns `None` if at least one arc does not fit into `u64`.
pub fn iter(
&'_ self,
) -> Option<impl Iterator<Item = u64> + FusedIterator + ExactSizeIterator + '_> {
// Check that every arc fits into u64
let bytes = if self.relative {
&self.asn1
} else if self.asn1.is_empty() {
&[]
} else {
&self.asn1[1..]
};
let max_bits = bytes
.iter()
.fold((0usize, 0usize), |(max, cur), c| {
let is_end = (c >> 7) == 0u8;
if is_end {
(max.max(cur + 7), 0)
} else {
(max, cur + 7)
}
})
.0;
if max_bits > 64 {
return None;
}
Some(SubIdentifierIterator {
oid: self,
pos: 0,
first: false,
n: PhantomData,
})
}
pub fn from_ber_relative(bytes: &'a [u8]) -> ParseResult<'a, Self> {
let (rem, any) = Any::from_ber(bytes)?;
any.header.assert_primitive()?;
any.header.assert_tag(Tag::RelativeOid)?;
let asn1 = Cow::Borrowed(any.data);
Ok((rem, Oid::new_relative(asn1)))
}
pub fn from_der_relative(bytes: &'a [u8]) -> ParseResult<'a, Self> {
let (rem, any) = Any::from_der(bytes)?;
any.header.assert_tag(Tag::RelativeOid)?;
Self::check_constraints(&any)?;
let asn1 = Cow::Borrowed(any.data);
Ok((rem, Oid::new_relative(asn1)))
}
/// Returns true if `needle` is a prefix of the OID.
pub fn starts_with(&self, needle: &Oid) -> bool {
self.asn1.len() >= needle.asn1.len() && self.asn1.starts_with(needle.as_bytes())
}
}
trait Repr: Num + Shl<usize, Output = Self> + From<u8> {}
impl<N> Repr for N where N: Num + Shl<usize, Output = N> + From<u8> {}
struct SubIdentifierIterator<'a, N: Repr> {
oid: &'a Oid<'a>,
pos: usize,
first: bool,
n: PhantomData<&'a N>,
}
impl<'a, N: Repr> Iterator for SubIdentifierIterator<'a, N> {
type Item = N;
fn next(&mut self) -> Option<Self::Item> {
use num_traits::identities::Zero;
if self.pos == self.oid.asn1.len() {
return None;
}
if !self.oid.relative {
if !self.first {
debug_assert!(self.pos == 0);
self.first = true;
return Some((self.oid.asn1[0] / 40).into());
} else if self.pos == 0 {
self.pos += 1;
if self.oid.asn1[0] == 0 && self.oid.asn1.len() == 1 {
return None;
}
return Some((self.oid.asn1[0] % 40).into());
}
}
// decode objet sub-identifier according to the asn.1 standard
let mut res = <N as Zero>::zero();
for o in self.oid.asn1[self.pos..].iter() {
self.pos += 1;
res = (res << 7) + (o & 0b111_1111).into();
let flag = o >> 7;
if flag == 0u8 |
}
Some(res)
}
}
impl<'a, N: Repr> FusedIterator for SubIdentifierIterator<'a, N> {}
impl<'a, N: Repr> ExactSizeIterator for SubIdentifierIterator<'a, N> {
fn len(&self) -> usize {
if self.oid.relative {
self.oid.asn1.iter().filter(|o| (*o >> 7) == 0u8).count()
} else if self.oid.asn1.len() == 0 {
0
} else if self.oid.asn1.len() == 1 {
if self.oid.asn1[0] == 0 {
1
} else {
2
}
} else {
2 + self.oid.asn1[2..]
.iter()
.filter(|o| (*o >> 7) == 0u8)
.count()
}
}
#[cfg(feature = "exact_size_is_empty")]
fn is_empty(&self) -> bool {
self.oid.asn1.is_empty()
}
}
impl<'a> fmt::Display for Oid<'a> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
if self.relative {
f.write_str("rel. ")?;
}
f.write_str(&self.to_id_string())
}
}
impl<'a> fmt::Debug for Oid<'a> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.write_str("OID(")?;
<Oid as fmt::Display>::fmt(self, f)?;
f.write_str(")")
}
}
impl<'a> FromStr for Oid<'a> {
type Err = OidParseError;
fn from_str(s: &str) -> core::result::Result<Self, Self::Err> {
let v: core::result::Result<Vec<_>, _> = s.split('.').map(|c| c.parse::<u64>()).collect();
v.map_err(|_| OidParseError::ParseIntError)
.and_then(|v| Oid::from(&v))
}
}
/// Helper macro to declare integers at compile-time
///
/// Since the DER encoded oids are not very readable we provide a
/// procedural macro `oid!`. The macro can be used the following ways:
///
/// - `oid!(1.4.42.23)`: Create a const expression for the corresponding `Oid<'static>`
/// - `oid!(rel 42.23)`: Create a const expression for the corresponding relative `Oid<'static>`
/// - `oid!(raw 1.4.42.23)`/`oid!(raw rel 42.23)`: Obtain the DER encoded form as a byte array.
///
/// # Comparing oids
///
/// Comparing a parsed oid to a static oid is probably the most common
/// thing done with oids in your code. The `oid!` macro can be used in expression positions for
/// this purpose. For example
/// ```
/// use asn1_rs::{oid, Oid};
///
/// # let some_oid: Oid<'static> = oid!(1.2.456);
/// const SOME_STATIC_OID: Oid<'static> = oid!(1.2.456);
/// assert_eq!(some_oid, SOME_STATIC_OID)
/// ```
/// To get a relative Oid use `oid!(rel 1.2)`.
///
/// Because of limitations for procedural macros ([rust issue](https://github.com/rust-lang/rust/issues/54727))
/// and constants used in patterns ([rust issue](https://github.com/rust-lang/rust/issues/31434))
/// the `oid` macro can not directly be used in patterns, also not through constants.
/// You can do this, though:
/// ```
/// # use asn1_rs::{oid, Oid};
/// # let some_oid: Oid<'static> = oid!(1.2.456);
/// const SOME_OID: Oid<'static> = oid!(1.2.456);
/// if some_oid == SOME_OID || some_oid == oid!(1.2.456) {
/// println!("match");
/// }
///
/// // Alternatively, compare the DER encoded form directly:
/// const SOME_OID_RAW: &[u8] = &oid!(raw 1.2.456);
/// match some_oid.as_bytes() {
/// SOME_OID_RAW => println!("match"),
/// _ => panic!("no match"),
/// }
/// ```
/// *Attention*, be aware that the latter version might not handle the case of a relative oid correctly. An
/// extra check might be necessary.
#[macro_export]
macro_rules! oid {
(raw $( $item:literal ).*) => {
$crate::exports::asn1_rs_impl::encode_oid!( $( $item ).* )
};
(raw $items:expr) => {
$crate::exports::asn1_rs_impl::encode_oid!($items)
};
(rel $($item:literal ).*) => {
$crate::Oid::new_relative($crate::exports::borrow::Cow::Borrowed(
&$crate::exports::asn1_rs_impl::encode_oid!(rel $( $item ).*),
))
};
($($item:literal ).*) => {
$crate::Oid::new($crate::exports::borrow::Cow::Borrowed(
&$crate::oid!(raw $( $item ).*),
))
};
}
#[cfg(test)]
mod tests {
use crate::{FromDer, Oid, ToDer};
use hex_literal::hex;
#[test]
fn declare_oid() {
let oid = super::oid! {1.2.840.113549.1};
assert_eq!(oid.to_string(), "1.2.840.113549.1");
}
const OID_RSA_ENCRYPTION: &[u8] = &oid! {raw 1.2.840.113549.1.1.1};
const OID_EC_PUBLIC_KEY: &[u8] = &oid! {raw 1.2.840.10045.2.1};
#[allow(clippy::match_like_matches_macro)]
fn compare_oid(oid: &Oid) -> bool {
match oid.as_bytes() {
OID_RSA_ENCRYPTION => true,
OID_EC_PUBLIC_KEY => true,
_ => false,
}
}
#[test]
fn test_compare_oid() {
let oid = Oid::from(&[1, 2, 840, 113_549, 1, 1, 1]).unwrap();
assert_eq!(oid, oid! {1.2.840.113549.1.1.1});
let oid = Oid::from(&[1, 2, 840, 113_549, 1, 1, 1]).unwrap();
assert!(compare_oid(&oid));
}
#[test]
fn oid_to_der() {
let oid = super::oid! {1.2.840.113549.1};
assert_eq!(oid.to_der_len(), Ok(9));
let v = oid.to_der_vec().expect("could not serialize");
assert_eq!(&v, &hex! {"06 07 2a 86 48 86 f7 0d 01"});
let (_, oid2) = Oid::from_der(&v).expect("could not re-parse");
assert_eq!(&oid, &oid2);
}
#[test]
fn oid_starts_with() {
const OID_RSA_ENCRYPTION: Oid = oid! {1.2.840.113549.1.1.1};
const OID_EC_PUBLIC_KEY: Oid = oid! {1.2.840.10045.2.1};
let oid = super::oid! {1.2.840.113549.1};
assert!(OID_RSA_ENCRYPTION.starts_with(&oid));
assert!(!OID_EC_PUBLIC_KEY.starts_with(&oid));
}
#[test]
fn oid_macro_parameters() {
// Code inspired from https://github.com/rusticata/der-parser/issues/68
macro_rules! foo {
($a:literal $b:literal $c:literal) => {
super::oid!($a.$b.$c)
};
}
let oid = foo!(1 2 3);
assert_eq!(oid, oid! {1.2.3});
}
}
| {
break;
} | conditional_block |
map.rs | #![allow(dead_code)]
extern crate rand;
use rand::Rng;
use std::cmp;
use game::rect::*;
use game::tile::*;
use game::object::*;
use game::draw_info::*;
use game::is_blocked;
pub struct Map {
tiles: Vec<Tile>,
width:i32,
height:i32,
out_of_bounds_tile: Tile,
}
//TODO Maybe one day we can implement an iterator over the map
//that will give the (x,y) coord of the tile and the tile itself
impl Map {
//We use i32's for the map's width / height because
//easier intergration with libtcod
//less wonky math when dealing with negatives
pub fn new(width:i32, height:i32, default_tile:Tile) -> Self {
assert!(width > 0, "width must be greater than 0!");
assert!(height > 0, "height must be greater than 0!");
Map {
tiles: vec![default_tile; (height * width) as usize],
width:width,
height:height,
out_of_bounds_tile: Tile::wall(),
}
}
pub fn in_bounds(&self, x:i32, y:i32) -> bool {
x >= 0
&& y >= 0
&& x < self.width()
&& y < self.height()
}
fn index_at(&self, x:i32, y:i32) -> usize {
return (y * self.width() + x) as usize;
}
pub fn at(&self, x:i32, y:i32) -> &Tile {
if !self.in_bounds(x,y) {
return &self.out_of_bounds_tile;
}
&self.tiles[self.index_at(x,y)]
}
pub fn at_mut(&mut self, x:i32, y:i32) -> &mut Tile {
let index = self.index_at(x,y);
&mut self.tiles[index]
}
pub fn set(&mut self, x:i32, y:i32, tile:Tile){
let index = self.index_at(x,y);
self.tiles[index] = tile;
}
pub fn width(&self) -> i32 {
self.width
}
pub fn height(&self) -> i32 {
self.height
}
fn create_room(&mut self, room: Rect, ) |
fn create_v_tunnel(&mut self, y1: i32, y2: i32, x: i32) {
for y in cmp::min(y1, y2)..(cmp::max(y1, y2) + 1) {
self.set(x,y, Tile::empty());
}
}
fn create_h_tunnel(&mut self, x1: i32, x2: i32, y: i32) {
for x in cmp::min(x1, x2)..(cmp::max(x1, x2) + 1) {
self.set(x,y, Tile::empty());
}
}
pub fn create_random_rooms(width:i32, height:i32, objects:&mut Vec<Object>) -> (Self, (i32,i32)){
const ROOM_MAX_SIZE: i32 = 10;
const ROOM_MIN_SIZE: i32 = 6;
const MAX_ROOMS: i32 = 40;
//set everything to a wall first.
let mut map = Map::new(width,height, Tile::wall());
//our starting position will be in the first valid room's center.
let mut starting_position = (0, 0);
//Then "carve" the empty rooms out.
let mut rooms = vec![];
//save local copy of thread_rng. Mostly for readability
let mut rng = rand::thread_rng();
for _ in 0..MAX_ROOMS {
// random width and height
let w = rng.gen_range(ROOM_MIN_SIZE, ROOM_MAX_SIZE + 1);
let h = rng.gen_range(ROOM_MIN_SIZE, ROOM_MAX_SIZE + 1);
// random position without going out of the boundaries of the map
let x = rng.gen_range(0, map.width() - w);
let y = rng.gen_range(0, map.height() - h);
let new_room = Rect::new(x, y, w, h);
// run through the other rooms and see if they intersect with this one
let failed = rooms.iter().any(|other_room| new_room.intersects_with(other_room));
// this means there are no intersections, so this room is valid
if !failed {
// "carve" it to the map's tiles
map.create_room(new_room);
//TODO just for the hell of it make it so the player spawns randomly in the first room.
let (new_x, new_y) = new_room.center();
Map::place_objects(new_room, objects);
if rooms.is_empty() {
//First room since there isnt any other rooms
starting_position = (new_x, new_y);
}else{
//Non first room.
// all rooms after the first:
// connect it to the previous room with a tunnel
// center coordinates of the previous room
let (prev_x, prev_y) = rooms[rooms.len() - 1].center();
// draw a coin (random bool value -- either true or false)
if rand::random() {
// first move horizontally, then vertically
map.create_h_tunnel(prev_x, new_x, prev_y);
map.create_v_tunnel(prev_y, new_y, new_x);
} else {
// first move vertically, then horizontally
map.create_v_tunnel(prev_y, new_y, prev_x);
map.create_h_tunnel(prev_x, new_x, new_y);
}
}
rooms.push(new_room);
}
}
(map, starting_position)
}
pub fn place_objects(room: Rect, objects: &mut Vec<Object>) {
let MAX_ROOM_MONSTERS = 3;
// choose random number of monsters
let num_monsters = rand::thread_rng().gen_range(0, MAX_ROOM_MONSTERS + 1);
for _ in 0..num_monsters {
// choose random spot for this monster
let x = rand::thread_rng().gen_range(room.x1 + 1, room.x2);
let y = rand::thread_rng().gen_range(room.y1 + 1, room.y2);
let mut monster = if rand::random::<f32>() < 0.8 { // 80% chance of getting an orc
// create an orc
Object::new(x, y, ascii::orc, *tileset::orc,"orc", true)
} else {
Object::new(x, y, ascii::troll, *tileset::troll,"troll", true)
};
monster.alive = true;
objects.push(monster);
}
}
//followed
//https://gamedevelopment.tutsplus.com/tutorials/generate-random-cave-levels-using-cellular-automata--gamedev-9664
pub fn create_caves(width:i32, height:i32, objects:&mut Vec<Object>) -> Self {
//set everything to a wall first.
let mut map = Map::new(width,height, Tile::wall());
let mut rng = rand::thread_rng();
let chance_to_be_empty = 0.46;
for tile in map.tiles.iter_mut(){
let chance = rng.gen::<f32>();
if chance < chance_to_be_empty {
*tile = Tile::empty();
}
}
let sim_steps = 6;
for _ in 0 .. sim_steps {
map.caves_sim_step();
}
let max_spawn_chances = 200;
let mut spawn_attempts = 0;
let desired_monsters = 15;
let mut spawn_amount = 0;
while spawn_attempts < max_spawn_chances && spawn_amount <= desired_monsters {
let x = rng.gen_range(0, map.width());
let y = rng.gen_range(0, map.height());
let tile_blocked = is_blocked(x,y, &map, objects);
if !tile_blocked {
let mut monster = if rand::random::<f32>() < 0.8 { // 80% chance of getting an orc
// create an orc
Object::new(x, y, ascii::orc, *tileset::orc,"orc", true)
} else {
Object::new(x, y, ascii::troll, *tileset::troll,"troll", true)
};
monster.alive = true;
objects.push(monster);
spawn_amount +=1;
}
spawn_attempts +=1;
}
println!("spawn amount: {} spawn_attempts: {}", spawn_amount, spawn_attempts);
map
}
fn caves_sim_step(&mut self) {
//We need to create a new map since updating the map in place will cause wonky behaviours.
//TODO from a memory perspective we could just use boolean values to represent the walls
//this will save memory from the map allocations
//or... maybe just have 2 maps at a given time and free the last map once we are done with it.
//arena allocator as well!
let mut new_map = Map::new(self.width, self.height, Tile::wall());
let death_limit = 3;
let birth_limit = 4;
for x in 0 .. self.width {
for y in 0 .. self.height {
let empty_neighbor_count = self.count_empty_neighbours(x,y);
//The new value is based on our simulation rules
//First, if a cell is empty but has too few neighbours, fill
if !self.at(x,y).is_wall() {
if empty_neighbor_count < death_limit {
new_map.set(x,y, Tile::wall());
}
else{
new_map.set(x,y, Tile::empty());
}
}
else{
//Otherwise, if the cell is filled now, check if it has the right number of neighbours to be cleared
if empty_neighbor_count > birth_limit {
new_map.set(x,y, Tile::empty());
}
else{
new_map.set(x,y, Tile::wall());
}
}
}
}
*self = new_map;
}
//We should create a unit test for this..
pub fn count_empty_neighbours(&self, x:i32, y:i32) -> i32{
let mut count = 0;
for i in -1 .. 2 {
for j in -1 .. 2 {
let neighbour_x = x + i;
let neighbour_y = y + j;
//if we're looking at the middle point do nothing
if i == 0 && j == 0 {}
else if neighbour_x < 0 || neighbour_y < 0 || neighbour_x >= self.width() || neighbour_y >= self.height() {
//Out of bounds. Count as a neighbor?
count += 1;
}else if !self.at(neighbour_x, neighbour_y).is_wall() {
count += 1;
}
}
}
count
}
} | {
for x in (room.x1 + 1) .. room.x2 {
for y in (room.y1 + 1) .. room.y2 {
self.set(x,y,Tile::empty());
}
}
} | identifier_body |
map.rs | #![allow(dead_code)]
extern crate rand;
use rand::Rng;
use std::cmp;
use game::rect::*;
use game::tile::*;
use game::object::*;
use game::draw_info::*;
use game::is_blocked;
pub struct Map {
tiles: Vec<Tile>,
width:i32,
height:i32,
out_of_bounds_tile: Tile,
}
//TODO Maybe one day we can implement an iterator over the map
//that will give the (x,y) coord of the tile and the tile itself
impl Map {
//We use i32's for the map's width / height because
//easier intergration with libtcod
//less wonky math when dealing with negatives
pub fn new(width:i32, height:i32, default_tile:Tile) -> Self {
assert!(width > 0, "width must be greater than 0!");
assert!(height > 0, "height must be greater than 0!");
Map {
tiles: vec![default_tile; (height * width) as usize],
width:width,
height:height,
out_of_bounds_tile: Tile::wall(),
}
}
pub fn in_bounds(&self, x:i32, y:i32) -> bool {
x >= 0
&& y >= 0
&& x < self.width()
&& y < self.height()
}
fn index_at(&self, x:i32, y:i32) -> usize {
return (y * self.width() + x) as usize;
}
pub fn at(&self, x:i32, y:i32) -> &Tile {
if !self.in_bounds(x,y) {
return &self.out_of_bounds_tile;
}
&self.tiles[self.index_at(x,y)]
}
pub fn at_mut(&mut self, x:i32, y:i32) -> &mut Tile {
let index = self.index_at(x,y);
&mut self.tiles[index]
}
pub fn set(&mut self, x:i32, y:i32, tile:Tile){
let index = self.index_at(x,y);
self.tiles[index] = tile;
}
pub fn width(&self) -> i32 {
self.width
}
pub fn height(&self) -> i32 {
self.height
}
fn create_room(&mut self, room: Rect, ) {
for x in (room.x1 + 1) .. room.x2 {
for y in (room.y1 + 1) .. room.y2 {
self.set(x,y,Tile::empty());
}
}
}
fn create_v_tunnel(&mut self, y1: i32, y2: i32, x: i32) {
for y in cmp::min(y1, y2)..(cmp::max(y1, y2) + 1) {
self.set(x,y, Tile::empty());
}
}
fn create_h_tunnel(&mut self, x1: i32, x2: i32, y: i32) {
for x in cmp::min(x1, x2)..(cmp::max(x1, x2) + 1) {
self.set(x,y, Tile::empty());
}
}
pub fn create_random_rooms(width:i32, height:i32, objects:&mut Vec<Object>) -> (Self, (i32,i32)){
const ROOM_MAX_SIZE: i32 = 10;
const ROOM_MIN_SIZE: i32 = 6;
const MAX_ROOMS: i32 = 40;
//set everything to a wall first.
let mut map = Map::new(width,height, Tile::wall());
//our starting position will be in the first valid room's center.
let mut starting_position = (0, 0);
//Then "carve" the empty rooms out.
let mut rooms = vec![];
//save local copy of thread_rng. Mostly for readability
let mut rng = rand::thread_rng();
for _ in 0..MAX_ROOMS {
// random width and height
let w = rng.gen_range(ROOM_MIN_SIZE, ROOM_MAX_SIZE + 1);
let h = rng.gen_range(ROOM_MIN_SIZE, ROOM_MAX_SIZE + 1);
// random position without going out of the boundaries of the map
let x = rng.gen_range(0, map.width() - w);
let y = rng.gen_range(0, map.height() - h);
let new_room = Rect::new(x, y, w, h);
// run through the other rooms and see if they intersect with this one
let failed = rooms.iter().any(|other_room| new_room.intersects_with(other_room));
// this means there are no intersections, so this room is valid
if !failed {
// "carve" it to the map's tiles
map.create_room(new_room);
//TODO just for the hell of it make it so the player spawns randomly in the first room.
let (new_x, new_y) = new_room.center();
Map::place_objects(new_room, objects);
if rooms.is_empty() {
//First room since there isnt any other rooms
starting_position = (new_x, new_y);
}else{
//Non first room.
// all rooms after the first:
// connect it to the previous room with a tunnel
// center coordinates of the previous room
let (prev_x, prev_y) = rooms[rooms.len() - 1].center();
// draw a coin (random bool value -- either true or false)
if rand::random() {
// first move horizontally, then vertically
map.create_h_tunnel(prev_x, new_x, prev_y);
map.create_v_tunnel(prev_y, new_y, new_x);
} else {
// first move vertically, then horizontally
map.create_v_tunnel(prev_y, new_y, prev_x);
map.create_h_tunnel(prev_x, new_x, new_y);
}
}
rooms.push(new_room);
}
}
(map, starting_position)
}
pub fn place_objects(room: Rect, objects: &mut Vec<Object>) {
let MAX_ROOM_MONSTERS = 3;
// choose random number of monsters
let num_monsters = rand::thread_rng().gen_range(0, MAX_ROOM_MONSTERS + 1);
for _ in 0..num_monsters {
// choose random spot for this monster
let x = rand::thread_rng().gen_range(room.x1 + 1, room.x2);
let y = rand::thread_rng().gen_range(room.y1 + 1, room.y2);
let mut monster = if rand::random::<f32>() < 0.8 { // 80% chance of getting an orc
// create an orc
Object::new(x, y, ascii::orc, *tileset::orc,"orc", true)
} else {
Object::new(x, y, ascii::troll, *tileset::troll,"troll", true)
};
monster.alive = true;
objects.push(monster);
}
}
//followed
//https://gamedevelopment.tutsplus.com/tutorials/generate-random-cave-levels-using-cellular-automata--gamedev-9664
pub fn create_caves(width:i32, height:i32, objects:&mut Vec<Object>) -> Self {
//set everything to a wall first.
let mut map = Map::new(width,height, Tile::wall());
let mut rng = rand::thread_rng();
let chance_to_be_empty = 0.46;
for tile in map.tiles.iter_mut(){
let chance = rng.gen::<f32>();
if chance < chance_to_be_empty {
*tile = Tile::empty();
}
}
let sim_steps = 6;
for _ in 0 .. sim_steps {
map.caves_sim_step();
}
let max_spawn_chances = 200;
let mut spawn_attempts = 0;
let desired_monsters = 15;
let mut spawn_amount = 0;
while spawn_attempts < max_spawn_chances && spawn_amount <= desired_monsters {
let x = rng.gen_range(0, map.width()); | let y = rng.gen_range(0, map.height());
let tile_blocked = is_blocked(x,y, &map, objects);
if !tile_blocked {
let mut monster = if rand::random::<f32>() < 0.8 { // 80% chance of getting an orc
// create an orc
Object::new(x, y, ascii::orc, *tileset::orc,"orc", true)
} else {
Object::new(x, y, ascii::troll, *tileset::troll,"troll", true)
};
monster.alive = true;
objects.push(monster);
spawn_amount +=1;
}
spawn_attempts +=1;
}
println!("spawn amount: {} spawn_attempts: {}", spawn_amount, spawn_attempts);
map
}
fn caves_sim_step(&mut self) {
//We need to create a new map since updating the map in place will cause wonky behaviours.
//TODO from a memory perspective we could just use boolean values to represent the walls
//this will save memory from the map allocations
//or... maybe just have 2 maps at a given time and free the last map once we are done with it.
//arena allocator as well!
let mut new_map = Map::new(self.width, self.height, Tile::wall());
let death_limit = 3;
let birth_limit = 4;
for x in 0 .. self.width {
for y in 0 .. self.height {
let empty_neighbor_count = self.count_empty_neighbours(x,y);
//The new value is based on our simulation rules
//First, if a cell is empty but has too few neighbours, fill
if !self.at(x,y).is_wall() {
if empty_neighbor_count < death_limit {
new_map.set(x,y, Tile::wall());
}
else{
new_map.set(x,y, Tile::empty());
}
}
else{
//Otherwise, if the cell is filled now, check if it has the right number of neighbours to be cleared
if empty_neighbor_count > birth_limit {
new_map.set(x,y, Tile::empty());
}
else{
new_map.set(x,y, Tile::wall());
}
}
}
}
*self = new_map;
}
//We should create a unit test for this..
pub fn count_empty_neighbours(&self, x:i32, y:i32) -> i32{
let mut count = 0;
for i in -1 .. 2 {
for j in -1 .. 2 {
let neighbour_x = x + i;
let neighbour_y = y + j;
//if we're looking at the middle point do nothing
if i == 0 && j == 0 {}
else if neighbour_x < 0 || neighbour_y < 0 || neighbour_x >= self.width() || neighbour_y >= self.height() {
//Out of bounds. Count as a neighbor?
count += 1;
}else if !self.at(neighbour_x, neighbour_y).is_wall() {
count += 1;
}
}
}
count
}
} | random_line_split | |
map.rs | #![allow(dead_code)]
extern crate rand;
use rand::Rng;
use std::cmp;
use game::rect::*;
use game::tile::*;
use game::object::*;
use game::draw_info::*;
use game::is_blocked;
pub struct Map {
tiles: Vec<Tile>,
width:i32,
height:i32,
out_of_bounds_tile: Tile,
}
//TODO Maybe one day we can implement an iterator over the map
//that will give the (x,y) coord of the tile and the tile itself
impl Map {
//We use i32's for the map's width / height because
//easier intergration with libtcod
//less wonky math when dealing with negatives
pub fn new(width:i32, height:i32, default_tile:Tile) -> Self {
assert!(width > 0, "width must be greater than 0!");
assert!(height > 0, "height must be greater than 0!");
Map {
tiles: vec![default_tile; (height * width) as usize],
width:width,
height:height,
out_of_bounds_tile: Tile::wall(),
}
}
pub fn in_bounds(&self, x:i32, y:i32) -> bool {
x >= 0
&& y >= 0
&& x < self.width()
&& y < self.height()
}
fn index_at(&self, x:i32, y:i32) -> usize {
return (y * self.width() + x) as usize;
}
pub fn at(&self, x:i32, y:i32) -> &Tile {
if !self.in_bounds(x,y) {
return &self.out_of_bounds_tile;
}
&self.tiles[self.index_at(x,y)]
}
pub fn at_mut(&mut self, x:i32, y:i32) -> &mut Tile {
let index = self.index_at(x,y);
&mut self.tiles[index]
}
pub fn set(&mut self, x:i32, y:i32, tile:Tile){
let index = self.index_at(x,y);
self.tiles[index] = tile;
}
pub fn width(&self) -> i32 {
self.width
}
pub fn height(&self) -> i32 {
self.height
}
fn create_room(&mut self, room: Rect, ) {
for x in (room.x1 + 1) .. room.x2 {
for y in (room.y1 + 1) .. room.y2 {
self.set(x,y,Tile::empty());
}
}
}
fn create_v_tunnel(&mut self, y1: i32, y2: i32, x: i32) {
for y in cmp::min(y1, y2)..(cmp::max(y1, y2) + 1) {
self.set(x,y, Tile::empty());
}
}
fn create_h_tunnel(&mut self, x1: i32, x2: i32, y: i32) {
for x in cmp::min(x1, x2)..(cmp::max(x1, x2) + 1) {
self.set(x,y, Tile::empty());
}
}
pub fn create_random_rooms(width:i32, height:i32, objects:&mut Vec<Object>) -> (Self, (i32,i32)){
const ROOM_MAX_SIZE: i32 = 10;
const ROOM_MIN_SIZE: i32 = 6;
const MAX_ROOMS: i32 = 40;
//set everything to a wall first.
let mut map = Map::new(width,height, Tile::wall());
//our starting position will be in the first valid room's center.
let mut starting_position = (0, 0);
//Then "carve" the empty rooms out.
let mut rooms = vec![];
//save local copy of thread_rng. Mostly for readability
let mut rng = rand::thread_rng();
for _ in 0..MAX_ROOMS {
// random width and height
let w = rng.gen_range(ROOM_MIN_SIZE, ROOM_MAX_SIZE + 1);
let h = rng.gen_range(ROOM_MIN_SIZE, ROOM_MAX_SIZE + 1);
// random position without going out of the boundaries of the map
let x = rng.gen_range(0, map.width() - w);
let y = rng.gen_range(0, map.height() - h);
let new_room = Rect::new(x, y, w, h);
// run through the other rooms and see if they intersect with this one
let failed = rooms.iter().any(|other_room| new_room.intersects_with(other_room));
// this means there are no intersections, so this room is valid
if !failed {
// "carve" it to the map's tiles
map.create_room(new_room);
//TODO just for the hell of it make it so the player spawns randomly in the first room.
let (new_x, new_y) = new_room.center();
Map::place_objects(new_room, objects);
if rooms.is_empty() {
//First room since there isnt any other rooms
starting_position = (new_x, new_y);
}else{
//Non first room.
// all rooms after the first:
// connect it to the previous room with a tunnel
// center coordinates of the previous room
let (prev_x, prev_y) = rooms[rooms.len() - 1].center();
// draw a coin (random bool value -- either true or false)
if rand::random() {
// first move horizontally, then vertically
map.create_h_tunnel(prev_x, new_x, prev_y);
map.create_v_tunnel(prev_y, new_y, new_x);
} else {
// first move vertically, then horizontally
map.create_v_tunnel(prev_y, new_y, prev_x);
map.create_h_tunnel(prev_x, new_x, new_y);
}
}
rooms.push(new_room);
}
}
(map, starting_position)
}
pub fn place_objects(room: Rect, objects: &mut Vec<Object>) {
let MAX_ROOM_MONSTERS = 3;
// choose random number of monsters
let num_monsters = rand::thread_rng().gen_range(0, MAX_ROOM_MONSTERS + 1);
for _ in 0..num_monsters {
// choose random spot for this monster
let x = rand::thread_rng().gen_range(room.x1 + 1, room.x2);
let y = rand::thread_rng().gen_range(room.y1 + 1, room.y2);
let mut monster = if rand::random::<f32>() < 0.8 { // 80% chance of getting an orc
// create an orc
Object::new(x, y, ascii::orc, *tileset::orc,"orc", true)
} else {
Object::new(x, y, ascii::troll, *tileset::troll,"troll", true)
};
monster.alive = true;
objects.push(monster);
}
}
//followed
//https://gamedevelopment.tutsplus.com/tutorials/generate-random-cave-levels-using-cellular-automata--gamedev-9664
pub fn create_caves(width:i32, height:i32, objects:&mut Vec<Object>) -> Self {
//set everything to a wall first.
let mut map = Map::new(width,height, Tile::wall());
let mut rng = rand::thread_rng();
let chance_to_be_empty = 0.46;
for tile in map.tiles.iter_mut(){
let chance = rng.gen::<f32>();
if chance < chance_to_be_empty |
}
let sim_steps = 6;
for _ in 0 .. sim_steps {
map.caves_sim_step();
}
let max_spawn_chances = 200;
let mut spawn_attempts = 0;
let desired_monsters = 15;
let mut spawn_amount = 0;
while spawn_attempts < max_spawn_chances && spawn_amount <= desired_monsters {
let x = rng.gen_range(0, map.width());
let y = rng.gen_range(0, map.height());
let tile_blocked = is_blocked(x,y, &map, objects);
if !tile_blocked {
let mut monster = if rand::random::<f32>() < 0.8 { // 80% chance of getting an orc
// create an orc
Object::new(x, y, ascii::orc, *tileset::orc,"orc", true)
} else {
Object::new(x, y, ascii::troll, *tileset::troll,"troll", true)
};
monster.alive = true;
objects.push(monster);
spawn_amount +=1;
}
spawn_attempts +=1;
}
println!("spawn amount: {} spawn_attempts: {}", spawn_amount, spawn_attempts);
map
}
fn caves_sim_step(&mut self) {
//We need to create a new map since updating the map in place will cause wonky behaviours.
//TODO from a memory perspective we could just use boolean values to represent the walls
//this will save memory from the map allocations
//or... maybe just have 2 maps at a given time and free the last map once we are done with it.
//arena allocator as well!
let mut new_map = Map::new(self.width, self.height, Tile::wall());
let death_limit = 3;
let birth_limit = 4;
for x in 0 .. self.width {
for y in 0 .. self.height {
let empty_neighbor_count = self.count_empty_neighbours(x,y);
//The new value is based on our simulation rules
//First, if a cell is empty but has too few neighbours, fill
if !self.at(x,y).is_wall() {
if empty_neighbor_count < death_limit {
new_map.set(x,y, Tile::wall());
}
else{
new_map.set(x,y, Tile::empty());
}
}
else{
//Otherwise, if the cell is filled now, check if it has the right number of neighbours to be cleared
if empty_neighbor_count > birth_limit {
new_map.set(x,y, Tile::empty());
}
else{
new_map.set(x,y, Tile::wall());
}
}
}
}
*self = new_map;
}
//We should create a unit test for this..
pub fn count_empty_neighbours(&self, x:i32, y:i32) -> i32{
let mut count = 0;
for i in -1 .. 2 {
for j in -1 .. 2 {
let neighbour_x = x + i;
let neighbour_y = y + j;
//if we're looking at the middle point do nothing
if i == 0 && j == 0 {}
else if neighbour_x < 0 || neighbour_y < 0 || neighbour_x >= self.width() || neighbour_y >= self.height() {
//Out of bounds. Count as a neighbor?
count += 1;
}else if !self.at(neighbour_x, neighbour_y).is_wall() {
count += 1;
}
}
}
count
}
} | {
*tile = Tile::empty();
} | conditional_block |
map.rs | #![allow(dead_code)]
extern crate rand;
use rand::Rng;
use std::cmp;
use game::rect::*;
use game::tile::*;
use game::object::*;
use game::draw_info::*;
use game::is_blocked;
pub struct Map {
tiles: Vec<Tile>,
width:i32,
height:i32,
out_of_bounds_tile: Tile,
}
//TODO Maybe one day we can implement an iterator over the map
//that will give the (x,y) coord of the tile and the tile itself
impl Map {
//We use i32's for the map's width / height because
//easier intergration with libtcod
//less wonky math when dealing with negatives
pub fn | (width:i32, height:i32, default_tile:Tile) -> Self {
assert!(width > 0, "width must be greater than 0!");
assert!(height > 0, "height must be greater than 0!");
Map {
tiles: vec![default_tile; (height * width) as usize],
width:width,
height:height,
out_of_bounds_tile: Tile::wall(),
}
}
pub fn in_bounds(&self, x:i32, y:i32) -> bool {
x >= 0
&& y >= 0
&& x < self.width()
&& y < self.height()
}
fn index_at(&self, x:i32, y:i32) -> usize {
return (y * self.width() + x) as usize;
}
pub fn at(&self, x:i32, y:i32) -> &Tile {
if !self.in_bounds(x,y) {
return &self.out_of_bounds_tile;
}
&self.tiles[self.index_at(x,y)]
}
pub fn at_mut(&mut self, x:i32, y:i32) -> &mut Tile {
let index = self.index_at(x,y);
&mut self.tiles[index]
}
pub fn set(&mut self, x:i32, y:i32, tile:Tile){
let index = self.index_at(x,y);
self.tiles[index] = tile;
}
pub fn width(&self) -> i32 {
self.width
}
pub fn height(&self) -> i32 {
self.height
}
fn create_room(&mut self, room: Rect, ) {
for x in (room.x1 + 1) .. room.x2 {
for y in (room.y1 + 1) .. room.y2 {
self.set(x,y,Tile::empty());
}
}
}
fn create_v_tunnel(&mut self, y1: i32, y2: i32, x: i32) {
for y in cmp::min(y1, y2)..(cmp::max(y1, y2) + 1) {
self.set(x,y, Tile::empty());
}
}
fn create_h_tunnel(&mut self, x1: i32, x2: i32, y: i32) {
for x in cmp::min(x1, x2)..(cmp::max(x1, x2) + 1) {
self.set(x,y, Tile::empty());
}
}
pub fn create_random_rooms(width:i32, height:i32, objects:&mut Vec<Object>) -> (Self, (i32,i32)){
const ROOM_MAX_SIZE: i32 = 10;
const ROOM_MIN_SIZE: i32 = 6;
const MAX_ROOMS: i32 = 40;
//set everything to a wall first.
let mut map = Map::new(width,height, Tile::wall());
//our starting position will be in the first valid room's center.
let mut starting_position = (0, 0);
//Then "carve" the empty rooms out.
let mut rooms = vec![];
//save local copy of thread_rng. Mostly for readability
let mut rng = rand::thread_rng();
for _ in 0..MAX_ROOMS {
// random width and height
let w = rng.gen_range(ROOM_MIN_SIZE, ROOM_MAX_SIZE + 1);
let h = rng.gen_range(ROOM_MIN_SIZE, ROOM_MAX_SIZE + 1);
// random position without going out of the boundaries of the map
let x = rng.gen_range(0, map.width() - w);
let y = rng.gen_range(0, map.height() - h);
let new_room = Rect::new(x, y, w, h);
// run through the other rooms and see if they intersect with this one
let failed = rooms.iter().any(|other_room| new_room.intersects_with(other_room));
// this means there are no intersections, so this room is valid
if !failed {
// "carve" it to the map's tiles
map.create_room(new_room);
//TODO just for the hell of it make it so the player spawns randomly in the first room.
let (new_x, new_y) = new_room.center();
Map::place_objects(new_room, objects);
if rooms.is_empty() {
//First room since there isnt any other rooms
starting_position = (new_x, new_y);
}else{
//Non first room.
// all rooms after the first:
// connect it to the previous room with a tunnel
// center coordinates of the previous room
let (prev_x, prev_y) = rooms[rooms.len() - 1].center();
// draw a coin (random bool value -- either true or false)
if rand::random() {
// first move horizontally, then vertically
map.create_h_tunnel(prev_x, new_x, prev_y);
map.create_v_tunnel(prev_y, new_y, new_x);
} else {
// first move vertically, then horizontally
map.create_v_tunnel(prev_y, new_y, prev_x);
map.create_h_tunnel(prev_x, new_x, new_y);
}
}
rooms.push(new_room);
}
}
(map, starting_position)
}
pub fn place_objects(room: Rect, objects: &mut Vec<Object>) {
let MAX_ROOM_MONSTERS = 3;
// choose random number of monsters
let num_monsters = rand::thread_rng().gen_range(0, MAX_ROOM_MONSTERS + 1);
for _ in 0..num_monsters {
// choose random spot for this monster
let x = rand::thread_rng().gen_range(room.x1 + 1, room.x2);
let y = rand::thread_rng().gen_range(room.y1 + 1, room.y2);
let mut monster = if rand::random::<f32>() < 0.8 { // 80% chance of getting an orc
// create an orc
Object::new(x, y, ascii::orc, *tileset::orc,"orc", true)
} else {
Object::new(x, y, ascii::troll, *tileset::troll,"troll", true)
};
monster.alive = true;
objects.push(monster);
}
}
//followed
//https://gamedevelopment.tutsplus.com/tutorials/generate-random-cave-levels-using-cellular-automata--gamedev-9664
pub fn create_caves(width:i32, height:i32, objects:&mut Vec<Object>) -> Self {
//set everything to a wall first.
let mut map = Map::new(width,height, Tile::wall());
let mut rng = rand::thread_rng();
let chance_to_be_empty = 0.46;
for tile in map.tiles.iter_mut(){
let chance = rng.gen::<f32>();
if chance < chance_to_be_empty {
*tile = Tile::empty();
}
}
let sim_steps = 6;
for _ in 0 .. sim_steps {
map.caves_sim_step();
}
let max_spawn_chances = 200;
let mut spawn_attempts = 0;
let desired_monsters = 15;
let mut spawn_amount = 0;
while spawn_attempts < max_spawn_chances && spawn_amount <= desired_monsters {
let x = rng.gen_range(0, map.width());
let y = rng.gen_range(0, map.height());
let tile_blocked = is_blocked(x,y, &map, objects);
if !tile_blocked {
let mut monster = if rand::random::<f32>() < 0.8 { // 80% chance of getting an orc
// create an orc
Object::new(x, y, ascii::orc, *tileset::orc,"orc", true)
} else {
Object::new(x, y, ascii::troll, *tileset::troll,"troll", true)
};
monster.alive = true;
objects.push(monster);
spawn_amount +=1;
}
spawn_attempts +=1;
}
println!("spawn amount: {} spawn_attempts: {}", spawn_amount, spawn_attempts);
map
}
fn caves_sim_step(&mut self) {
//We need to create a new map since updating the map in place will cause wonky behaviours.
//TODO from a memory perspective we could just use boolean values to represent the walls
//this will save memory from the map allocations
//or... maybe just have 2 maps at a given time and free the last map once we are done with it.
//arena allocator as well!
let mut new_map = Map::new(self.width, self.height, Tile::wall());
let death_limit = 3;
let birth_limit = 4;
for x in 0 .. self.width {
for y in 0 .. self.height {
let empty_neighbor_count = self.count_empty_neighbours(x,y);
//The new value is based on our simulation rules
//First, if a cell is empty but has too few neighbours, fill
if !self.at(x,y).is_wall() {
if empty_neighbor_count < death_limit {
new_map.set(x,y, Tile::wall());
}
else{
new_map.set(x,y, Tile::empty());
}
}
else{
//Otherwise, if the cell is filled now, check if it has the right number of neighbours to be cleared
if empty_neighbor_count > birth_limit {
new_map.set(x,y, Tile::empty());
}
else{
new_map.set(x,y, Tile::wall());
}
}
}
}
*self = new_map;
}
//We should create a unit test for this..
pub fn count_empty_neighbours(&self, x:i32, y:i32) -> i32{
let mut count = 0;
for i in -1 .. 2 {
for j in -1 .. 2 {
let neighbour_x = x + i;
let neighbour_y = y + j;
//if we're looking at the middle point do nothing
if i == 0 && j == 0 {}
else if neighbour_x < 0 || neighbour_y < 0 || neighbour_x >= self.width() || neighbour_y >= self.height() {
//Out of bounds. Count as a neighbor?
count += 1;
}else if !self.at(neighbour_x, neighbour_y).is_wall() {
count += 1;
}
}
}
count
}
} | new | identifier_name |
encode.rs | use super::constants::{CR, DEFAULT_LINE_SIZE, DOT, ESCAPE, LF, NUL};
use super::errors::EncodeError;
use std::fs::File;
use std::io::{BufReader, BufWriter, Read, Seek, SeekFrom, Write};
use std::path::Path;
/// Options for encoding.
/// The entry point for encoding a file (part)
/// to a file or (TCP) stream.
#[derive(Debug)]
pub struct EncodeOptions {
line_length: u8,
parts: u32,
part: u32,
begin: u64,
end: u64,
}
impl Default for EncodeOptions {
/// Constructs a new EncodeOptions instance, with the following defaults:
/// line_length = 128.
/// parts = 1,
/// part = begin = end = 0
fn default() -> Self {
EncodeOptions {
line_length: DEFAULT_LINE_SIZE,
parts: 1,
part: 0,
begin: 0,
end: 0,
}
}
}
impl EncodeOptions {
/// Constructs a new EncodeOptions with defaults, see Default impl.
pub fn new() -> EncodeOptions {
Default::default()
}
/// Sets the maximum line length.
pub fn line_length(mut self, line_length: u8) -> EncodeOptions {
self.line_length = line_length;
self
}
/// Sets the number of parts (default=1).
/// When the number of parts is 1, no '=ypart' line will be written
/// in the ouput.
pub fn parts(mut self, parts: u32) -> EncodeOptions {
self.parts = parts;
self
}
/// Sets the part number.
/// Only used when `parts > 1`.
/// The part number count starts at 1.
pub fn part(mut self, part: u32) -> EncodeOptions {
self.part = part;
self
}
/// Sets the begin (which is the file offset + 1).
/// Only used when `parts > 1`.
/// The size of the part is `end - begin + 1`.
pub fn begin(mut self, begin: u64) -> EncodeOptions {
self.begin = begin;
self
}
/// Sets the end.
/// Only used when `parts > 1`.
/// The size of the part is `end - begin + 1`.
/// `end` should be larger than `begin`, otherwise an overflow error occurrs.
pub fn end(mut self, end: u64) -> EncodeOptions {
self.end = end;
self
}
/// Encodes the input file and writes it to the writer. For multi-part encoding, only
/// one part is encoded. In case of multipart, the part number, begin and end offset need
/// to be specified in the `EncodeOptions`. When directly encoding to an NNTP stream, the
/// caller needs to take care of the message header and end of multi-line block (`".\r\n"`).
///
/// # Example
/// ```rust,no_run
/// let encode_options = yenc::EncodeOptions::default()
/// .parts(2)
/// .part(1)
/// .begin(1)
/// .end(38400);
/// let mut output_file = std::fs::File::create("test1.bin.yenc.001").unwrap();
/// encode_options.encode_file("test1.bin", &mut output_file).unwrap();
/// ```
/// # Errors
/// - when the output file already exists
///
pub fn encode_file<P, W>(&self, input_path: P, output: W) -> Result<(), EncodeError>
where
P: AsRef<Path>,
W: Write,
{
let input_filename = input_path.as_ref().file_name();
let input_filename = match input_filename {
Some(s) => s.to_str().unwrap_or(""),
None => "",
};
let input_file = File::open(&input_path)?;
let length = input_file.metadata()?.len();
self.encode_stream(input_file, output, length, input_filename)
}
/// Checks the options. Returns Ok(()) if all options are ok.
/// # Return
/// - EncodeError::PartNumberMissing
/// - EncodeError::PartBeginOffsetMissing
/// - EncodeError::PartEndOffsetMissing
/// - EncodeError::PartOffsetsInvalidRange
pub fn check_options(&self) -> Result<(), EncodeError> {
if self.parts > 1 && self.part == 0 {
return Err(EncodeError::PartNumberMissing);
}
if self.parts > 1 && self.begin == 0 {
return Err(EncodeError::PartBeginOffsetMissing);
}
if self.parts > 1 && self.end == 0 {
return Err(EncodeError::PartEndOffsetMissing);
}
if self.parts > 1 && self.begin > self.end {
return Err(EncodeError::PartOffsetsInvalidRange);
}
Ok(())
}
/// Encodes the date from input from stream and writes the encoded data to the output stream.
/// The input stream does not need to be a file, therefore, size and input_filename
/// must be specified. The input_filename ends up as the filename in the yenc header.
#[allow(clippy::write_with_newline)]
pub fn encode_stream<R, W>(
&self,
input: R,
output: W,
length: u64,
input_filename: &str,
) -> Result<(), EncodeError>
where
R: Read + Seek,
W: Write,
{
let mut rdr = BufReader::new(input);
let mut checksum = crc32fast::Hasher::new();
let mut buffer = [0u8; 8192];
let mut col = 0;
let mut num_bytes = 0;
let mut output = BufWriter::new(output);
self.check_options()?;
if self.parts == 1 {
write!(
output,
"=ybegin line={} size={} name={}\r\n",
self.line_length, length, input_filename
)?;
} else {
write!(
output,
"=ybegin part={} line={} size={} name={}\r\n",
self.part, self.line_length, length, input_filename
)?;
}
if self.parts > 1 {
write!(output, "=ypart begin={} end={}\r\n", self.begin, self.end)?;
}
rdr.seek(SeekFrom::Start(self.begin - 1))?;
let mut remainder = (self.end - self.begin + 1) as usize;
while remainder > 0 {
let buf_slice = if remainder > buffer.len() {
&mut buffer[..]
} else {
&mut buffer[0..remainder]
};
rdr.read_exact(buf_slice)?;
checksum.update(buf_slice);
num_bytes += buf_slice.len();
col = encode_buffer(buf_slice, col, self.line_length, &mut output)?;
remainder -= buf_slice.len();
}
if self.parts > 1 {
write!(
output,
"\r\n=yend size={} part={} pcrc32={:08x}\r\n",
num_bytes,
self.part,
checksum.finalize()
)?;
} else {
write!(
output,
"\r\n=yend size={} crc32={:08x}\r\n",
num_bytes,
checksum.finalize()
)?;
}
Ok(())
}
}
/// Encodes the input buffer and writes it to the writer.
///
/// Lines are wrapped with a maximum of `line_length` characters per line.
/// Does not include the header and footer lines.
/// Only `encode_stream` and `encode_file` produce the headers in the output.
/// The `col` parameter is the starting offset in the row. The result contains the new offset.
pub fn encode_buffer<W>(
input: &[u8],
col: u8,
line_length: u8,
writer: W,
) -> Result<u8, EncodeError>
where
W: Write,
{
let mut col = col;
let mut writer = writer;
let mut v = Vec::<u8>::with_capacity(((input.len() as f64) * 1.04) as usize);
input.iter().for_each(|&b| {
let encoded = encode_byte(b);
v.push(encoded.0);
col += match encoded.0 {
ESCAPE => {
v.push(encoded.1);
2
}
DOT if col == 0 => {
v.push(DOT);
2
}
_ => 1,
};
if col >= line_length {
v.push(CR);
v.push(LF);
col = 0;
}
});
writer.write_all(&v)?;
Ok(col)
}
#[inline(always)]
fn encode_byte(input_byte: u8) -> (u8, u8) {
let mut output = (0, 0);
let output_byte = input_byte.overflowing_add(42).0;
match output_byte {
LF | CR | NUL | ESCAPE => {
output.0 = ESCAPE;
output.1 = output_byte.overflowing_add(64).0;
}
_ => {
output.0 = output_byte;
}
};
output
}
#[cfg(test)]
mod tests {
use super::super::constants::{CR, ESCAPE, LF, NUL};
use super::{encode_buffer, encode_byte, EncodeOptions};
#[test]
fn escape_null() {
assert_eq!((ESCAPE, 0x40), encode_byte(214));
}
/*
#[test]
fn escape_tab() {
let mut output = [0u8; 2];
assert_eq!(2, encode_byte(214 + TAB, &mut output));
assert_eq!(vec![ESCAPE, 0x49], output);
}
*/
#[test]
fn escape_lf() {
assert_eq!((ESCAPE, 0x4A), encode_byte(214 + LF));
}
#[test]
fn escape_cr() {
assert_eq!((ESCAPE, 0x4D), encode_byte(214 + CR));
}
/*
#[test]
fn escape_space() {
let mut output = [0u8; 2];
assert_eq!(2, encode_byte(214 + SPACE, &mut output));
assert_eq!(vec![ESCAPE, 0x60], output);
}
*/
#[test]
fn escape_equal_sign() {
assert_eq!((ESCAPE, 0x7D), encode_byte(ESCAPE - 42));
}
#[test]
fn non_escaped() {
for x in 0..256u16 {
let encoded = (x as u8).overflowing_add(42).0;
if encoded != NUL && encoded != CR && encoded != LF && encoded != ESCAPE {
assert_eq!((encoded, 0), encode_byte(x as u8));
}
}
}
#[test]
fn test_encode_buffer() {
let buffer = (0..256u16).map(|c| c as u8).collect::<Vec<u8>>();
#[rustfmt::skip]
const EXPECTED: [u8; 264] =
[42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61,
125, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80,
81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100,
101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116,
117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, | 211, 212, 213, 214, 215, 216,217, 218, 219, 220, 221, 222, 223, 224, 225, 226,
227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242,
243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 61, 64, 1, 2, 3,
4, 5, 6, 7, 8, 9, 61, 74, 11, 12, 61, 77, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23,
24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 13, 10, 38, 39, 40, 41];
let mut encoded = Vec::<u8>::new();
let result = encode_buffer(&buffer, 0, 128, &mut encoded);
assert!(result.is_ok());
assert_eq!(encoded.as_slice(), &EXPECTED[..]);
}
#[test]
fn encode_options_invalid_parts() {
let encode_options = EncodeOptions::new().parts(2).begin(1).end(38400);
let vr = encode_options.check_options();
assert!(vr.is_err());
}
#[test]
fn encode_options_invalid_begin() {
let encode_options = EncodeOptions::new().parts(2).part(1).end(38400);
let vr = encode_options.check_options();
assert!(vr.is_err());
}
#[test]
fn encode_options_invalid_end() {
let encode_options = EncodeOptions::new().parts(2).part(1).begin(1);
let vr = encode_options.check_options();
assert!(vr.is_err());
}
#[test]
fn encode_options_invalid_range() {
let encode_options = EncodeOptions::new().parts(2).part(1).begin(38400).end(1);
let vr = encode_options.check_options();
assert!(vr.is_err());
}
} | 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148,
149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164,
165, 166, 167, 168, 13, 10, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178,
179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194,
195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, | random_line_split |
encode.rs | use super::constants::{CR, DEFAULT_LINE_SIZE, DOT, ESCAPE, LF, NUL};
use super::errors::EncodeError;
use std::fs::File;
use std::io::{BufReader, BufWriter, Read, Seek, SeekFrom, Write};
use std::path::Path;
/// Options for encoding.
/// The entry point for encoding a file (part)
/// to a file or (TCP) stream.
#[derive(Debug)]
pub struct EncodeOptions {
line_length: u8,
parts: u32,
part: u32,
begin: u64,
end: u64,
}
impl Default for EncodeOptions {
/// Constructs a new EncodeOptions instance, with the following defaults:
/// line_length = 128.
/// parts = 1,
/// part = begin = end = 0
fn default() -> Self {
EncodeOptions {
line_length: DEFAULT_LINE_SIZE,
parts: 1,
part: 0,
begin: 0,
end: 0,
}
}
}
impl EncodeOptions {
/// Constructs a new EncodeOptions with defaults, see Default impl.
pub fn new() -> EncodeOptions {
Default::default()
}
/// Sets the maximum line length.
pub fn line_length(mut self, line_length: u8) -> EncodeOptions {
self.line_length = line_length;
self
}
/// Sets the number of parts (default=1).
/// When the number of parts is 1, no '=ypart' line will be written
/// in the ouput.
pub fn parts(mut self, parts: u32) -> EncodeOptions {
self.parts = parts;
self
}
/// Sets the part number.
/// Only used when `parts > 1`.
/// The part number count starts at 1.
pub fn part(mut self, part: u32) -> EncodeOptions {
self.part = part;
self
}
/// Sets the begin (which is the file offset + 1).
/// Only used when `parts > 1`.
/// The size of the part is `end - begin + 1`.
pub fn begin(mut self, begin: u64) -> EncodeOptions {
self.begin = begin;
self
}
/// Sets the end.
/// Only used when `parts > 1`.
/// The size of the part is `end - begin + 1`.
/// `end` should be larger than `begin`, otherwise an overflow error occurrs.
pub fn end(mut self, end: u64) -> EncodeOptions {
self.end = end;
self
}
/// Encodes the input file and writes it to the writer. For multi-part encoding, only
/// one part is encoded. In case of multipart, the part number, begin and end offset need
/// to be specified in the `EncodeOptions`. When directly encoding to an NNTP stream, the
/// caller needs to take care of the message header and end of multi-line block (`".\r\n"`).
///
/// # Example
/// ```rust,no_run
/// let encode_options = yenc::EncodeOptions::default()
/// .parts(2)
/// .part(1)
/// .begin(1)
/// .end(38400);
/// let mut output_file = std::fs::File::create("test1.bin.yenc.001").unwrap();
/// encode_options.encode_file("test1.bin", &mut output_file).unwrap();
/// ```
/// # Errors
/// - when the output file already exists
///
pub fn encode_file<P, W>(&self, input_path: P, output: W) -> Result<(), EncodeError>
where
P: AsRef<Path>,
W: Write,
{
let input_filename = input_path.as_ref().file_name();
let input_filename = match input_filename {
Some(s) => s.to_str().unwrap_or(""),
None => "",
};
let input_file = File::open(&input_path)?;
let length = input_file.metadata()?.len();
self.encode_stream(input_file, output, length, input_filename)
}
/// Checks the options. Returns Ok(()) if all options are ok.
/// # Return
/// - EncodeError::PartNumberMissing
/// - EncodeError::PartBeginOffsetMissing
/// - EncodeError::PartEndOffsetMissing
/// - EncodeError::PartOffsetsInvalidRange
pub fn check_options(&self) -> Result<(), EncodeError> {
if self.parts > 1 && self.part == 0 {
return Err(EncodeError::PartNumberMissing);
}
if self.parts > 1 && self.begin == 0 {
return Err(EncodeError::PartBeginOffsetMissing);
}
if self.parts > 1 && self.end == 0 {
return Err(EncodeError::PartEndOffsetMissing);
}
if self.parts > 1 && self.begin > self.end {
return Err(EncodeError::PartOffsetsInvalidRange);
}
Ok(())
}
/// Encodes the date from input from stream and writes the encoded data to the output stream.
/// The input stream does not need to be a file, therefore, size and input_filename
/// must be specified. The input_filename ends up as the filename in the yenc header.
#[allow(clippy::write_with_newline)]
pub fn encode_stream<R, W>(
&self,
input: R,
output: W,
length: u64,
input_filename: &str,
) -> Result<(), EncodeError>
where
R: Read + Seek,
W: Write,
{
let mut rdr = BufReader::new(input);
let mut checksum = crc32fast::Hasher::new();
let mut buffer = [0u8; 8192];
let mut col = 0;
let mut num_bytes = 0;
let mut output = BufWriter::new(output);
self.check_options()?;
if self.parts == 1 {
write!(
output,
"=ybegin line={} size={} name={}\r\n",
self.line_length, length, input_filename
)?;
} else {
write!(
output,
"=ybegin part={} line={} size={} name={}\r\n",
self.part, self.line_length, length, input_filename
)?;
}
if self.parts > 1 {
write!(output, "=ypart begin={} end={}\r\n", self.begin, self.end)?;
}
rdr.seek(SeekFrom::Start(self.begin - 1))?;
let mut remainder = (self.end - self.begin + 1) as usize;
while remainder > 0 {
let buf_slice = if remainder > buffer.len() {
&mut buffer[..]
} else {
&mut buffer[0..remainder]
};
rdr.read_exact(buf_slice)?;
checksum.update(buf_slice);
num_bytes += buf_slice.len();
col = encode_buffer(buf_slice, col, self.line_length, &mut output)?;
remainder -= buf_slice.len();
}
if self.parts > 1 {
write!(
output,
"\r\n=yend size={} part={} pcrc32={:08x}\r\n",
num_bytes,
self.part,
checksum.finalize()
)?;
} else {
write!(
output,
"\r\n=yend size={} crc32={:08x}\r\n",
num_bytes,
checksum.finalize()
)?;
}
Ok(())
}
}
/// Encodes the input buffer and writes it to the writer.
///
/// Lines are wrapped with a maximum of `line_length` characters per line.
/// Does not include the header and footer lines.
/// Only `encode_stream` and `encode_file` produce the headers in the output.
/// The `col` parameter is the starting offset in the row. The result contains the new offset.
pub fn encode_buffer<W>(
input: &[u8],
col: u8,
line_length: u8,
writer: W,
) -> Result<u8, EncodeError>
where
W: Write,
{
let mut col = col;
let mut writer = writer;
let mut v = Vec::<u8>::with_capacity(((input.len() as f64) * 1.04) as usize);
input.iter().for_each(|&b| {
let encoded = encode_byte(b);
v.push(encoded.0);
col += match encoded.0 {
ESCAPE => {
v.push(encoded.1);
2
}
DOT if col == 0 => {
v.push(DOT);
2
}
_ => 1,
};
if col >= line_length {
v.push(CR);
v.push(LF);
col = 0;
}
});
writer.write_all(&v)?;
Ok(col)
}
#[inline(always)]
fn encode_byte(input_byte: u8) -> (u8, u8) {
let mut output = (0, 0);
let output_byte = input_byte.overflowing_add(42).0;
match output_byte {
LF | CR | NUL | ESCAPE => {
output.0 = ESCAPE;
output.1 = output_byte.overflowing_add(64).0;
}
_ => {
output.0 = output_byte;
}
};
output
}
#[cfg(test)]
mod tests {
use super::super::constants::{CR, ESCAPE, LF, NUL};
use super::{encode_buffer, encode_byte, EncodeOptions};
#[test]
fn escape_null() {
assert_eq!((ESCAPE, 0x40), encode_byte(214));
}
/*
#[test]
fn escape_tab() {
let mut output = [0u8; 2];
assert_eq!(2, encode_byte(214 + TAB, &mut output));
assert_eq!(vec![ESCAPE, 0x49], output);
}
*/
#[test]
fn escape_lf() {
assert_eq!((ESCAPE, 0x4A), encode_byte(214 + LF));
}
#[test]
fn escape_cr() {
assert_eq!((ESCAPE, 0x4D), encode_byte(214 + CR));
}
/*
#[test]
fn escape_space() {
let mut output = [0u8; 2];
assert_eq!(2, encode_byte(214 + SPACE, &mut output));
assert_eq!(vec![ESCAPE, 0x60], output);
}
*/
#[test]
fn escape_equal_sign() {
assert_eq!((ESCAPE, 0x7D), encode_byte(ESCAPE - 42));
}
#[test]
fn non_escaped() {
for x in 0..256u16 {
let encoded = (x as u8).overflowing_add(42).0;
if encoded != NUL && encoded != CR && encoded != LF && encoded != ESCAPE {
assert_eq!((encoded, 0), encode_byte(x as u8));
}
}
}
#[test]
fn test_encode_buffer() {
let buffer = (0..256u16).map(|c| c as u8).collect::<Vec<u8>>();
#[rustfmt::skip]
const EXPECTED: [u8; 264] =
[42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61,
125, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80,
81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100,
101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116,
117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132,
133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148,
149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164,
165, 166, 167, 168, 13, 10, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178,
179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194,
195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210,
211, 212, 213, 214, 215, 216,217, 218, 219, 220, 221, 222, 223, 224, 225, 226,
227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242,
243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 61, 64, 1, 2, 3,
4, 5, 6, 7, 8, 9, 61, 74, 11, 12, 61, 77, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23,
24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 13, 10, 38, 39, 40, 41];
let mut encoded = Vec::<u8>::new();
let result = encode_buffer(&buffer, 0, 128, &mut encoded);
assert!(result.is_ok());
assert_eq!(encoded.as_slice(), &EXPECTED[..]);
}
#[test]
fn encode_options_invalid_parts() {
let encode_options = EncodeOptions::new().parts(2).begin(1).end(38400);
let vr = encode_options.check_options();
assert!(vr.is_err());
}
#[test]
fn encode_options_invalid_begin() |
#[test]
fn encode_options_invalid_end() {
let encode_options = EncodeOptions::new().parts(2).part(1).begin(1);
let vr = encode_options.check_options();
assert!(vr.is_err());
}
#[test]
fn encode_options_invalid_range() {
let encode_options = EncodeOptions::new().parts(2).part(1).begin(38400).end(1);
let vr = encode_options.check_options();
assert!(vr.is_err());
}
}
| {
let encode_options = EncodeOptions::new().parts(2).part(1).end(38400);
let vr = encode_options.check_options();
assert!(vr.is_err());
} | identifier_body |
encode.rs | use super::constants::{CR, DEFAULT_LINE_SIZE, DOT, ESCAPE, LF, NUL};
use super::errors::EncodeError;
use std::fs::File;
use std::io::{BufReader, BufWriter, Read, Seek, SeekFrom, Write};
use std::path::Path;
/// Options for encoding.
/// The entry point for encoding a file (part)
/// to a file or (TCP) stream.
#[derive(Debug)]
pub struct EncodeOptions {
line_length: u8,
parts: u32,
part: u32,
begin: u64,
end: u64,
}
impl Default for EncodeOptions {
/// Constructs a new EncodeOptions instance, with the following defaults:
/// line_length = 128.
/// parts = 1,
/// part = begin = end = 0
fn default() -> Self {
EncodeOptions {
line_length: DEFAULT_LINE_SIZE,
parts: 1,
part: 0,
begin: 0,
end: 0,
}
}
}
impl EncodeOptions {
/// Constructs a new EncodeOptions with defaults, see Default impl.
pub fn new() -> EncodeOptions {
Default::default()
}
/// Sets the maximum line length.
pub fn line_length(mut self, line_length: u8) -> EncodeOptions {
self.line_length = line_length;
self
}
/// Sets the number of parts (default=1).
/// When the number of parts is 1, no '=ypart' line will be written
/// in the ouput.
pub fn parts(mut self, parts: u32) -> EncodeOptions {
self.parts = parts;
self
}
/// Sets the part number.
/// Only used when `parts > 1`.
/// The part number count starts at 1.
pub fn part(mut self, part: u32) -> EncodeOptions {
self.part = part;
self
}
/// Sets the begin (which is the file offset + 1).
/// Only used when `parts > 1`.
/// The size of the part is `end - begin + 1`.
pub fn begin(mut self, begin: u64) -> EncodeOptions {
self.begin = begin;
self
}
/// Sets the end.
/// Only used when `parts > 1`.
/// The size of the part is `end - begin + 1`.
/// `end` should be larger than `begin`, otherwise an overflow error occurrs.
pub fn end(mut self, end: u64) -> EncodeOptions {
self.end = end;
self
}
/// Encodes the input file and writes it to the writer. For multi-part encoding, only
/// one part is encoded. In case of multipart, the part number, begin and end offset need
/// to be specified in the `EncodeOptions`. When directly encoding to an NNTP stream, the
/// caller needs to take care of the message header and end of multi-line block (`".\r\n"`).
///
/// # Example
/// ```rust,no_run
/// let encode_options = yenc::EncodeOptions::default()
/// .parts(2)
/// .part(1)
/// .begin(1)
/// .end(38400);
/// let mut output_file = std::fs::File::create("test1.bin.yenc.001").unwrap();
/// encode_options.encode_file("test1.bin", &mut output_file).unwrap();
/// ```
/// # Errors
/// - when the output file already exists
///
pub fn encode_file<P, W>(&self, input_path: P, output: W) -> Result<(), EncodeError>
where
P: AsRef<Path>,
W: Write,
{
let input_filename = input_path.as_ref().file_name();
let input_filename = match input_filename {
Some(s) => s.to_str().unwrap_or(""),
None => "",
};
let input_file = File::open(&input_path)?;
let length = input_file.metadata()?.len();
self.encode_stream(input_file, output, length, input_filename)
}
/// Checks the options. Returns Ok(()) if all options are ok.
/// # Return
/// - EncodeError::PartNumberMissing
/// - EncodeError::PartBeginOffsetMissing
/// - EncodeError::PartEndOffsetMissing
/// - EncodeError::PartOffsetsInvalidRange
pub fn check_options(&self) -> Result<(), EncodeError> {
if self.parts > 1 && self.part == 0 {
return Err(EncodeError::PartNumberMissing);
}
if self.parts > 1 && self.begin == 0 {
return Err(EncodeError::PartBeginOffsetMissing);
}
if self.parts > 1 && self.end == 0 {
return Err(EncodeError::PartEndOffsetMissing);
}
if self.parts > 1 && self.begin > self.end {
return Err(EncodeError::PartOffsetsInvalidRange);
}
Ok(())
}
/// Encodes the date from input from stream and writes the encoded data to the output stream.
/// The input stream does not need to be a file, therefore, size and input_filename
/// must be specified. The input_filename ends up as the filename in the yenc header.
#[allow(clippy::write_with_newline)]
pub fn encode_stream<R, W>(
&self,
input: R,
output: W,
length: u64,
input_filename: &str,
) -> Result<(), EncodeError>
where
R: Read + Seek,
W: Write,
{
let mut rdr = BufReader::new(input);
let mut checksum = crc32fast::Hasher::new();
let mut buffer = [0u8; 8192];
let mut col = 0;
let mut num_bytes = 0;
let mut output = BufWriter::new(output);
self.check_options()?;
if self.parts == 1 {
write!(
output,
"=ybegin line={} size={} name={}\r\n",
self.line_length, length, input_filename
)?;
} else {
write!(
output,
"=ybegin part={} line={} size={} name={}\r\n",
self.part, self.line_length, length, input_filename
)?;
}
if self.parts > 1 {
write!(output, "=ypart begin={} end={}\r\n", self.begin, self.end)?;
}
rdr.seek(SeekFrom::Start(self.begin - 1))?;
let mut remainder = (self.end - self.begin + 1) as usize;
while remainder > 0 {
let buf_slice = if remainder > buffer.len() {
&mut buffer[..]
} else {
&mut buffer[0..remainder]
};
rdr.read_exact(buf_slice)?;
checksum.update(buf_slice);
num_bytes += buf_slice.len();
col = encode_buffer(buf_slice, col, self.line_length, &mut output)?;
remainder -= buf_slice.len();
}
if self.parts > 1 {
write!(
output,
"\r\n=yend size={} part={} pcrc32={:08x}\r\n",
num_bytes,
self.part,
checksum.finalize()
)?;
} else {
write!(
output,
"\r\n=yend size={} crc32={:08x}\r\n",
num_bytes,
checksum.finalize()
)?;
}
Ok(())
}
}
/// Encodes the input buffer and writes it to the writer.
///
/// Lines are wrapped with a maximum of `line_length` characters per line.
/// Does not include the header and footer lines.
/// Only `encode_stream` and `encode_file` produce the headers in the output.
/// The `col` parameter is the starting offset in the row. The result contains the new offset.
pub fn encode_buffer<W>(
input: &[u8],
col: u8,
line_length: u8,
writer: W,
) -> Result<u8, EncodeError>
where
W: Write,
{
let mut col = col;
let mut writer = writer;
let mut v = Vec::<u8>::with_capacity(((input.len() as f64) * 1.04) as usize);
input.iter().for_each(|&b| {
let encoded = encode_byte(b);
v.push(encoded.0);
col += match encoded.0 {
ESCAPE => {
v.push(encoded.1);
2
}
DOT if col == 0 => |
_ => 1,
};
if col >= line_length {
v.push(CR);
v.push(LF);
col = 0;
}
});
writer.write_all(&v)?;
Ok(col)
}
#[inline(always)]
fn encode_byte(input_byte: u8) -> (u8, u8) {
let mut output = (0, 0);
let output_byte = input_byte.overflowing_add(42).0;
match output_byte {
LF | CR | NUL | ESCAPE => {
output.0 = ESCAPE;
output.1 = output_byte.overflowing_add(64).0;
}
_ => {
output.0 = output_byte;
}
};
output
}
#[cfg(test)]
mod tests {
use super::super::constants::{CR, ESCAPE, LF, NUL};
use super::{encode_buffer, encode_byte, EncodeOptions};
#[test]
fn escape_null() {
assert_eq!((ESCAPE, 0x40), encode_byte(214));
}
/*
#[test]
fn escape_tab() {
let mut output = [0u8; 2];
assert_eq!(2, encode_byte(214 + TAB, &mut output));
assert_eq!(vec![ESCAPE, 0x49], output);
}
*/
#[test]
fn escape_lf() {
assert_eq!((ESCAPE, 0x4A), encode_byte(214 + LF));
}
#[test]
fn escape_cr() {
assert_eq!((ESCAPE, 0x4D), encode_byte(214 + CR));
}
/*
#[test]
fn escape_space() {
let mut output = [0u8; 2];
assert_eq!(2, encode_byte(214 + SPACE, &mut output));
assert_eq!(vec![ESCAPE, 0x60], output);
}
*/
#[test]
fn escape_equal_sign() {
assert_eq!((ESCAPE, 0x7D), encode_byte(ESCAPE - 42));
}
#[test]
fn non_escaped() {
for x in 0..256u16 {
let encoded = (x as u8).overflowing_add(42).0;
if encoded != NUL && encoded != CR && encoded != LF && encoded != ESCAPE {
assert_eq!((encoded, 0), encode_byte(x as u8));
}
}
}
#[test]
fn test_encode_buffer() {
let buffer = (0..256u16).map(|c| c as u8).collect::<Vec<u8>>();
#[rustfmt::skip]
const EXPECTED: [u8; 264] =
[42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61,
125, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80,
81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100,
101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116,
117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132,
133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148,
149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164,
165, 166, 167, 168, 13, 10, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178,
179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194,
195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210,
211, 212, 213, 214, 215, 216,217, 218, 219, 220, 221, 222, 223, 224, 225, 226,
227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242,
243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 61, 64, 1, 2, 3,
4, 5, 6, 7, 8, 9, 61, 74, 11, 12, 61, 77, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23,
24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 13, 10, 38, 39, 40, 41];
let mut encoded = Vec::<u8>::new();
let result = encode_buffer(&buffer, 0, 128, &mut encoded);
assert!(result.is_ok());
assert_eq!(encoded.as_slice(), &EXPECTED[..]);
}
#[test]
fn encode_options_invalid_parts() {
let encode_options = EncodeOptions::new().parts(2).begin(1).end(38400);
let vr = encode_options.check_options();
assert!(vr.is_err());
}
#[test]
fn encode_options_invalid_begin() {
let encode_options = EncodeOptions::new().parts(2).part(1).end(38400);
let vr = encode_options.check_options();
assert!(vr.is_err());
}
#[test]
fn encode_options_invalid_end() {
let encode_options = EncodeOptions::new().parts(2).part(1).begin(1);
let vr = encode_options.check_options();
assert!(vr.is_err());
}
#[test]
fn encode_options_invalid_range() {
let encode_options = EncodeOptions::new().parts(2).part(1).begin(38400).end(1);
let vr = encode_options.check_options();
assert!(vr.is_err());
}
}
| {
v.push(DOT);
2
} | conditional_block |
encode.rs | use super::constants::{CR, DEFAULT_LINE_SIZE, DOT, ESCAPE, LF, NUL};
use super::errors::EncodeError;
use std::fs::File;
use std::io::{BufReader, BufWriter, Read, Seek, SeekFrom, Write};
use std::path::Path;
/// Options for encoding.
/// The entry point for encoding a file (part)
/// to a file or (TCP) stream.
#[derive(Debug)]
pub struct EncodeOptions {
line_length: u8,
parts: u32,
part: u32,
begin: u64,
end: u64,
}
impl Default for EncodeOptions {
/// Constructs a new EncodeOptions instance, with the following defaults:
/// line_length = 128.
/// parts = 1,
/// part = begin = end = 0
fn default() -> Self {
EncodeOptions {
line_length: DEFAULT_LINE_SIZE,
parts: 1,
part: 0,
begin: 0,
end: 0,
}
}
}
impl EncodeOptions {
/// Constructs a new EncodeOptions with defaults, see Default impl.
pub fn new() -> EncodeOptions {
Default::default()
}
/// Sets the maximum line length.
pub fn line_length(mut self, line_length: u8) -> EncodeOptions {
self.line_length = line_length;
self
}
/// Sets the number of parts (default=1).
/// When the number of parts is 1, no '=ypart' line will be written
/// in the ouput.
pub fn parts(mut self, parts: u32) -> EncodeOptions {
self.parts = parts;
self
}
/// Sets the part number.
/// Only used when `parts > 1`.
/// The part number count starts at 1.
pub fn part(mut self, part: u32) -> EncodeOptions {
self.part = part;
self
}
/// Sets the begin (which is the file offset + 1).
/// Only used when `parts > 1`.
/// The size of the part is `end - begin + 1`.
pub fn begin(mut self, begin: u64) -> EncodeOptions {
self.begin = begin;
self
}
/// Sets the end.
/// Only used when `parts > 1`.
/// The size of the part is `end - begin + 1`.
/// `end` should be larger than `begin`, otherwise an overflow error occurrs.
pub fn end(mut self, end: u64) -> EncodeOptions {
self.end = end;
self
}
/// Encodes the input file and writes it to the writer. For multi-part encoding, only
/// one part is encoded. In case of multipart, the part number, begin and end offset need
/// to be specified in the `EncodeOptions`. When directly encoding to an NNTP stream, the
/// caller needs to take care of the message header and end of multi-line block (`".\r\n"`).
///
/// # Example
/// ```rust,no_run
/// let encode_options = yenc::EncodeOptions::default()
/// .parts(2)
/// .part(1)
/// .begin(1)
/// .end(38400);
/// let mut output_file = std::fs::File::create("test1.bin.yenc.001").unwrap();
/// encode_options.encode_file("test1.bin", &mut output_file).unwrap();
/// ```
/// # Errors
/// - when the output file already exists
///
pub fn encode_file<P, W>(&self, input_path: P, output: W) -> Result<(), EncodeError>
where
P: AsRef<Path>,
W: Write,
{
let input_filename = input_path.as_ref().file_name();
let input_filename = match input_filename {
Some(s) => s.to_str().unwrap_or(""),
None => "",
};
let input_file = File::open(&input_path)?;
let length = input_file.metadata()?.len();
self.encode_stream(input_file, output, length, input_filename)
}
/// Checks the options. Returns Ok(()) if all options are ok.
/// # Return
/// - EncodeError::PartNumberMissing
/// - EncodeError::PartBeginOffsetMissing
/// - EncodeError::PartEndOffsetMissing
/// - EncodeError::PartOffsetsInvalidRange
pub fn check_options(&self) -> Result<(), EncodeError> {
if self.parts > 1 && self.part == 0 {
return Err(EncodeError::PartNumberMissing);
}
if self.parts > 1 && self.begin == 0 {
return Err(EncodeError::PartBeginOffsetMissing);
}
if self.parts > 1 && self.end == 0 {
return Err(EncodeError::PartEndOffsetMissing);
}
if self.parts > 1 && self.begin > self.end {
return Err(EncodeError::PartOffsetsInvalidRange);
}
Ok(())
}
/// Encodes the date from input from stream and writes the encoded data to the output stream.
/// The input stream does not need to be a file, therefore, size and input_filename
/// must be specified. The input_filename ends up as the filename in the yenc header.
#[allow(clippy::write_with_newline)]
pub fn | <R, W>(
&self,
input: R,
output: W,
length: u64,
input_filename: &str,
) -> Result<(), EncodeError>
where
R: Read + Seek,
W: Write,
{
let mut rdr = BufReader::new(input);
let mut checksum = crc32fast::Hasher::new();
let mut buffer = [0u8; 8192];
let mut col = 0;
let mut num_bytes = 0;
let mut output = BufWriter::new(output);
self.check_options()?;
if self.parts == 1 {
write!(
output,
"=ybegin line={} size={} name={}\r\n",
self.line_length, length, input_filename
)?;
} else {
write!(
output,
"=ybegin part={} line={} size={} name={}\r\n",
self.part, self.line_length, length, input_filename
)?;
}
if self.parts > 1 {
write!(output, "=ypart begin={} end={}\r\n", self.begin, self.end)?;
}
rdr.seek(SeekFrom::Start(self.begin - 1))?;
let mut remainder = (self.end - self.begin + 1) as usize;
while remainder > 0 {
let buf_slice = if remainder > buffer.len() {
&mut buffer[..]
} else {
&mut buffer[0..remainder]
};
rdr.read_exact(buf_slice)?;
checksum.update(buf_slice);
num_bytes += buf_slice.len();
col = encode_buffer(buf_slice, col, self.line_length, &mut output)?;
remainder -= buf_slice.len();
}
if self.parts > 1 {
write!(
output,
"\r\n=yend size={} part={} pcrc32={:08x}\r\n",
num_bytes,
self.part,
checksum.finalize()
)?;
} else {
write!(
output,
"\r\n=yend size={} crc32={:08x}\r\n",
num_bytes,
checksum.finalize()
)?;
}
Ok(())
}
}
/// Encodes the input buffer and writes it to the writer.
///
/// Lines are wrapped with a maximum of `line_length` characters per line.
/// Does not include the header and footer lines.
/// Only `encode_stream` and `encode_file` produce the headers in the output.
/// The `col` parameter is the starting offset in the row. The result contains the new offset.
pub fn encode_buffer<W>(
input: &[u8],
col: u8,
line_length: u8,
writer: W,
) -> Result<u8, EncodeError>
where
W: Write,
{
let mut col = col;
let mut writer = writer;
let mut v = Vec::<u8>::with_capacity(((input.len() as f64) * 1.04) as usize);
input.iter().for_each(|&b| {
let encoded = encode_byte(b);
v.push(encoded.0);
col += match encoded.0 {
ESCAPE => {
v.push(encoded.1);
2
}
DOT if col == 0 => {
v.push(DOT);
2
}
_ => 1,
};
if col >= line_length {
v.push(CR);
v.push(LF);
col = 0;
}
});
writer.write_all(&v)?;
Ok(col)
}
#[inline(always)]
fn encode_byte(input_byte: u8) -> (u8, u8) {
let mut output = (0, 0);
let output_byte = input_byte.overflowing_add(42).0;
match output_byte {
LF | CR | NUL | ESCAPE => {
output.0 = ESCAPE;
output.1 = output_byte.overflowing_add(64).0;
}
_ => {
output.0 = output_byte;
}
};
output
}
#[cfg(test)]
mod tests {
use super::super::constants::{CR, ESCAPE, LF, NUL};
use super::{encode_buffer, encode_byte, EncodeOptions};
#[test]
fn escape_null() {
assert_eq!((ESCAPE, 0x40), encode_byte(214));
}
/*
#[test]
fn escape_tab() {
let mut output = [0u8; 2];
assert_eq!(2, encode_byte(214 + TAB, &mut output));
assert_eq!(vec![ESCAPE, 0x49], output);
}
*/
#[test]
fn escape_lf() {
assert_eq!((ESCAPE, 0x4A), encode_byte(214 + LF));
}
#[test]
fn escape_cr() {
assert_eq!((ESCAPE, 0x4D), encode_byte(214 + CR));
}
/*
#[test]
fn escape_space() {
let mut output = [0u8; 2];
assert_eq!(2, encode_byte(214 + SPACE, &mut output));
assert_eq!(vec![ESCAPE, 0x60], output);
}
*/
#[test]
fn escape_equal_sign() {
assert_eq!((ESCAPE, 0x7D), encode_byte(ESCAPE - 42));
}
#[test]
fn non_escaped() {
for x in 0..256u16 {
let encoded = (x as u8).overflowing_add(42).0;
if encoded != NUL && encoded != CR && encoded != LF && encoded != ESCAPE {
assert_eq!((encoded, 0), encode_byte(x as u8));
}
}
}
#[test]
fn test_encode_buffer() {
let buffer = (0..256u16).map(|c| c as u8).collect::<Vec<u8>>();
#[rustfmt::skip]
const EXPECTED: [u8; 264] =
[42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61,
125, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80,
81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100,
101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116,
117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132,
133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148,
149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164,
165, 166, 167, 168, 13, 10, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178,
179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194,
195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210,
211, 212, 213, 214, 215, 216,217, 218, 219, 220, 221, 222, 223, 224, 225, 226,
227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242,
243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 61, 64, 1, 2, 3,
4, 5, 6, 7, 8, 9, 61, 74, 11, 12, 61, 77, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23,
24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 13, 10, 38, 39, 40, 41];
let mut encoded = Vec::<u8>::new();
let result = encode_buffer(&buffer, 0, 128, &mut encoded);
assert!(result.is_ok());
assert_eq!(encoded.as_slice(), &EXPECTED[..]);
}
#[test]
fn encode_options_invalid_parts() {
let encode_options = EncodeOptions::new().parts(2).begin(1).end(38400);
let vr = encode_options.check_options();
assert!(vr.is_err());
}
#[test]
fn encode_options_invalid_begin() {
let encode_options = EncodeOptions::new().parts(2).part(1).end(38400);
let vr = encode_options.check_options();
assert!(vr.is_err());
}
#[test]
fn encode_options_invalid_end() {
let encode_options = EncodeOptions::new().parts(2).part(1).begin(1);
let vr = encode_options.check_options();
assert!(vr.is_err());
}
#[test]
fn encode_options_invalid_range() {
let encode_options = EncodeOptions::new().parts(2).part(1).begin(38400).end(1);
let vr = encode_options.check_options();
assert!(vr.is_err());
}
}
| encode_stream | identifier_name |
messagepass.py | import numpy as np
import time as time
from sortedcontainers import SortedSet;
from pyGM.factor import *
from pyGM.graphmodel import *
inf = float('inf')
# Basic implementation -- flooding schedule f->v, v->f etc.
#
#
#
def LBP(model, maxIter=100, verbose=False):
beliefs_F = [ f/f.sum() for f in model.factors ] # copies & normalizes each f
beliefs_V = [ Factor([v],1.0/v.states) for v in model.X ] # variable beliefs
msg = {}
for f in model.factors:
for v in f.vars:
msg[v,f] = Factor([v],1.0) # init msg[i->alpha]
msg[f,v] = Factor([v],1.0) # and msg[alpha->i]
for t in xrange(1,maxIter+1): # for each iteration:
# Update beliefs and outgoing messages for each factor:
for a,f in enumerate(model.factors):
beliefs_F[a] = f.copy() # find f * incoming msgs & normalize
for v in f.vars: beliefs_F[a] *= msg[v,f]
beliefs_F[a] /= beliefs_F[a].sum() # divide by i->f & sum out all but Xi
for v in f.vars: msg[f,v] = beliefs_F[a].marginal([v])/msg[v,f]
# Update beliefs and outgoing messages for each variable:
for i,v in enumerate(model.X):
beliefs_V[i] = Factor([v],1.0) # find product of incoming msgs & normalize
for f in model.factorsWith(v): beliefs_V[i] *= msg[f,v]
beliefs_V[i] /= beliefs_V[i].sum() # divide by f->i to get msg i->f
for f in model.factorsWith(v): msg[v,f] = beliefs_V[i]/msg[f,v]
#for f in model.factors: # print msgs and beliefs for debugging
# for v in f.vars:
# print v,"->",f,":",msg[X[v],f].table
# print f,"->",v,":",msg[f,X[v]].table
#for b in beliefs_F: print b, b.table
#for b in beliefs_V: print b, b.table
# Compute estimate of the log partition function:
# E_b [ log f ] + H_Bethe(b) = \sum_f E_bf[log f] + \sum_f H(bf) + \sum (1-di) H(bi)
lnZ = sum([(1-len(model.factorsWith(v)))*beliefs_V[v].entropy() for v in model.X])
for a,f in enumerate(model.factors):
lnZ += (beliefs_F[a] * f.log()).sum()
lnZ += beliefs_F[a].entropy()
if verbose: print("Iter "+str(t)+": "+str(lnZ))
return lnZ,beliefs_V
#@do_profile(follow=[get_number])
def NMF(model, maxIter=100, beliefs=None, verbose=False):
"""Simple naive mean field lower bound on log(Z). Returns lnZ,[bel(Xi) for Xi in X]"""
if beliefs is None: beliefs = [Factor([Xi],1.0/Xi.states) for Xi in model.X]
lnZ = sum([beliefs[Xi].entropy() for Xi in model.X])
for f in model.factors:
m = f.log()
for v in f.vars: m *= beliefs[v]
lnZ += m.sum()
if verbose: print("Iter 0: "+str(lnZ))
for t in xrange(1,maxIter+1): # for each iteration:
# Update all the beliefs via coordinate ascent:
for Xi in model.X: # for each variable,
bNew = 0.0 # compute E[ log f ] as a function of Xi:
for f in model.factorsWith(Xi,copy=False): # for each factor f_a, compute:
m = f.log() # E[log f_a] = \sum \log f_a \prod b_v
for v in f.vars - [Xi]: m *= beliefs[v]
bNew += m.marginal([Xi]) # sum them up to get E[log f]
bNew -= bNew.max() # (numerical issues)
bNew = bNew.exp()
bNew /= bNew.sum() # set b(Xi) = exp( E[log f] ) / Z
beliefs[Xi] = bNew
#
# Compute the lower bound on the partition function:
# E_b [ log f ] + H(b) = \sum_a E[log f_a] + \sum_i H(b_i) for independent beliefs
lnZ = sum([beliefs[Xi].entropy() for Xi in model.X])
for f in model.factors:
m = f.log()
for v in f.vars: m *= beliefs[v]
lnZ += m.sum() |
################ DECOMPOSITION METHODS #############################################
#@do_profile(follow=[get_number])
def DualDecomposition(model, maxIter=100, verbose=False):
""" ub,lb,xhat = DualDecomposition( model [,maxiter,verbose] )
Compute a decomposition-based upper bound & estimate of the MAP of a graphical model"""
lnF = sum( np.log(f.max()) for f in model.factors )
lnX, xhat = -np.inf, np.zeros( (len(model.X),), dtype=int)
lnR, rhat = -np.inf, np.zeros( (len(model.X),), dtype=int)
if verbose: print("Iter 0: "+str(lnF))
for t in xrange(1,maxIter+1): # for each iteration:
# Update each variable in turn:
for Xi in model.X: # for each variable,
flist = model.factorsWith(Xi, copy=False)
gamma = [f.maxmarginal([Xi]) for f in flist]
avg = np.prod(gamma)**(1.0/len(gamma))
for f,g in zip(flist,gamma): f *= avg/(g+1e-300) # !!! numerical issues...
xhat[Xi] = avg.argmax()[0] # guess a state for Xi
#
# Compute the upper bound on the maximum and the value of our current guess
lnF = sum( np.log(f.max()) for f in model.factors )
lnX = model.logValue( xhat )
if lnR < lnX: lnR = lnX; rhat[:]=xhat;
if verbose: print("Iter "+str(t)+": "+str(lnF)+" > "+str(lnX))
if (lnF == lnX): break
return lnF,lnR,rhat
def WeightedDD( factors, weights, elimOrder, direction=1.0, maxIter=100, verbose=False, stop_tol=0.0 ):
step_inner = 5;
thetas = [f.log() for f in factors]
weights = { th:wt for th,wt in zip(thetas,weights) }
logmodel = GraphModel(thetas, copy=False)
def wt_elim(f,w,pri):
elim_ord = np.argsort( [pri[x] for x in f.vars] )
tmp = f
for i in elim_ord: tmp = tmp.lsePower([f.v[i]],1.0/w[i])
return tmp
def calc_bound( thetas, weights, pri):
return sum([wt_elim(th,wt,pri) for th,wt in zip(thetas,weights)])
def calc_deriv(th,w,pri,match,Xi=None):
elim_ord = np.argsort( [pri[x] for x in th.vars] )
lnZ0 = th.copy()
lnmu = 0.0 * lnZ0
for i in elim_ord: # run over v[i],w[i] in the given elim order
lnZ1 = lnZ0.lsePower([th.v[i]],1.0/w[i])
lnZ0 -= lnZ1; # update lnmu += (lnZ0 - lnZ1)*(1.0/w[i])
lnZ0 *= (1.0/w[i]);
lnmu += lnZ0; # TODO: save a copy by assigning = lnZ0 on 1st loop?
lnZ0 = lnZ1; # then move to the next conditional
lnmu.expIP()
Hxi = 0.0
if Xi is not None:
keep = [x for x in th.vars if pri[x]>=pri[Xi]]
forH = lnmu.marginal(keep) if len(keep) < th.nvar else lnmu
Hxi = forH.entropy() - forH.sum([Xi]).entropy() if forH.nvar > 1 else forH.entropy()
return lnmu.marginal(match), Hxi
def update_weights(weights,idx,dW,stepW): # TODO only works for positive weights
wtot = 0.0
for j,wt,dw in zip(idx,weights,dW): wt[j] *= np.exp( - stepW * wt[j] * dw ); wtot += wt[j];
for j,wt,dw in zip(idx,weights,dW): wt[j] /= wtot;
def armijo(thetas,weights,pri,Xi,steps,threshold=1e-4,direction=+1, optTol=1e-8,progTol=1e-8):
import copy
f0,f1 = None, calc_bound(thetas,weights,pri) # init prev, current objective values
match = reduce(lambda a,b: a&b, [th.vars for th in thetas], thetas[0].vars)
idx = [th.v.index(Xi) for th in thetas] if Xi is not None else [] # find location of Xi in var/weight vectors
newweights = copy.deepcopy(weights) if Xi is not None else weights # copy weights if updated
for s in range(steps):
# compute gradients dPhi/dTheta, dPhi/dW (wrt parameters, weights):
dT,dW = zip(*[calc_deriv(th,wt,pri,match,Xi) for th,wt in zip(thetas,weights)])
dT,dW = list(dT),list(dW)
for dt in dT[1:]: dt -= dT[0]; dt *= -1;
dT[0] = -sum(dT[1:])
if Xi is not None:
Hbar = sum([wt[j]*dw for j,dw,wt in zip(idx,dW,weights)])
for j in range(len(dW)): dW[j] -= Hbar
# Compute gradient norms:
L0,L1,L2 = zip(*[ (d.max(),d.sum(),(d*d).sum()) for dt in dT for d in [dt.abs()]])
L0,L1,L2 = max(L0),sum(L1)+1e-300,sum(L2)
L0,L1,L2 = max(L0,max(abs(dw) for dw in dW)), L1+sum(abs(dw) for dw in dW), L2+sum(dw*dw for dw in dW)
if L0 < optTol: return # if < optTol => local optimum
step = min(1.0, 1.0/L1) if f0 is None else min(1.0, direction*(f0-f1)/L1)
step = step if step > 0 else 1.0
f0 = f1; # update "old" objective value
for dt in dT: dt *= direction*step; # premultiply step size into dT
for j in range(10):
newthetas = [th+dt for th,dt in zip(thetas,dT)] # step already pre-multiplied
if Xi is not None: update_weights( newweights, idx, dW, step );
f1 = calc_bound(newthetas,newweights,pri)
#print " ",f0," => ",f1, " (",f0-f1,' ~ ',stepsize*threshold*gradnorm,")"
if (f0 - f1)*direction > step*threshold*L2: # if armijo "enough improvement" satisfied
for th,nth in zip(thetas,newthetas): th.t[:] = nth.t # rewrite tables
for j,wt,w2 in zip(idx,weights,newweights): wt[j] = w2[j];
break;
else: # ow, back off
step *= 0.5;
if step*L0 < progTol: return # if < progTol => no progress possible
for dt in dT: dt *= 0.5
elimOrder = np.asarray(elimOrder);
pri = np.zeros((elimOrder.max()+1,))
pri[elimOrder] = np.arange(len(elimOrder))
#
lnZw = calc_bound(thetas,[weights[th] for th in thetas],pri)
start_time = time.time()
if verbose: print("Iter 0: "+str(lnZw))
for t in xrange(1,maxIter+1): # for each iteration:
# Update each variable in turn:
for Xi in logmodel.X: # for each variable,
theta_i = logmodel.factorsWith(Xi)
if len(theta_i) <= 1: continue;
weight_i = [weights[th] for th in theta_i]
armijo(theta_i,weight_i,pri,Xi, 5, 0.01, direction)
#
# Compute the upper bound on the maximum and the value of our current guess
prev, lnZw = lnZw, calc_bound(thetas,[weights[th] for th in thetas],pri)
if verbose: print("[{}] Iter {} : {}".format(time.time()-start_time,t,lnZw));
if (prev - lnZw)*direction < stop_tol: break
return lnZw, thetas | if verbose: print("Iter "+str(t)+": "+str(lnZ))
return lnZ,beliefs
| random_line_split |
messagepass.py | import numpy as np
import time as time
from sortedcontainers import SortedSet;
from pyGM.factor import *
from pyGM.graphmodel import *
inf = float('inf')
# Basic implementation -- flooding schedule f->v, v->f etc.
#
#
#
def LBP(model, maxIter=100, verbose=False):
beliefs_F = [ f/f.sum() for f in model.factors ] # copies & normalizes each f
beliefs_V = [ Factor([v],1.0/v.states) for v in model.X ] # variable beliefs
msg = {}
for f in model.factors:
for v in f.vars:
msg[v,f] = Factor([v],1.0) # init msg[i->alpha]
msg[f,v] = Factor([v],1.0) # and msg[alpha->i]
for t in xrange(1,maxIter+1): # for each iteration:
# Update beliefs and outgoing messages for each factor:
for a,f in enumerate(model.factors):
beliefs_F[a] = f.copy() # find f * incoming msgs & normalize
for v in f.vars: beliefs_F[a] *= msg[v,f]
beliefs_F[a] /= beliefs_F[a].sum() # divide by i->f & sum out all but Xi
for v in f.vars: msg[f,v] = beliefs_F[a].marginal([v])/msg[v,f]
# Update beliefs and outgoing messages for each variable:
for i,v in enumerate(model.X):
beliefs_V[i] = Factor([v],1.0) # find product of incoming msgs & normalize
for f in model.factorsWith(v): beliefs_V[i] *= msg[f,v]
beliefs_V[i] /= beliefs_V[i].sum() # divide by f->i to get msg i->f
for f in model.factorsWith(v): msg[v,f] = beliefs_V[i]/msg[f,v]
#for f in model.factors: # print msgs and beliefs for debugging
# for v in f.vars:
# print v,"->",f,":",msg[X[v],f].table
# print f,"->",v,":",msg[f,X[v]].table
#for b in beliefs_F: print b, b.table
#for b in beliefs_V: print b, b.table
# Compute estimate of the log partition function:
# E_b [ log f ] + H_Bethe(b) = \sum_f E_bf[log f] + \sum_f H(bf) + \sum (1-di) H(bi)
lnZ = sum([(1-len(model.factorsWith(v)))*beliefs_V[v].entropy() for v in model.X])
for a,f in enumerate(model.factors):
lnZ += (beliefs_F[a] * f.log()).sum()
lnZ += beliefs_F[a].entropy()
if verbose: print("Iter "+str(t)+": "+str(lnZ))
return lnZ,beliefs_V
#@do_profile(follow=[get_number])
def NMF(model, maxIter=100, beliefs=None, verbose=False):
"""Simple naive mean field lower bound on log(Z). Returns lnZ,[bel(Xi) for Xi in X]"""
if beliefs is None: beliefs = [Factor([Xi],1.0/Xi.states) for Xi in model.X]
lnZ = sum([beliefs[Xi].entropy() for Xi in model.X])
for f in model.factors:
m = f.log()
for v in f.vars: m *= beliefs[v]
lnZ += m.sum()
if verbose: print("Iter 0: "+str(lnZ))
for t in xrange(1,maxIter+1): # for each iteration:
# Update all the beliefs via coordinate ascent:
for Xi in model.X: # for each variable,
bNew = 0.0 # compute E[ log f ] as a function of Xi:
for f in model.factorsWith(Xi,copy=False): # for each factor f_a, compute:
m = f.log() # E[log f_a] = \sum \log f_a \prod b_v
for v in f.vars - [Xi]: m *= beliefs[v]
bNew += m.marginal([Xi]) # sum them up to get E[log f]
bNew -= bNew.max() # (numerical issues)
bNew = bNew.exp()
bNew /= bNew.sum() # set b(Xi) = exp( E[log f] ) / Z
beliefs[Xi] = bNew
#
# Compute the lower bound on the partition function:
# E_b [ log f ] + H(b) = \sum_a E[log f_a] + \sum_i H(b_i) for independent beliefs
lnZ = sum([beliefs[Xi].entropy() for Xi in model.X])
for f in model.factors:
m = f.log()
for v in f.vars: m *= beliefs[v]
lnZ += m.sum()
if verbose: print("Iter "+str(t)+": "+str(lnZ))
return lnZ,beliefs
################ DECOMPOSITION METHODS #############################################
#@do_profile(follow=[get_number])
def DualDecomposition(model, maxIter=100, verbose=False):
""" ub,lb,xhat = DualDecomposition( model [,maxiter,verbose] )
Compute a decomposition-based upper bound & estimate of the MAP of a graphical model"""
lnF = sum( np.log(f.max()) for f in model.factors )
lnX, xhat = -np.inf, np.zeros( (len(model.X),), dtype=int)
lnR, rhat = -np.inf, np.zeros( (len(model.X),), dtype=int)
if verbose: print("Iter 0: "+str(lnF))
for t in xrange(1,maxIter+1): # for each iteration:
# Update each variable in turn:
for Xi in model.X: # for each variable,
flist = model.factorsWith(Xi, copy=False)
gamma = [f.maxmarginal([Xi]) for f in flist]
avg = np.prod(gamma)**(1.0/len(gamma))
for f,g in zip(flist,gamma): f *= avg/(g+1e-300) # !!! numerical issues...
xhat[Xi] = avg.argmax()[0] # guess a state for Xi
#
# Compute the upper bound on the maximum and the value of our current guess
lnF = sum( np.log(f.max()) for f in model.factors )
lnX = model.logValue( xhat )
if lnR < lnX: lnR = lnX; rhat[:]=xhat;
if verbose: print("Iter "+str(t)+": "+str(lnF)+" > "+str(lnX))
if (lnF == lnX): break
return lnF,lnR,rhat
def WeightedDD( factors, weights, elimOrder, direction=1.0, maxIter=100, verbose=False, stop_tol=0.0 ):
step_inner = 5;
thetas = [f.log() for f in factors]
weights = { th:wt for th,wt in zip(thetas,weights) }
logmodel = GraphModel(thetas, copy=False)
def wt_elim(f,w,pri):
elim_ord = np.argsort( [pri[x] for x in f.vars] )
tmp = f
for i in elim_ord: tmp = tmp.lsePower([f.v[i]],1.0/w[i])
return tmp
def calc_bound( thetas, weights, pri):
return sum([wt_elim(th,wt,pri) for th,wt in zip(thetas,weights)])
def calc_deriv(th,w,pri,match,Xi=None):
elim_ord = np.argsort( [pri[x] for x in th.vars] )
lnZ0 = th.copy()
lnmu = 0.0 * lnZ0
for i in elim_ord: # run over v[i],w[i] in the given elim order
lnZ1 = lnZ0.lsePower([th.v[i]],1.0/w[i])
lnZ0 -= lnZ1; # update lnmu += (lnZ0 - lnZ1)*(1.0/w[i])
lnZ0 *= (1.0/w[i]);
lnmu += lnZ0; # TODO: save a copy by assigning = lnZ0 on 1st loop?
lnZ0 = lnZ1; # then move to the next conditional
lnmu.expIP()
Hxi = 0.0
if Xi is not None:
keep = [x for x in th.vars if pri[x]>=pri[Xi]]
forH = lnmu.marginal(keep) if len(keep) < th.nvar else lnmu
Hxi = forH.entropy() - forH.sum([Xi]).entropy() if forH.nvar > 1 else forH.entropy()
return lnmu.marginal(match), Hxi
def update_weights(weights,idx,dW,stepW): # TODO only works for positive weights
wtot = 0.0
for j,wt,dw in zip(idx,weights,dW): wt[j] *= np.exp( - stepW * wt[j] * dw ); wtot += wt[j];
for j,wt,dw in zip(idx,weights,dW): wt[j] /= wtot;
def | (thetas,weights,pri,Xi,steps,threshold=1e-4,direction=+1, optTol=1e-8,progTol=1e-8):
import copy
f0,f1 = None, calc_bound(thetas,weights,pri) # init prev, current objective values
match = reduce(lambda a,b: a&b, [th.vars for th in thetas], thetas[0].vars)
idx = [th.v.index(Xi) for th in thetas] if Xi is not None else [] # find location of Xi in var/weight vectors
newweights = copy.deepcopy(weights) if Xi is not None else weights # copy weights if updated
for s in range(steps):
# compute gradients dPhi/dTheta, dPhi/dW (wrt parameters, weights):
dT,dW = zip(*[calc_deriv(th,wt,pri,match,Xi) for th,wt in zip(thetas,weights)])
dT,dW = list(dT),list(dW)
for dt in dT[1:]: dt -= dT[0]; dt *= -1;
dT[0] = -sum(dT[1:])
if Xi is not None:
Hbar = sum([wt[j]*dw for j,dw,wt in zip(idx,dW,weights)])
for j in range(len(dW)): dW[j] -= Hbar
# Compute gradient norms:
L0,L1,L2 = zip(*[ (d.max(),d.sum(),(d*d).sum()) for dt in dT for d in [dt.abs()]])
L0,L1,L2 = max(L0),sum(L1)+1e-300,sum(L2)
L0,L1,L2 = max(L0,max(abs(dw) for dw in dW)), L1+sum(abs(dw) for dw in dW), L2+sum(dw*dw for dw in dW)
if L0 < optTol: return # if < optTol => local optimum
step = min(1.0, 1.0/L1) if f0 is None else min(1.0, direction*(f0-f1)/L1)
step = step if step > 0 else 1.0
f0 = f1; # update "old" objective value
for dt in dT: dt *= direction*step; # premultiply step size into dT
for j in range(10):
newthetas = [th+dt for th,dt in zip(thetas,dT)] # step already pre-multiplied
if Xi is not None: update_weights( newweights, idx, dW, step );
f1 = calc_bound(newthetas,newweights,pri)
#print " ",f0," => ",f1, " (",f0-f1,' ~ ',stepsize*threshold*gradnorm,")"
if (f0 - f1)*direction > step*threshold*L2: # if armijo "enough improvement" satisfied
for th,nth in zip(thetas,newthetas): th.t[:] = nth.t # rewrite tables
for j,wt,w2 in zip(idx,weights,newweights): wt[j] = w2[j];
break;
else: # ow, back off
step *= 0.5;
if step*L0 < progTol: return # if < progTol => no progress possible
for dt in dT: dt *= 0.5
elimOrder = np.asarray(elimOrder);
pri = np.zeros((elimOrder.max()+1,))
pri[elimOrder] = np.arange(len(elimOrder))
#
lnZw = calc_bound(thetas,[weights[th] for th in thetas],pri)
start_time = time.time()
if verbose: print("Iter 0: "+str(lnZw))
for t in xrange(1,maxIter+1): # for each iteration:
# Update each variable in turn:
for Xi in logmodel.X: # for each variable,
theta_i = logmodel.factorsWith(Xi)
if len(theta_i) <= 1: continue;
weight_i = [weights[th] for th in theta_i]
armijo(theta_i,weight_i,pri,Xi, 5, 0.01, direction)
#
# Compute the upper bound on the maximum and the value of our current guess
prev, lnZw = lnZw, calc_bound(thetas,[weights[th] for th in thetas],pri)
if verbose: print("[{}] Iter {} : {}".format(time.time()-start_time,t,lnZw));
if (prev - lnZw)*direction < stop_tol: break
return lnZw, thetas
| armijo | identifier_name |
messagepass.py | import numpy as np
import time as time
from sortedcontainers import SortedSet;
from pyGM.factor import *
from pyGM.graphmodel import *
inf = float('inf')
# Basic implementation -- flooding schedule f->v, v->f etc.
#
#
#
def LBP(model, maxIter=100, verbose=False):
beliefs_F = [ f/f.sum() for f in model.factors ] # copies & normalizes each f
beliefs_V = [ Factor([v],1.0/v.states) for v in model.X ] # variable beliefs
msg = {}
for f in model.factors:
for v in f.vars:
msg[v,f] = Factor([v],1.0) # init msg[i->alpha]
msg[f,v] = Factor([v],1.0) # and msg[alpha->i]
for t in xrange(1,maxIter+1): # for each iteration:
# Update beliefs and outgoing messages for each factor:
for a,f in enumerate(model.factors):
beliefs_F[a] = f.copy() # find f * incoming msgs & normalize
for v in f.vars: beliefs_F[a] *= msg[v,f]
beliefs_F[a] /= beliefs_F[a].sum() # divide by i->f & sum out all but Xi
for v in f.vars: msg[f,v] = beliefs_F[a].marginal([v])/msg[v,f]
# Update beliefs and outgoing messages for each variable:
for i,v in enumerate(model.X):
beliefs_V[i] = Factor([v],1.0) # find product of incoming msgs & normalize
for f in model.factorsWith(v): beliefs_V[i] *= msg[f,v]
beliefs_V[i] /= beliefs_V[i].sum() # divide by f->i to get msg i->f
for f in model.factorsWith(v): msg[v,f] = beliefs_V[i]/msg[f,v]
#for f in model.factors: # print msgs and beliefs for debugging
# for v in f.vars:
# print v,"->",f,":",msg[X[v],f].table
# print f,"->",v,":",msg[f,X[v]].table
#for b in beliefs_F: print b, b.table
#for b in beliefs_V: print b, b.table
# Compute estimate of the log partition function:
# E_b [ log f ] + H_Bethe(b) = \sum_f E_bf[log f] + \sum_f H(bf) + \sum (1-di) H(bi)
lnZ = sum([(1-len(model.factorsWith(v)))*beliefs_V[v].entropy() for v in model.X])
for a,f in enumerate(model.factors):
lnZ += (beliefs_F[a] * f.log()).sum()
lnZ += beliefs_F[a].entropy()
if verbose: print("Iter "+str(t)+": "+str(lnZ))
return lnZ,beliefs_V
#@do_profile(follow=[get_number])
def NMF(model, maxIter=100, beliefs=None, verbose=False):
"""Simple naive mean field lower bound on log(Z). Returns lnZ,[bel(Xi) for Xi in X]"""
if beliefs is None: beliefs = [Factor([Xi],1.0/Xi.states) for Xi in model.X]
lnZ = sum([beliefs[Xi].entropy() for Xi in model.X])
for f in model.factors:
m = f.log()
for v in f.vars: m *= beliefs[v]
lnZ += m.sum()
if verbose: print("Iter 0: "+str(lnZ))
for t in xrange(1,maxIter+1): # for each iteration:
# Update all the beliefs via coordinate ascent:
for Xi in model.X: # for each variable,
bNew = 0.0 # compute E[ log f ] as a function of Xi:
for f in model.factorsWith(Xi,copy=False): # for each factor f_a, compute:
m = f.log() # E[log f_a] = \sum \log f_a \prod b_v
for v in f.vars - [Xi]: m *= beliefs[v]
bNew += m.marginal([Xi]) # sum them up to get E[log f]
bNew -= bNew.max() # (numerical issues)
bNew = bNew.exp()
bNew /= bNew.sum() # set b(Xi) = exp( E[log f] ) / Z
beliefs[Xi] = bNew
#
# Compute the lower bound on the partition function:
# E_b [ log f ] + H(b) = \sum_a E[log f_a] + \sum_i H(b_i) for independent beliefs
lnZ = sum([beliefs[Xi].entropy() for Xi in model.X])
for f in model.factors:
m = f.log()
for v in f.vars: m *= beliefs[v]
lnZ += m.sum()
if verbose: print("Iter "+str(t)+": "+str(lnZ))
return lnZ,beliefs
################ DECOMPOSITION METHODS #############################################
#@do_profile(follow=[get_number])
def DualDecomposition(model, maxIter=100, verbose=False):
""" ub,lb,xhat = DualDecomposition( model [,maxiter,verbose] )
Compute a decomposition-based upper bound & estimate of the MAP of a graphical model"""
lnF = sum( np.log(f.max()) for f in model.factors )
lnX, xhat = -np.inf, np.zeros( (len(model.X),), dtype=int)
lnR, rhat = -np.inf, np.zeros( (len(model.X),), dtype=int)
if verbose: print("Iter 0: "+str(lnF))
for t in xrange(1,maxIter+1): # for each iteration:
# Update each variable in turn:
for Xi in model.X: # for each variable,
flist = model.factorsWith(Xi, copy=False)
gamma = [f.maxmarginal([Xi]) for f in flist]
avg = np.prod(gamma)**(1.0/len(gamma))
for f,g in zip(flist,gamma): f *= avg/(g+1e-300) # !!! numerical issues...
xhat[Xi] = avg.argmax()[0] # guess a state for Xi
#
# Compute the upper bound on the maximum and the value of our current guess
lnF = sum( np.log(f.max()) for f in model.factors )
lnX = model.logValue( xhat )
if lnR < lnX: lnR = lnX; rhat[:]=xhat;
if verbose: print("Iter "+str(t)+": "+str(lnF)+" > "+str(lnX))
if (lnF == lnX): break
return lnF,lnR,rhat
def WeightedDD( factors, weights, elimOrder, direction=1.0, maxIter=100, verbose=False, stop_tol=0.0 ):
step_inner = 5;
thetas = [f.log() for f in factors]
weights = { th:wt for th,wt in zip(thetas,weights) }
logmodel = GraphModel(thetas, copy=False)
def wt_elim(f,w,pri):
elim_ord = np.argsort( [pri[x] for x in f.vars] )
tmp = f
for i in elim_ord: tmp = tmp.lsePower([f.v[i]],1.0/w[i])
return tmp
def calc_bound( thetas, weights, pri):
return sum([wt_elim(th,wt,pri) for th,wt in zip(thetas,weights)])
def calc_deriv(th,w,pri,match,Xi=None):
elim_ord = np.argsort( [pri[x] for x in th.vars] )
lnZ0 = th.copy()
lnmu = 0.0 * lnZ0
for i in elim_ord: # run over v[i],w[i] in the given elim order
lnZ1 = lnZ0.lsePower([th.v[i]],1.0/w[i])
lnZ0 -= lnZ1; # update lnmu += (lnZ0 - lnZ1)*(1.0/w[i])
lnZ0 *= (1.0/w[i]);
lnmu += lnZ0; # TODO: save a copy by assigning = lnZ0 on 1st loop?
lnZ0 = lnZ1; # then move to the next conditional
lnmu.expIP()
Hxi = 0.0
if Xi is not None:
keep = [x for x in th.vars if pri[x]>=pri[Xi]]
forH = lnmu.marginal(keep) if len(keep) < th.nvar else lnmu
Hxi = forH.entropy() - forH.sum([Xi]).entropy() if forH.nvar > 1 else forH.entropy()
return lnmu.marginal(match), Hxi
def update_weights(weights,idx,dW,stepW): # TODO only works for positive weights
wtot = 0.0
for j,wt,dw in zip(idx,weights,dW): wt[j] *= np.exp( - stepW * wt[j] * dw ); wtot += wt[j];
for j,wt,dw in zip(idx,weights,dW): wt[j] /= wtot;
def armijo(thetas,weights,pri,Xi,steps,threshold=1e-4,direction=+1, optTol=1e-8,progTol=1e-8):
import copy
f0,f1 = None, calc_bound(thetas,weights,pri) # init prev, current objective values
match = reduce(lambda a,b: a&b, [th.vars for th in thetas], thetas[0].vars)
idx = [th.v.index(Xi) for th in thetas] if Xi is not None else [] # find location of Xi in var/weight vectors
newweights = copy.deepcopy(weights) if Xi is not None else weights # copy weights if updated
for s in range(steps):
# compute gradients dPhi/dTheta, dPhi/dW (wrt parameters, weights):
dT,dW = zip(*[calc_deriv(th,wt,pri,match,Xi) for th,wt in zip(thetas,weights)])
dT,dW = list(dT),list(dW)
for dt in dT[1:]: dt -= dT[0]; dt *= -1;
dT[0] = -sum(dT[1:])
if Xi is not None:
Hbar = sum([wt[j]*dw for j,dw,wt in zip(idx,dW,weights)])
for j in range(len(dW)): dW[j] -= Hbar
# Compute gradient norms:
L0,L1,L2 = zip(*[ (d.max(),d.sum(),(d*d).sum()) for dt in dT for d in [dt.abs()]])
L0,L1,L2 = max(L0),sum(L1)+1e-300,sum(L2)
L0,L1,L2 = max(L0,max(abs(dw) for dw in dW)), L1+sum(abs(dw) for dw in dW), L2+sum(dw*dw for dw in dW)
if L0 < optTol: return # if < optTol => local optimum
step = min(1.0, 1.0/L1) if f0 is None else min(1.0, direction*(f0-f1)/L1)
step = step if step > 0 else 1.0
f0 = f1; # update "old" objective value
for dt in dT: dt *= direction*step; # premultiply step size into dT
for j in range(10):
newthetas = [th+dt for th,dt in zip(thetas,dT)] # step already pre-multiplied
if Xi is not None: update_weights( newweights, idx, dW, step );
f1 = calc_bound(newthetas,newweights,pri)
#print " ",f0," => ",f1, " (",f0-f1,' ~ ',stepsize*threshold*gradnorm,")"
if (f0 - f1)*direction > step*threshold*L2: # if armijo "enough improvement" satisfied
for th,nth in zip(thetas,newthetas): |
for j,wt,w2 in zip(idx,weights,newweights): wt[j] = w2[j];
break;
else: # ow, back off
step *= 0.5;
if step*L0 < progTol: return # if < progTol => no progress possible
for dt in dT: dt *= 0.5
elimOrder = np.asarray(elimOrder);
pri = np.zeros((elimOrder.max()+1,))
pri[elimOrder] = np.arange(len(elimOrder))
#
lnZw = calc_bound(thetas,[weights[th] for th in thetas],pri)
start_time = time.time()
if verbose: print("Iter 0: "+str(lnZw))
for t in xrange(1,maxIter+1): # for each iteration:
# Update each variable in turn:
for Xi in logmodel.X: # for each variable,
theta_i = logmodel.factorsWith(Xi)
if len(theta_i) <= 1: continue;
weight_i = [weights[th] for th in theta_i]
armijo(theta_i,weight_i,pri,Xi, 5, 0.01, direction)
#
# Compute the upper bound on the maximum and the value of our current guess
prev, lnZw = lnZw, calc_bound(thetas,[weights[th] for th in thetas],pri)
if verbose: print("[{}] Iter {} : {}".format(time.time()-start_time,t,lnZw));
if (prev - lnZw)*direction < stop_tol: break
return lnZw, thetas
| th.t[:] = nth.t # rewrite tables | conditional_block |
messagepass.py | import numpy as np
import time as time
from sortedcontainers import SortedSet;
from pyGM.factor import *
from pyGM.graphmodel import *
inf = float('inf')
# Basic implementation -- flooding schedule f->v, v->f etc.
#
#
#
def LBP(model, maxIter=100, verbose=False):
beliefs_F = [ f/f.sum() for f in model.factors ] # copies & normalizes each f
beliefs_V = [ Factor([v],1.0/v.states) for v in model.X ] # variable beliefs
msg = {}
for f in model.factors:
for v in f.vars:
msg[v,f] = Factor([v],1.0) # init msg[i->alpha]
msg[f,v] = Factor([v],1.0) # and msg[alpha->i]
for t in xrange(1,maxIter+1): # for each iteration:
# Update beliefs and outgoing messages for each factor:
for a,f in enumerate(model.factors):
beliefs_F[a] = f.copy() # find f * incoming msgs & normalize
for v in f.vars: beliefs_F[a] *= msg[v,f]
beliefs_F[a] /= beliefs_F[a].sum() # divide by i->f & sum out all but Xi
for v in f.vars: msg[f,v] = beliefs_F[a].marginal([v])/msg[v,f]
# Update beliefs and outgoing messages for each variable:
for i,v in enumerate(model.X):
beliefs_V[i] = Factor([v],1.0) # find product of incoming msgs & normalize
for f in model.factorsWith(v): beliefs_V[i] *= msg[f,v]
beliefs_V[i] /= beliefs_V[i].sum() # divide by f->i to get msg i->f
for f in model.factorsWith(v): msg[v,f] = beliefs_V[i]/msg[f,v]
#for f in model.factors: # print msgs and beliefs for debugging
# for v in f.vars:
# print v,"->",f,":",msg[X[v],f].table
# print f,"->",v,":",msg[f,X[v]].table
#for b in beliefs_F: print b, b.table
#for b in beliefs_V: print b, b.table
# Compute estimate of the log partition function:
# E_b [ log f ] + H_Bethe(b) = \sum_f E_bf[log f] + \sum_f H(bf) + \sum (1-di) H(bi)
lnZ = sum([(1-len(model.factorsWith(v)))*beliefs_V[v].entropy() for v in model.X])
for a,f in enumerate(model.factors):
lnZ += (beliefs_F[a] * f.log()).sum()
lnZ += beliefs_F[a].entropy()
if verbose: print("Iter "+str(t)+": "+str(lnZ))
return lnZ,beliefs_V
#@do_profile(follow=[get_number])
def NMF(model, maxIter=100, beliefs=None, verbose=False):
"""Simple naive mean field lower bound on log(Z). Returns lnZ,[bel(Xi) for Xi in X]"""
if beliefs is None: beliefs = [Factor([Xi],1.0/Xi.states) for Xi in model.X]
lnZ = sum([beliefs[Xi].entropy() for Xi in model.X])
for f in model.factors:
m = f.log()
for v in f.vars: m *= beliefs[v]
lnZ += m.sum()
if verbose: print("Iter 0: "+str(lnZ))
for t in xrange(1,maxIter+1): # for each iteration:
# Update all the beliefs via coordinate ascent:
for Xi in model.X: # for each variable,
bNew = 0.0 # compute E[ log f ] as a function of Xi:
for f in model.factorsWith(Xi,copy=False): # for each factor f_a, compute:
m = f.log() # E[log f_a] = \sum \log f_a \prod b_v
for v in f.vars - [Xi]: m *= beliefs[v]
bNew += m.marginal([Xi]) # sum them up to get E[log f]
bNew -= bNew.max() # (numerical issues)
bNew = bNew.exp()
bNew /= bNew.sum() # set b(Xi) = exp( E[log f] ) / Z
beliefs[Xi] = bNew
#
# Compute the lower bound on the partition function:
# E_b [ log f ] + H(b) = \sum_a E[log f_a] + \sum_i H(b_i) for independent beliefs
lnZ = sum([beliefs[Xi].entropy() for Xi in model.X])
for f in model.factors:
m = f.log()
for v in f.vars: m *= beliefs[v]
lnZ += m.sum()
if verbose: print("Iter "+str(t)+": "+str(lnZ))
return lnZ,beliefs
################ DECOMPOSITION METHODS #############################################
#@do_profile(follow=[get_number])
def DualDecomposition(model, maxIter=100, verbose=False):
""" ub,lb,xhat = DualDecomposition( model [,maxiter,verbose] )
Compute a decomposition-based upper bound & estimate of the MAP of a graphical model"""
lnF = sum( np.log(f.max()) for f in model.factors )
lnX, xhat = -np.inf, np.zeros( (len(model.X),), dtype=int)
lnR, rhat = -np.inf, np.zeros( (len(model.X),), dtype=int)
if verbose: print("Iter 0: "+str(lnF))
for t in xrange(1,maxIter+1): # for each iteration:
# Update each variable in turn:
for Xi in model.X: # for each variable,
flist = model.factorsWith(Xi, copy=False)
gamma = [f.maxmarginal([Xi]) for f in flist]
avg = np.prod(gamma)**(1.0/len(gamma))
for f,g in zip(flist,gamma): f *= avg/(g+1e-300) # !!! numerical issues...
xhat[Xi] = avg.argmax()[0] # guess a state for Xi
#
# Compute the upper bound on the maximum and the value of our current guess
lnF = sum( np.log(f.max()) for f in model.factors )
lnX = model.logValue( xhat )
if lnR < lnX: lnR = lnX; rhat[:]=xhat;
if verbose: print("Iter "+str(t)+": "+str(lnF)+" > "+str(lnX))
if (lnF == lnX): break
return lnF,lnR,rhat
def WeightedDD( factors, weights, elimOrder, direction=1.0, maxIter=100, verbose=False, stop_tol=0.0 ):
step_inner = 5;
thetas = [f.log() for f in factors]
weights = { th:wt for th,wt in zip(thetas,weights) }
logmodel = GraphModel(thetas, copy=False)
def wt_elim(f,w,pri):
|
def calc_bound( thetas, weights, pri):
return sum([wt_elim(th,wt,pri) for th,wt in zip(thetas,weights)])
def calc_deriv(th,w,pri,match,Xi=None):
elim_ord = np.argsort( [pri[x] for x in th.vars] )
lnZ0 = th.copy()
lnmu = 0.0 * lnZ0
for i in elim_ord: # run over v[i],w[i] in the given elim order
lnZ1 = lnZ0.lsePower([th.v[i]],1.0/w[i])
lnZ0 -= lnZ1; # update lnmu += (lnZ0 - lnZ1)*(1.0/w[i])
lnZ0 *= (1.0/w[i]);
lnmu += lnZ0; # TODO: save a copy by assigning = lnZ0 on 1st loop?
lnZ0 = lnZ1; # then move to the next conditional
lnmu.expIP()
Hxi = 0.0
if Xi is not None:
keep = [x for x in th.vars if pri[x]>=pri[Xi]]
forH = lnmu.marginal(keep) if len(keep) < th.nvar else lnmu
Hxi = forH.entropy() - forH.sum([Xi]).entropy() if forH.nvar > 1 else forH.entropy()
return lnmu.marginal(match), Hxi
def update_weights(weights,idx,dW,stepW): # TODO only works for positive weights
wtot = 0.0
for j,wt,dw in zip(idx,weights,dW): wt[j] *= np.exp( - stepW * wt[j] * dw ); wtot += wt[j];
for j,wt,dw in zip(idx,weights,dW): wt[j] /= wtot;
def armijo(thetas,weights,pri,Xi,steps,threshold=1e-4,direction=+1, optTol=1e-8,progTol=1e-8):
import copy
f0,f1 = None, calc_bound(thetas,weights,pri) # init prev, current objective values
match = reduce(lambda a,b: a&b, [th.vars for th in thetas], thetas[0].vars)
idx = [th.v.index(Xi) for th in thetas] if Xi is not None else [] # find location of Xi in var/weight vectors
newweights = copy.deepcopy(weights) if Xi is not None else weights # copy weights if updated
for s in range(steps):
# compute gradients dPhi/dTheta, dPhi/dW (wrt parameters, weights):
dT,dW = zip(*[calc_deriv(th,wt,pri,match,Xi) for th,wt in zip(thetas,weights)])
dT,dW = list(dT),list(dW)
for dt in dT[1:]: dt -= dT[0]; dt *= -1;
dT[0] = -sum(dT[1:])
if Xi is not None:
Hbar = sum([wt[j]*dw for j,dw,wt in zip(idx,dW,weights)])
for j in range(len(dW)): dW[j] -= Hbar
# Compute gradient norms:
L0,L1,L2 = zip(*[ (d.max(),d.sum(),(d*d).sum()) for dt in dT for d in [dt.abs()]])
L0,L1,L2 = max(L0),sum(L1)+1e-300,sum(L2)
L0,L1,L2 = max(L0,max(abs(dw) for dw in dW)), L1+sum(abs(dw) for dw in dW), L2+sum(dw*dw for dw in dW)
if L0 < optTol: return # if < optTol => local optimum
step = min(1.0, 1.0/L1) if f0 is None else min(1.0, direction*(f0-f1)/L1)
step = step if step > 0 else 1.0
f0 = f1; # update "old" objective value
for dt in dT: dt *= direction*step; # premultiply step size into dT
for j in range(10):
newthetas = [th+dt for th,dt in zip(thetas,dT)] # step already pre-multiplied
if Xi is not None: update_weights( newweights, idx, dW, step );
f1 = calc_bound(newthetas,newweights,pri)
#print " ",f0," => ",f1, " (",f0-f1,' ~ ',stepsize*threshold*gradnorm,")"
if (f0 - f1)*direction > step*threshold*L2: # if armijo "enough improvement" satisfied
for th,nth in zip(thetas,newthetas): th.t[:] = nth.t # rewrite tables
for j,wt,w2 in zip(idx,weights,newweights): wt[j] = w2[j];
break;
else: # ow, back off
step *= 0.5;
if step*L0 < progTol: return # if < progTol => no progress possible
for dt in dT: dt *= 0.5
elimOrder = np.asarray(elimOrder);
pri = np.zeros((elimOrder.max()+1,))
pri[elimOrder] = np.arange(len(elimOrder))
#
lnZw = calc_bound(thetas,[weights[th] for th in thetas],pri)
start_time = time.time()
if verbose: print("Iter 0: "+str(lnZw))
for t in xrange(1,maxIter+1): # for each iteration:
# Update each variable in turn:
for Xi in logmodel.X: # for each variable,
theta_i = logmodel.factorsWith(Xi)
if len(theta_i) <= 1: continue;
weight_i = [weights[th] for th in theta_i]
armijo(theta_i,weight_i,pri,Xi, 5, 0.01, direction)
#
# Compute the upper bound on the maximum and the value of our current guess
prev, lnZw = lnZw, calc_bound(thetas,[weights[th] for th in thetas],pri)
if verbose: print("[{}] Iter {} : {}".format(time.time()-start_time,t,lnZw));
if (prev - lnZw)*direction < stop_tol: break
return lnZw, thetas
| elim_ord = np.argsort( [pri[x] for x in f.vars] )
tmp = f
for i in elim_ord: tmp = tmp.lsePower([f.v[i]],1.0/w[i])
return tmp | identifier_body |
ledgerstate.go | package tangle
import (
"fmt"
"time"
"github.com/cockroachdb/errors"
"github.com/iotaledger/hive.go/types"
"github.com/iotaledger/goshimmer/packages/ledgerstate"
)
// region LedgerState //////////////////////////////////////////////////////////////////////////////////////////////////
// LedgerState is a Tangle component that wraps the components of the ledgerstate package and makes them available at a
// "single point of contact".
type LedgerState struct {
tangle *Tangle
BranchDAG *ledgerstate.BranchDAG
UTXODAG *ledgerstate.UTXODAG
totalSupply uint64
}
// NewLedgerState is the constructor of the LedgerState component.
func NewLedgerState(tangle *Tangle) (ledgerState *LedgerState) {
branchDAG := ledgerstate.NewBranchDAG(tangle.Options.Store)
return &LedgerState{
tangle: tangle,
BranchDAG: branchDAG,
UTXODAG: ledgerstate.NewUTXODAG(tangle.Options.Store, branchDAG),
}
}
// Shutdown shuts down the LedgerState and persists its state.
func (l *LedgerState) Shutdown() {
l.UTXODAG.Shutdown()
l.BranchDAG.Shutdown()
}
// InheritBranch implements the inheritance rules for Branches in the Tangle. It returns a single inherited Branch
// and automatically creates an AggregatedBranch if necessary.
func (l *LedgerState) InheritBranch(referencedBranchIDs ledgerstate.BranchIDs) (inheritedBranch ledgerstate.BranchID, err error) {
if referencedBranchIDs.Contains(ledgerstate.InvalidBranchID) {
inheritedBranch = ledgerstate.InvalidBranchID
return
}
branchIDsContainRejectedBranch, inheritedBranch := l.BranchDAG.BranchIDsContainRejectedBranch(referencedBranchIDs)
if branchIDsContainRejectedBranch {
return
}
cachedAggregatedBranch, _, err := l.BranchDAG.AggregateBranches(referencedBranchIDs)
if err != nil {
if errors.Is(err, ledgerstate.ErrInvalidStateTransition) {
inheritedBranch = ledgerstate.InvalidBranchID
err = nil
return
}
err = errors.Errorf("failed to aggregate BranchIDs: %w", err)
return
}
cachedAggregatedBranch.Release()
inheritedBranch = cachedAggregatedBranch.ID()
return
}
// TransactionValid performs some fast checks of the Transaction and triggers a MessageInvalid event if the checks do
// not pass.
func (l *LedgerState) TransactionValid(transaction *ledgerstate.Transaction, messageID MessageID) (err error) {
if err = l.UTXODAG.CheckTransaction(transaction); err != nil {
l.tangle.Storage.MessageMetadata(messageID).Consume(func(messagemetadata *MessageMetadata) {
messagemetadata.SetInvalid(true)
})
l.tangle.Events.MessageInvalid.Trigger(messageID)
return errors.Errorf("invalid transaction in message with %s: %w", messageID, err)
}
return nil
}
// TransactionConflicting returns whether the given transaction is part of a conflict.
func (l *LedgerState) TransactionConflicting(transactionID ledgerstate.TransactionID) bool {
return l.BranchID(transactionID) == ledgerstate.NewBranchID(transactionID)
}
// TransactionMetadata retrieves the TransactionMetadata with the given TransactionID from the object storage.
func (l *LedgerState) TransactionMetadata(transactionID ledgerstate.TransactionID) (cachedTransactionMetadata *ledgerstate.CachedTransactionMetadata) {
return l.UTXODAG.CachedTransactionMetadata(transactionID)
}
// Transaction retrieves the Transaction with the given TransactionID from the object storage.
func (l *LedgerState) Transaction(transactionID ledgerstate.TransactionID) *ledgerstate.CachedTransaction {
return l.UTXODAG.CachedTransaction(transactionID)
}
// BookTransaction books the given Transaction into the underlying LedgerState and returns the target Branch and an
// eventual error.
func (l *LedgerState) BookTransaction(transaction *ledgerstate.Transaction, messageID MessageID) (targetBranch ledgerstate.BranchID, err error) {
targetBranch, err = l.UTXODAG.BookTransaction(transaction)
if err != nil {
if !errors.Is(err, ledgerstate.ErrTransactionInvalid) && !errors.Is(err, ledgerstate.ErrTransactionNotSolid) {
err = errors.Errorf("failed to book Transaction: %w", err)
return
}
l.tangle.Storage.MessageMetadata(messageID).Consume(func(messagemetadata *MessageMetadata) {
messagemetadata.SetInvalid(true)
})
l.tangle.Events.MessageInvalid.Trigger(messageID)
// non-fatal errors should not bubble up - we trigger a MessageInvalid event instead
err = nil
return
}
return
}
// ConflictSet returns the list of transactionIDs conflicting with the given transactionID.
func (l *LedgerState) ConflictSet(transactionID ledgerstate.TransactionID) (conflictSet ledgerstate.TransactionIDs) {
conflictIDs := make(ledgerstate.ConflictIDs)
conflictSet = make(ledgerstate.TransactionIDs)
l.BranchDAG.Branch(ledgerstate.NewBranchID(transactionID)).Consume(func(branch ledgerstate.Branch) {
conflictIDs = branch.(*ledgerstate.ConflictBranch).Conflicts()
})
for conflictID := range conflictIDs {
l.BranchDAG.ConflictMembers(conflictID).Consume(func(conflictMember *ledgerstate.ConflictMember) {
conflictSet[ledgerstate.TransactionID(conflictMember.BranchID())] = types.Void
})
}
return
}
// TransactionInclusionState returns the InclusionState of the Transaction with the given TransactionID which can either be
// Pending, Confirmed or Rejected.
func (l *LedgerState) TransactionInclusionState(transactionID ledgerstate.TransactionID) (ledgerstate.InclusionState, error) {
return l.UTXODAG.InclusionState(transactionID)
}
// BranchInclusionState returns the InclusionState of the Branch with the given BranchID which can either be
// Pending, Confirmed or Rejected.
func (l *LedgerState) BranchInclusionState(branchID ledgerstate.BranchID) (inclusionState ledgerstate.InclusionState) {
l.BranchDAG.Branch(branchID).Consume(func(branch ledgerstate.Branch) {
inclusionState = branch.InclusionState()
})
return
}
// BranchID returns the branchID of the given transactionID.
func (l *LedgerState) BranchID(transactionID ledgerstate.TransactionID) (branchID ledgerstate.BranchID) {
l.UTXODAG.CachedTransactionMetadata(transactionID).Consume(func(transactionMetadata *ledgerstate.TransactionMetadata) {
branchID = transactionMetadata.BranchID()
})
return
}
// LoadSnapshot creates a set of outputs in the UTXO-DAG, that are forming the genesis for future transactions.
func (l *LedgerState) LoadSnapshot(snapshot *ledgerstate.Snapshot) (err error) {
l.UTXODAG.LoadSnapshot(snapshot)
// add attachment link between txs from snapshot and the genesis message (EmptyMessageID).
for txID, record := range snapshot.Transactions {
fmt.Println("... Loading snapshot transaction: ", txID, "#outputs=", len(record.Essence.Outputs()), record.UnspentOutputs)
attachment, _ := l.tangle.Storage.StoreAttachment(txID, EmptyMessageID)
if attachment != nil {
attachment.Release()
}
for i, output := range record.Essence.Outputs() {
if !record.UnspentOutputs[i] {
continue
}
output.Balances().ForEach(func(color ledgerstate.Color, balance uint64) bool {
l.totalSupply += balance
return true
})
}
}
attachment, _ := l.tangle.Storage.StoreAttachment(ledgerstate.GenesisTransactionID, EmptyMessageID)
if attachment != nil {
attachment.Release()
}
return
}
// SnapshotUTXO returns the UTXO snapshot, which is a list of transactions with unspent outputs.
func (l *LedgerState) SnapshotUTXO() (snapshot *ledgerstate.Snapshot) {
// The following parameter should be larger than the max allowed timestamp variation, and the required time for confirmation.
// We can snapshot this far in the past, since global snapshots dont occur frequent and it is ok to ignore the last few minutes.
minAge := 120 * time.Second
snapshot = &ledgerstate.Snapshot{
Transactions: make(map[ledgerstate.TransactionID]ledgerstate.Record),
}
startSnapshot := time.Now()
copyLedgerState := l.Transactions() // consider that this may take quite some time
for _, transaction := range copyLedgerState {
// skip unconfirmed transactions
inclusionState, err := l.TransactionInclusionState(transaction.ID())
if err != nil || inclusionState != ledgerstate.Confirmed {
continue
}
// skip transactions that are too recent before startSnapshot
if startSnapshot.Sub(transaction.Essence().Timestamp()) < minAge {
continue
}
unspentOutputs := make([]bool, len(transaction.Essence().Outputs()))
includeTransaction := false
for i, output := range transaction.Essence().Outputs() |
// include only transactions with at least one unspent output
if includeTransaction {
snapshot.Transactions[transaction.ID()] = ledgerstate.Record{
Essence: transaction.Essence(),
UnlockBlocks: transaction.UnlockBlocks(),
UnspentOutputs: unspentOutputs,
}
}
}
// TODO ??? due to possible race conditions we could add a check for the consistency of the UTXO snapshot
return snapshot
}
// ReturnTransaction returns a specific transaction.
func (l *LedgerState) ReturnTransaction(transactionID ledgerstate.TransactionID) (transaction *ledgerstate.Transaction) {
return l.UTXODAG.Transaction(transactionID)
}
// Transactions returns all the transactions.
func (l *LedgerState) Transactions() (transactions map[ledgerstate.TransactionID]*ledgerstate.Transaction) {
return l.UTXODAG.Transactions()
}
// CachedOutput returns the Output with the given ID.
func (l *LedgerState) CachedOutput(outputID ledgerstate.OutputID) *ledgerstate.CachedOutput {
return l.UTXODAG.CachedOutput(outputID)
}
// CachedOutputMetadata returns the OutputMetadata with the given ID.
func (l *LedgerState) CachedOutputMetadata(outputID ledgerstate.OutputID) *ledgerstate.CachedOutputMetadata {
return l.UTXODAG.CachedOutputMetadata(outputID)
}
// CachedOutputsOnAddress retrieves all the Outputs that are associated with an address.
func (l *LedgerState) CachedOutputsOnAddress(address ledgerstate.Address) (cachedOutputs ledgerstate.CachedOutputs) {
l.UTXODAG.CachedAddressOutputMapping(address).Consume(func(addressOutputMapping *ledgerstate.AddressOutputMapping) {
cachedOutputs = append(cachedOutputs, l.CachedOutput(addressOutputMapping.OutputID()))
})
return
}
// CheckTransaction contains fast checks that have to be performed before booking a Transaction.
func (l *LedgerState) CheckTransaction(transaction *ledgerstate.Transaction) (err error) {
return l.UTXODAG.CheckTransaction(transaction)
}
// ConsumedOutputs returns the consumed (cached)Outputs of the given Transaction.
func (l *LedgerState) ConsumedOutputs(transaction *ledgerstate.Transaction) (cachedInputs ledgerstate.CachedOutputs) {
return l.UTXODAG.ConsumedOutputs(transaction)
}
// Consumers returns the (cached) consumers of the given outputID.
func (l *LedgerState) Consumers(outputID ledgerstate.OutputID) (cachedTransactions ledgerstate.CachedConsumers) {
return l.UTXODAG.CachedConsumers(outputID)
}
// TotalSupply returns the total supply.
func (l *LedgerState) TotalSupply() (totalSupply uint64) {
return l.totalSupply
}
// endregion ///////////////////////////////////////////////////////////////////////////////////////////////////////////
| {
l.CachedOutputMetadata(output.ID()).Consume(func(outputMetadata *ledgerstate.OutputMetadata) {
if outputMetadata.ConfirmedConsumer() == ledgerstate.GenesisTransactionID { // no consumer yet
unspentOutputs[i] = true
includeTransaction = true
} else {
tx := copyLedgerState[outputMetadata.ConfirmedConsumer()]
// ignore consumers that are not confirmed long enough or even in the future.
if startSnapshot.Sub(tx.Essence().Timestamp()) < minAge {
unspentOutputs[i] = true
includeTransaction = true
}
}
})
} | conditional_block |
ledgerstate.go | package tangle
import (
"fmt"
"time"
"github.com/cockroachdb/errors"
"github.com/iotaledger/hive.go/types"
"github.com/iotaledger/goshimmer/packages/ledgerstate"
)
// region LedgerState //////////////////////////////////////////////////////////////////////////////////////////////////
// LedgerState is a Tangle component that wraps the components of the ledgerstate package and makes them available at a
// "single point of contact".
type LedgerState struct {
tangle *Tangle
BranchDAG *ledgerstate.BranchDAG
UTXODAG *ledgerstate.UTXODAG
totalSupply uint64
}
// NewLedgerState is the constructor of the LedgerState component.
func NewLedgerState(tangle *Tangle) (ledgerState *LedgerState) {
branchDAG := ledgerstate.NewBranchDAG(tangle.Options.Store)
return &LedgerState{
tangle: tangle,
BranchDAG: branchDAG,
UTXODAG: ledgerstate.NewUTXODAG(tangle.Options.Store, branchDAG),
}
}
// Shutdown shuts down the LedgerState and persists its state.
func (l *LedgerState) Shutdown() {
l.UTXODAG.Shutdown()
l.BranchDAG.Shutdown()
}
// InheritBranch implements the inheritance rules for Branches in the Tangle. It returns a single inherited Branch
// and automatically creates an AggregatedBranch if necessary.
func (l *LedgerState) InheritBranch(referencedBranchIDs ledgerstate.BranchIDs) (inheritedBranch ledgerstate.BranchID, err error) {
if referencedBranchIDs.Contains(ledgerstate.InvalidBranchID) {
inheritedBranch = ledgerstate.InvalidBranchID
return
}
branchIDsContainRejectedBranch, inheritedBranch := l.BranchDAG.BranchIDsContainRejectedBranch(referencedBranchIDs)
if branchIDsContainRejectedBranch {
return
}
cachedAggregatedBranch, _, err := l.BranchDAG.AggregateBranches(referencedBranchIDs)
if err != nil {
if errors.Is(err, ledgerstate.ErrInvalidStateTransition) {
inheritedBranch = ledgerstate.InvalidBranchID
err = nil
return
}
err = errors.Errorf("failed to aggregate BranchIDs: %w", err)
return
}
cachedAggregatedBranch.Release()
inheritedBranch = cachedAggregatedBranch.ID()
return
}
// TransactionValid performs some fast checks of the Transaction and triggers a MessageInvalid event if the checks do
// not pass.
func (l *LedgerState) TransactionValid(transaction *ledgerstate.Transaction, messageID MessageID) (err error) {
if err = l.UTXODAG.CheckTransaction(transaction); err != nil {
l.tangle.Storage.MessageMetadata(messageID).Consume(func(messagemetadata *MessageMetadata) {
messagemetadata.SetInvalid(true)
})
l.tangle.Events.MessageInvalid.Trigger(messageID)
return errors.Errorf("invalid transaction in message with %s: %w", messageID, err)
}
return nil
}
// TransactionConflicting returns whether the given transaction is part of a conflict.
func (l *LedgerState) TransactionConflicting(transactionID ledgerstate.TransactionID) bool {
return l.BranchID(transactionID) == ledgerstate.NewBranchID(transactionID)
}
// TransactionMetadata retrieves the TransactionMetadata with the given TransactionID from the object storage.
func (l *LedgerState) TransactionMetadata(transactionID ledgerstate.TransactionID) (cachedTransactionMetadata *ledgerstate.CachedTransactionMetadata) {
return l.UTXODAG.CachedTransactionMetadata(transactionID)
}
// Transaction retrieves the Transaction with the given TransactionID from the object storage.
func (l *LedgerState) Transaction(transactionID ledgerstate.TransactionID) *ledgerstate.CachedTransaction {
return l.UTXODAG.CachedTransaction(transactionID)
}
// BookTransaction books the given Transaction into the underlying LedgerState and returns the target Branch and an
// eventual error.
func (l *LedgerState) BookTransaction(transaction *ledgerstate.Transaction, messageID MessageID) (targetBranch ledgerstate.BranchID, err error) {
targetBranch, err = l.UTXODAG.BookTransaction(transaction)
if err != nil {
if !errors.Is(err, ledgerstate.ErrTransactionInvalid) && !errors.Is(err, ledgerstate.ErrTransactionNotSolid) {
err = errors.Errorf("failed to book Transaction: %w", err)
return
}
l.tangle.Storage.MessageMetadata(messageID).Consume(func(messagemetadata *MessageMetadata) {
messagemetadata.SetInvalid(true)
})
l.tangle.Events.MessageInvalid.Trigger(messageID)
// non-fatal errors should not bubble up - we trigger a MessageInvalid event instead
err = nil
return
}
return
}
// ConflictSet returns the list of transactionIDs conflicting with the given transactionID.
func (l *LedgerState) ConflictSet(transactionID ledgerstate.TransactionID) (conflictSet ledgerstate.TransactionIDs) {
conflictIDs := make(ledgerstate.ConflictIDs)
conflictSet = make(ledgerstate.TransactionIDs)
l.BranchDAG.Branch(ledgerstate.NewBranchID(transactionID)).Consume(func(branch ledgerstate.Branch) {
conflictIDs = branch.(*ledgerstate.ConflictBranch).Conflicts()
})
for conflictID := range conflictIDs {
l.BranchDAG.ConflictMembers(conflictID).Consume(func(conflictMember *ledgerstate.ConflictMember) {
conflictSet[ledgerstate.TransactionID(conflictMember.BranchID())] = types.Void
})
}
return
}
// TransactionInclusionState returns the InclusionState of the Transaction with the given TransactionID which can either be
// Pending, Confirmed or Rejected.
func (l *LedgerState) TransactionInclusionState(transactionID ledgerstate.TransactionID) (ledgerstate.InclusionState, error) {
return l.UTXODAG.InclusionState(transactionID)
}
// BranchInclusionState returns the InclusionState of the Branch with the given BranchID which can either be
// Pending, Confirmed or Rejected.
func (l *LedgerState) BranchInclusionState(branchID ledgerstate.BranchID) (inclusionState ledgerstate.InclusionState) {
l.BranchDAG.Branch(branchID).Consume(func(branch ledgerstate.Branch) {
inclusionState = branch.InclusionState()
})
return
}
// BranchID returns the branchID of the given transactionID.
func (l *LedgerState) BranchID(transactionID ledgerstate.TransactionID) (branchID ledgerstate.BranchID) {
l.UTXODAG.CachedTransactionMetadata(transactionID).Consume(func(transactionMetadata *ledgerstate.TransactionMetadata) {
branchID = transactionMetadata.BranchID()
})
return
}
// LoadSnapshot creates a set of outputs in the UTXO-DAG, that are forming the genesis for future transactions.
func (l *LedgerState) LoadSnapshot(snapshot *ledgerstate.Snapshot) (err error) {
l.UTXODAG.LoadSnapshot(snapshot)
// add attachment link between txs from snapshot and the genesis message (EmptyMessageID).
for txID, record := range snapshot.Transactions {
fmt.Println("... Loading snapshot transaction: ", txID, "#outputs=", len(record.Essence.Outputs()), record.UnspentOutputs)
attachment, _ := l.tangle.Storage.StoreAttachment(txID, EmptyMessageID)
if attachment != nil {
attachment.Release()
}
for i, output := range record.Essence.Outputs() {
if !record.UnspentOutputs[i] {
continue
}
output.Balances().ForEach(func(color ledgerstate.Color, balance uint64) bool {
l.totalSupply += balance
return true
})
}
}
attachment, _ := l.tangle.Storage.StoreAttachment(ledgerstate.GenesisTransactionID, EmptyMessageID)
if attachment != nil {
attachment.Release()
}
return
}
// SnapshotUTXO returns the UTXO snapshot, which is a list of transactions with unspent outputs.
func (l *LedgerState) SnapshotUTXO() (snapshot *ledgerstate.Snapshot) {
// The following parameter should be larger than the max allowed timestamp variation, and the required time for confirmation.
// We can snapshot this far in the past, since global snapshots dont occur frequent and it is ok to ignore the last few minutes.
minAge := 120 * time.Second
snapshot = &ledgerstate.Snapshot{
Transactions: make(map[ledgerstate.TransactionID]ledgerstate.Record),
} |
startSnapshot := time.Now()
copyLedgerState := l.Transactions() // consider that this may take quite some time
for _, transaction := range copyLedgerState {
// skip unconfirmed transactions
inclusionState, err := l.TransactionInclusionState(transaction.ID())
if err != nil || inclusionState != ledgerstate.Confirmed {
continue
}
// skip transactions that are too recent before startSnapshot
if startSnapshot.Sub(transaction.Essence().Timestamp()) < minAge {
continue
}
unspentOutputs := make([]bool, len(transaction.Essence().Outputs()))
includeTransaction := false
for i, output := range transaction.Essence().Outputs() {
l.CachedOutputMetadata(output.ID()).Consume(func(outputMetadata *ledgerstate.OutputMetadata) {
if outputMetadata.ConfirmedConsumer() == ledgerstate.GenesisTransactionID { // no consumer yet
unspentOutputs[i] = true
includeTransaction = true
} else {
tx := copyLedgerState[outputMetadata.ConfirmedConsumer()]
// ignore consumers that are not confirmed long enough or even in the future.
if startSnapshot.Sub(tx.Essence().Timestamp()) < minAge {
unspentOutputs[i] = true
includeTransaction = true
}
}
})
}
// include only transactions with at least one unspent output
if includeTransaction {
snapshot.Transactions[transaction.ID()] = ledgerstate.Record{
Essence: transaction.Essence(),
UnlockBlocks: transaction.UnlockBlocks(),
UnspentOutputs: unspentOutputs,
}
}
}
// TODO ??? due to possible race conditions we could add a check for the consistency of the UTXO snapshot
return snapshot
}
// ReturnTransaction returns a specific transaction.
func (l *LedgerState) ReturnTransaction(transactionID ledgerstate.TransactionID) (transaction *ledgerstate.Transaction) {
return l.UTXODAG.Transaction(transactionID)
}
// Transactions returns all the transactions.
func (l *LedgerState) Transactions() (transactions map[ledgerstate.TransactionID]*ledgerstate.Transaction) {
return l.UTXODAG.Transactions()
}
// CachedOutput returns the Output with the given ID.
func (l *LedgerState) CachedOutput(outputID ledgerstate.OutputID) *ledgerstate.CachedOutput {
return l.UTXODAG.CachedOutput(outputID)
}
// CachedOutputMetadata returns the OutputMetadata with the given ID.
func (l *LedgerState) CachedOutputMetadata(outputID ledgerstate.OutputID) *ledgerstate.CachedOutputMetadata {
return l.UTXODAG.CachedOutputMetadata(outputID)
}
// CachedOutputsOnAddress retrieves all the Outputs that are associated with an address.
func (l *LedgerState) CachedOutputsOnAddress(address ledgerstate.Address) (cachedOutputs ledgerstate.CachedOutputs) {
l.UTXODAG.CachedAddressOutputMapping(address).Consume(func(addressOutputMapping *ledgerstate.AddressOutputMapping) {
cachedOutputs = append(cachedOutputs, l.CachedOutput(addressOutputMapping.OutputID()))
})
return
}
// CheckTransaction contains fast checks that have to be performed before booking a Transaction.
func (l *LedgerState) CheckTransaction(transaction *ledgerstate.Transaction) (err error) {
return l.UTXODAG.CheckTransaction(transaction)
}
// ConsumedOutputs returns the consumed (cached)Outputs of the given Transaction.
func (l *LedgerState) ConsumedOutputs(transaction *ledgerstate.Transaction) (cachedInputs ledgerstate.CachedOutputs) {
return l.UTXODAG.ConsumedOutputs(transaction)
}
// Consumers returns the (cached) consumers of the given outputID.
func (l *LedgerState) Consumers(outputID ledgerstate.OutputID) (cachedTransactions ledgerstate.CachedConsumers) {
return l.UTXODAG.CachedConsumers(outputID)
}
// TotalSupply returns the total supply.
func (l *LedgerState) TotalSupply() (totalSupply uint64) {
return l.totalSupply
}
// endregion /////////////////////////////////////////////////////////////////////////////////////////////////////////// | random_line_split | |
ledgerstate.go | package tangle
import (
"fmt"
"time"
"github.com/cockroachdb/errors"
"github.com/iotaledger/hive.go/types"
"github.com/iotaledger/goshimmer/packages/ledgerstate"
)
// region LedgerState //////////////////////////////////////////////////////////////////////////////////////////////////
// LedgerState is a Tangle component that wraps the components of the ledgerstate package and makes them available at a
// "single point of contact".
type LedgerState struct {
tangle *Tangle
BranchDAG *ledgerstate.BranchDAG
UTXODAG *ledgerstate.UTXODAG
totalSupply uint64
}
// NewLedgerState is the constructor of the LedgerState component.
func NewLedgerState(tangle *Tangle) (ledgerState *LedgerState) {
branchDAG := ledgerstate.NewBranchDAG(tangle.Options.Store)
return &LedgerState{
tangle: tangle,
BranchDAG: branchDAG,
UTXODAG: ledgerstate.NewUTXODAG(tangle.Options.Store, branchDAG),
}
}
// Shutdown shuts down the LedgerState and persists its state.
func (l *LedgerState) Shutdown() {
l.UTXODAG.Shutdown()
l.BranchDAG.Shutdown()
}
// InheritBranch implements the inheritance rules for Branches in the Tangle. It returns a single inherited Branch
// and automatically creates an AggregatedBranch if necessary.
func (l *LedgerState) InheritBranch(referencedBranchIDs ledgerstate.BranchIDs) (inheritedBranch ledgerstate.BranchID, err error) {
if referencedBranchIDs.Contains(ledgerstate.InvalidBranchID) {
inheritedBranch = ledgerstate.InvalidBranchID
return
}
branchIDsContainRejectedBranch, inheritedBranch := l.BranchDAG.BranchIDsContainRejectedBranch(referencedBranchIDs)
if branchIDsContainRejectedBranch {
return
}
cachedAggregatedBranch, _, err := l.BranchDAG.AggregateBranches(referencedBranchIDs)
if err != nil {
if errors.Is(err, ledgerstate.ErrInvalidStateTransition) {
inheritedBranch = ledgerstate.InvalidBranchID
err = nil
return
}
err = errors.Errorf("failed to aggregate BranchIDs: %w", err)
return
}
cachedAggregatedBranch.Release()
inheritedBranch = cachedAggregatedBranch.ID()
return
}
// TransactionValid performs some fast checks of the Transaction and triggers a MessageInvalid event if the checks do
// not pass.
func (l *LedgerState) TransactionValid(transaction *ledgerstate.Transaction, messageID MessageID) (err error) {
if err = l.UTXODAG.CheckTransaction(transaction); err != nil {
l.tangle.Storage.MessageMetadata(messageID).Consume(func(messagemetadata *MessageMetadata) {
messagemetadata.SetInvalid(true)
})
l.tangle.Events.MessageInvalid.Trigger(messageID)
return errors.Errorf("invalid transaction in message with %s: %w", messageID, err)
}
return nil
}
// TransactionConflicting returns whether the given transaction is part of a conflict.
func (l *LedgerState) TransactionConflicting(transactionID ledgerstate.TransactionID) bool {
return l.BranchID(transactionID) == ledgerstate.NewBranchID(transactionID)
}
// TransactionMetadata retrieves the TransactionMetadata with the given TransactionID from the object storage.
func (l *LedgerState) TransactionMetadata(transactionID ledgerstate.TransactionID) (cachedTransactionMetadata *ledgerstate.CachedTransactionMetadata) {
return l.UTXODAG.CachedTransactionMetadata(transactionID)
}
// Transaction retrieves the Transaction with the given TransactionID from the object storage.
func (l *LedgerState) Transaction(transactionID ledgerstate.TransactionID) *ledgerstate.CachedTransaction {
return l.UTXODAG.CachedTransaction(transactionID)
}
// BookTransaction books the given Transaction into the underlying LedgerState and returns the target Branch and an
// eventual error.
func (l *LedgerState) BookTransaction(transaction *ledgerstate.Transaction, messageID MessageID) (targetBranch ledgerstate.BranchID, err error) {
targetBranch, err = l.UTXODAG.BookTransaction(transaction)
if err != nil {
if !errors.Is(err, ledgerstate.ErrTransactionInvalid) && !errors.Is(err, ledgerstate.ErrTransactionNotSolid) {
err = errors.Errorf("failed to book Transaction: %w", err)
return
}
l.tangle.Storage.MessageMetadata(messageID).Consume(func(messagemetadata *MessageMetadata) {
messagemetadata.SetInvalid(true)
})
l.tangle.Events.MessageInvalid.Trigger(messageID)
// non-fatal errors should not bubble up - we trigger a MessageInvalid event instead
err = nil
return
}
return
}
// ConflictSet returns the list of transactionIDs conflicting with the given transactionID.
func (l *LedgerState) ConflictSet(transactionID ledgerstate.TransactionID) (conflictSet ledgerstate.TransactionIDs) {
conflictIDs := make(ledgerstate.ConflictIDs)
conflictSet = make(ledgerstate.TransactionIDs)
l.BranchDAG.Branch(ledgerstate.NewBranchID(transactionID)).Consume(func(branch ledgerstate.Branch) {
conflictIDs = branch.(*ledgerstate.ConflictBranch).Conflicts()
})
for conflictID := range conflictIDs {
l.BranchDAG.ConflictMembers(conflictID).Consume(func(conflictMember *ledgerstate.ConflictMember) {
conflictSet[ledgerstate.TransactionID(conflictMember.BranchID())] = types.Void
})
}
return
}
// TransactionInclusionState returns the InclusionState of the Transaction with the given TransactionID which can either be
// Pending, Confirmed or Rejected.
func (l *LedgerState) TransactionInclusionState(transactionID ledgerstate.TransactionID) (ledgerstate.InclusionState, error) {
return l.UTXODAG.InclusionState(transactionID)
}
// BranchInclusionState returns the InclusionState of the Branch with the given BranchID which can either be
// Pending, Confirmed or Rejected.
func (l *LedgerState) BranchInclusionState(branchID ledgerstate.BranchID) (inclusionState ledgerstate.InclusionState) {
l.BranchDAG.Branch(branchID).Consume(func(branch ledgerstate.Branch) {
inclusionState = branch.InclusionState()
})
return
}
// BranchID returns the branchID of the given transactionID.
func (l *LedgerState) BranchID(transactionID ledgerstate.TransactionID) (branchID ledgerstate.BranchID) {
l.UTXODAG.CachedTransactionMetadata(transactionID).Consume(func(transactionMetadata *ledgerstate.TransactionMetadata) {
branchID = transactionMetadata.BranchID()
})
return
}
// LoadSnapshot creates a set of outputs in the UTXO-DAG, that are forming the genesis for future transactions.
func (l *LedgerState) LoadSnapshot(snapshot *ledgerstate.Snapshot) (err error) {
l.UTXODAG.LoadSnapshot(snapshot)
// add attachment link between txs from snapshot and the genesis message (EmptyMessageID).
for txID, record := range snapshot.Transactions {
fmt.Println("... Loading snapshot transaction: ", txID, "#outputs=", len(record.Essence.Outputs()), record.UnspentOutputs)
attachment, _ := l.tangle.Storage.StoreAttachment(txID, EmptyMessageID)
if attachment != nil {
attachment.Release()
}
for i, output := range record.Essence.Outputs() {
if !record.UnspentOutputs[i] {
continue
}
output.Balances().ForEach(func(color ledgerstate.Color, balance uint64) bool {
l.totalSupply += balance
return true
})
}
}
attachment, _ := l.tangle.Storage.StoreAttachment(ledgerstate.GenesisTransactionID, EmptyMessageID)
if attachment != nil {
attachment.Release()
}
return
}
// SnapshotUTXO returns the UTXO snapshot, which is a list of transactions with unspent outputs.
func (l *LedgerState) SnapshotUTXO() (snapshot *ledgerstate.Snapshot) {
// The following parameter should be larger than the max allowed timestamp variation, and the required time for confirmation.
// We can snapshot this far in the past, since global snapshots dont occur frequent and it is ok to ignore the last few minutes.
minAge := 120 * time.Second
snapshot = &ledgerstate.Snapshot{
Transactions: make(map[ledgerstate.TransactionID]ledgerstate.Record),
}
startSnapshot := time.Now()
copyLedgerState := l.Transactions() // consider that this may take quite some time
for _, transaction := range copyLedgerState {
// skip unconfirmed transactions
inclusionState, err := l.TransactionInclusionState(transaction.ID())
if err != nil || inclusionState != ledgerstate.Confirmed {
continue
}
// skip transactions that are too recent before startSnapshot
if startSnapshot.Sub(transaction.Essence().Timestamp()) < minAge {
continue
}
unspentOutputs := make([]bool, len(transaction.Essence().Outputs()))
includeTransaction := false
for i, output := range transaction.Essence().Outputs() {
l.CachedOutputMetadata(output.ID()).Consume(func(outputMetadata *ledgerstate.OutputMetadata) {
if outputMetadata.ConfirmedConsumer() == ledgerstate.GenesisTransactionID { // no consumer yet
unspentOutputs[i] = true
includeTransaction = true
} else {
tx := copyLedgerState[outputMetadata.ConfirmedConsumer()]
// ignore consumers that are not confirmed long enough or even in the future.
if startSnapshot.Sub(tx.Essence().Timestamp()) < minAge {
unspentOutputs[i] = true
includeTransaction = true
}
}
})
}
// include only transactions with at least one unspent output
if includeTransaction {
snapshot.Transactions[transaction.ID()] = ledgerstate.Record{
Essence: transaction.Essence(),
UnlockBlocks: transaction.UnlockBlocks(),
UnspentOutputs: unspentOutputs,
}
}
}
// TODO ??? due to possible race conditions we could add a check for the consistency of the UTXO snapshot
return snapshot
}
// ReturnTransaction returns a specific transaction.
func (l *LedgerState) ReturnTransaction(transactionID ledgerstate.TransactionID) (transaction *ledgerstate.Transaction) {
return l.UTXODAG.Transaction(transactionID)
}
// Transactions returns all the transactions.
func (l *LedgerState) Transactions() (transactions map[ledgerstate.TransactionID]*ledgerstate.Transaction) {
return l.UTXODAG.Transactions()
}
// CachedOutput returns the Output with the given ID.
func (l *LedgerState) CachedOutput(outputID ledgerstate.OutputID) *ledgerstate.CachedOutput {
return l.UTXODAG.CachedOutput(outputID)
}
// CachedOutputMetadata returns the OutputMetadata with the given ID.
func (l *LedgerState) | (outputID ledgerstate.OutputID) *ledgerstate.CachedOutputMetadata {
return l.UTXODAG.CachedOutputMetadata(outputID)
}
// CachedOutputsOnAddress retrieves all the Outputs that are associated with an address.
func (l *LedgerState) CachedOutputsOnAddress(address ledgerstate.Address) (cachedOutputs ledgerstate.CachedOutputs) {
l.UTXODAG.CachedAddressOutputMapping(address).Consume(func(addressOutputMapping *ledgerstate.AddressOutputMapping) {
cachedOutputs = append(cachedOutputs, l.CachedOutput(addressOutputMapping.OutputID()))
})
return
}
// CheckTransaction contains fast checks that have to be performed before booking a Transaction.
func (l *LedgerState) CheckTransaction(transaction *ledgerstate.Transaction) (err error) {
return l.UTXODAG.CheckTransaction(transaction)
}
// ConsumedOutputs returns the consumed (cached)Outputs of the given Transaction.
func (l *LedgerState) ConsumedOutputs(transaction *ledgerstate.Transaction) (cachedInputs ledgerstate.CachedOutputs) {
return l.UTXODAG.ConsumedOutputs(transaction)
}
// Consumers returns the (cached) consumers of the given outputID.
func (l *LedgerState) Consumers(outputID ledgerstate.OutputID) (cachedTransactions ledgerstate.CachedConsumers) {
return l.UTXODAG.CachedConsumers(outputID)
}
// TotalSupply returns the total supply.
func (l *LedgerState) TotalSupply() (totalSupply uint64) {
return l.totalSupply
}
// endregion ///////////////////////////////////////////////////////////////////////////////////////////////////////////
| CachedOutputMetadata | identifier_name |
ledgerstate.go | package tangle
import (
"fmt"
"time"
"github.com/cockroachdb/errors"
"github.com/iotaledger/hive.go/types"
"github.com/iotaledger/goshimmer/packages/ledgerstate"
)
// region LedgerState //////////////////////////////////////////////////////////////////////////////////////////////////
// LedgerState is a Tangle component that wraps the components of the ledgerstate package and makes them available at a
// "single point of contact".
type LedgerState struct {
tangle *Tangle
BranchDAG *ledgerstate.BranchDAG
UTXODAG *ledgerstate.UTXODAG
totalSupply uint64
}
// NewLedgerState is the constructor of the LedgerState component.
func NewLedgerState(tangle *Tangle) (ledgerState *LedgerState) {
branchDAG := ledgerstate.NewBranchDAG(tangle.Options.Store)
return &LedgerState{
tangle: tangle,
BranchDAG: branchDAG,
UTXODAG: ledgerstate.NewUTXODAG(tangle.Options.Store, branchDAG),
}
}
// Shutdown shuts down the LedgerState and persists its state.
func (l *LedgerState) Shutdown() |
// InheritBranch implements the inheritance rules for Branches in the Tangle. It returns a single inherited Branch
// and automatically creates an AggregatedBranch if necessary.
func (l *LedgerState) InheritBranch(referencedBranchIDs ledgerstate.BranchIDs) (inheritedBranch ledgerstate.BranchID, err error) {
if referencedBranchIDs.Contains(ledgerstate.InvalidBranchID) {
inheritedBranch = ledgerstate.InvalidBranchID
return
}
branchIDsContainRejectedBranch, inheritedBranch := l.BranchDAG.BranchIDsContainRejectedBranch(referencedBranchIDs)
if branchIDsContainRejectedBranch {
return
}
cachedAggregatedBranch, _, err := l.BranchDAG.AggregateBranches(referencedBranchIDs)
if err != nil {
if errors.Is(err, ledgerstate.ErrInvalidStateTransition) {
inheritedBranch = ledgerstate.InvalidBranchID
err = nil
return
}
err = errors.Errorf("failed to aggregate BranchIDs: %w", err)
return
}
cachedAggregatedBranch.Release()
inheritedBranch = cachedAggregatedBranch.ID()
return
}
// TransactionValid performs some fast checks of the Transaction and triggers a MessageInvalid event if the checks do
// not pass.
func (l *LedgerState) TransactionValid(transaction *ledgerstate.Transaction, messageID MessageID) (err error) {
if err = l.UTXODAG.CheckTransaction(transaction); err != nil {
l.tangle.Storage.MessageMetadata(messageID).Consume(func(messagemetadata *MessageMetadata) {
messagemetadata.SetInvalid(true)
})
l.tangle.Events.MessageInvalid.Trigger(messageID)
return errors.Errorf("invalid transaction in message with %s: %w", messageID, err)
}
return nil
}
// TransactionConflicting returns whether the given transaction is part of a conflict.
func (l *LedgerState) TransactionConflicting(transactionID ledgerstate.TransactionID) bool {
return l.BranchID(transactionID) == ledgerstate.NewBranchID(transactionID)
}
// TransactionMetadata retrieves the TransactionMetadata with the given TransactionID from the object storage.
func (l *LedgerState) TransactionMetadata(transactionID ledgerstate.TransactionID) (cachedTransactionMetadata *ledgerstate.CachedTransactionMetadata) {
return l.UTXODAG.CachedTransactionMetadata(transactionID)
}
// Transaction retrieves the Transaction with the given TransactionID from the object storage.
func (l *LedgerState) Transaction(transactionID ledgerstate.TransactionID) *ledgerstate.CachedTransaction {
return l.UTXODAG.CachedTransaction(transactionID)
}
// BookTransaction books the given Transaction into the underlying LedgerState and returns the target Branch and an
// eventual error.
func (l *LedgerState) BookTransaction(transaction *ledgerstate.Transaction, messageID MessageID) (targetBranch ledgerstate.BranchID, err error) {
targetBranch, err = l.UTXODAG.BookTransaction(transaction)
if err != nil {
if !errors.Is(err, ledgerstate.ErrTransactionInvalid) && !errors.Is(err, ledgerstate.ErrTransactionNotSolid) {
err = errors.Errorf("failed to book Transaction: %w", err)
return
}
l.tangle.Storage.MessageMetadata(messageID).Consume(func(messagemetadata *MessageMetadata) {
messagemetadata.SetInvalid(true)
})
l.tangle.Events.MessageInvalid.Trigger(messageID)
// non-fatal errors should not bubble up - we trigger a MessageInvalid event instead
err = nil
return
}
return
}
// ConflictSet returns the list of transactionIDs conflicting with the given transactionID.
func (l *LedgerState) ConflictSet(transactionID ledgerstate.TransactionID) (conflictSet ledgerstate.TransactionIDs) {
conflictIDs := make(ledgerstate.ConflictIDs)
conflictSet = make(ledgerstate.TransactionIDs)
l.BranchDAG.Branch(ledgerstate.NewBranchID(transactionID)).Consume(func(branch ledgerstate.Branch) {
conflictIDs = branch.(*ledgerstate.ConflictBranch).Conflicts()
})
for conflictID := range conflictIDs {
l.BranchDAG.ConflictMembers(conflictID).Consume(func(conflictMember *ledgerstate.ConflictMember) {
conflictSet[ledgerstate.TransactionID(conflictMember.BranchID())] = types.Void
})
}
return
}
// TransactionInclusionState returns the InclusionState of the Transaction with the given TransactionID which can either be
// Pending, Confirmed or Rejected.
func (l *LedgerState) TransactionInclusionState(transactionID ledgerstate.TransactionID) (ledgerstate.InclusionState, error) {
return l.UTXODAG.InclusionState(transactionID)
}
// BranchInclusionState returns the InclusionState of the Branch with the given BranchID which can either be
// Pending, Confirmed or Rejected.
func (l *LedgerState) BranchInclusionState(branchID ledgerstate.BranchID) (inclusionState ledgerstate.InclusionState) {
l.BranchDAG.Branch(branchID).Consume(func(branch ledgerstate.Branch) {
inclusionState = branch.InclusionState()
})
return
}
// BranchID returns the branchID of the given transactionID.
func (l *LedgerState) BranchID(transactionID ledgerstate.TransactionID) (branchID ledgerstate.BranchID) {
l.UTXODAG.CachedTransactionMetadata(transactionID).Consume(func(transactionMetadata *ledgerstate.TransactionMetadata) {
branchID = transactionMetadata.BranchID()
})
return
}
// LoadSnapshot creates a set of outputs in the UTXO-DAG, that are forming the genesis for future transactions.
func (l *LedgerState) LoadSnapshot(snapshot *ledgerstate.Snapshot) (err error) {
l.UTXODAG.LoadSnapshot(snapshot)
// add attachment link between txs from snapshot and the genesis message (EmptyMessageID).
for txID, record := range snapshot.Transactions {
fmt.Println("... Loading snapshot transaction: ", txID, "#outputs=", len(record.Essence.Outputs()), record.UnspentOutputs)
attachment, _ := l.tangle.Storage.StoreAttachment(txID, EmptyMessageID)
if attachment != nil {
attachment.Release()
}
for i, output := range record.Essence.Outputs() {
if !record.UnspentOutputs[i] {
continue
}
output.Balances().ForEach(func(color ledgerstate.Color, balance uint64) bool {
l.totalSupply += balance
return true
})
}
}
attachment, _ := l.tangle.Storage.StoreAttachment(ledgerstate.GenesisTransactionID, EmptyMessageID)
if attachment != nil {
attachment.Release()
}
return
}
// SnapshotUTXO returns the UTXO snapshot, which is a list of transactions with unspent outputs.
func (l *LedgerState) SnapshotUTXO() (snapshot *ledgerstate.Snapshot) {
// The following parameter should be larger than the max allowed timestamp variation, and the required time for confirmation.
// We can snapshot this far in the past, since global snapshots dont occur frequent and it is ok to ignore the last few minutes.
minAge := 120 * time.Second
snapshot = &ledgerstate.Snapshot{
Transactions: make(map[ledgerstate.TransactionID]ledgerstate.Record),
}
startSnapshot := time.Now()
copyLedgerState := l.Transactions() // consider that this may take quite some time
for _, transaction := range copyLedgerState {
// skip unconfirmed transactions
inclusionState, err := l.TransactionInclusionState(transaction.ID())
if err != nil || inclusionState != ledgerstate.Confirmed {
continue
}
// skip transactions that are too recent before startSnapshot
if startSnapshot.Sub(transaction.Essence().Timestamp()) < minAge {
continue
}
unspentOutputs := make([]bool, len(transaction.Essence().Outputs()))
includeTransaction := false
for i, output := range transaction.Essence().Outputs() {
l.CachedOutputMetadata(output.ID()).Consume(func(outputMetadata *ledgerstate.OutputMetadata) {
if outputMetadata.ConfirmedConsumer() == ledgerstate.GenesisTransactionID { // no consumer yet
unspentOutputs[i] = true
includeTransaction = true
} else {
tx := copyLedgerState[outputMetadata.ConfirmedConsumer()]
// ignore consumers that are not confirmed long enough or even in the future.
if startSnapshot.Sub(tx.Essence().Timestamp()) < minAge {
unspentOutputs[i] = true
includeTransaction = true
}
}
})
}
// include only transactions with at least one unspent output
if includeTransaction {
snapshot.Transactions[transaction.ID()] = ledgerstate.Record{
Essence: transaction.Essence(),
UnlockBlocks: transaction.UnlockBlocks(),
UnspentOutputs: unspentOutputs,
}
}
}
// TODO ??? due to possible race conditions we could add a check for the consistency of the UTXO snapshot
return snapshot
}
// ReturnTransaction returns a specific transaction.
func (l *LedgerState) ReturnTransaction(transactionID ledgerstate.TransactionID) (transaction *ledgerstate.Transaction) {
return l.UTXODAG.Transaction(transactionID)
}
// Transactions returns all the transactions.
func (l *LedgerState) Transactions() (transactions map[ledgerstate.TransactionID]*ledgerstate.Transaction) {
return l.UTXODAG.Transactions()
}
// CachedOutput returns the Output with the given ID.
func (l *LedgerState) CachedOutput(outputID ledgerstate.OutputID) *ledgerstate.CachedOutput {
return l.UTXODAG.CachedOutput(outputID)
}
// CachedOutputMetadata returns the OutputMetadata with the given ID.
func (l *LedgerState) CachedOutputMetadata(outputID ledgerstate.OutputID) *ledgerstate.CachedOutputMetadata {
return l.UTXODAG.CachedOutputMetadata(outputID)
}
// CachedOutputsOnAddress retrieves all the Outputs that are associated with an address.
func (l *LedgerState) CachedOutputsOnAddress(address ledgerstate.Address) (cachedOutputs ledgerstate.CachedOutputs) {
l.UTXODAG.CachedAddressOutputMapping(address).Consume(func(addressOutputMapping *ledgerstate.AddressOutputMapping) {
cachedOutputs = append(cachedOutputs, l.CachedOutput(addressOutputMapping.OutputID()))
})
return
}
// CheckTransaction contains fast checks that have to be performed before booking a Transaction.
func (l *LedgerState) CheckTransaction(transaction *ledgerstate.Transaction) (err error) {
return l.UTXODAG.CheckTransaction(transaction)
}
// ConsumedOutputs returns the consumed (cached)Outputs of the given Transaction.
func (l *LedgerState) ConsumedOutputs(transaction *ledgerstate.Transaction) (cachedInputs ledgerstate.CachedOutputs) {
return l.UTXODAG.ConsumedOutputs(transaction)
}
// Consumers returns the (cached) consumers of the given outputID.
func (l *LedgerState) Consumers(outputID ledgerstate.OutputID) (cachedTransactions ledgerstate.CachedConsumers) {
return l.UTXODAG.CachedConsumers(outputID)
}
// TotalSupply returns the total supply.
func (l *LedgerState) TotalSupply() (totalSupply uint64) {
return l.totalSupply
}
// endregion ///////////////////////////////////////////////////////////////////////////////////////////////////////////
| {
l.UTXODAG.Shutdown()
l.BranchDAG.Shutdown()
} | identifier_body |
evaluation_utils.py | # Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utility for evaluating various tasks, e.g., translation & summarization."""
import codecs
import os
import re
import subprocess
import sys
import tensorflow as tf
from ..scripts import bleu
from ..scripts import rouge
__all__ = ["evaluate"]
def evaluate(ref_file, trans_file, metric, subword_option=None):
"""Pick a metric and evaluate depending on task."""
# BLEU scores for translation task
if '@' in metric.lower():
pos = metric.lower().index('@')
if subword_option is None:
subword_option = 'None'
subword_option += metric[pos:]
metric = metric[0:pos]
if metric.lower() == "bleu":
evaluation_score = _bleu(ref_file, trans_file,
subword_option=subword_option)
elif len(metric.lower()) > 4 and metric.lower()[0:4]=='bleu':
max_order = int(metric.lower()[5:])
evaluation_score = _bleu(ref_file, trans_file,max_order=max_order,
subword_option=subword_option)
# ROUGE scores for summarization tasks
elif metric.lower() == "rouge":
evaluation_score = _rouge(ref_file, trans_file,
subword_option=subword_option)
elif metric.lower() == "accuracy":
evaluation_score = _accuracy(ref_file, trans_file,
subword_option=subword_option)
elif metric.lower() == "word_accuracy":
evaluation_score = _word_accuracy(ref_file, trans_file,
subword_option=subword_option)
elif metric.lower()[0:len('distinct')] == 'distinct':
max_order = int(metric.lower()[len('distinct')+1:])
evaluation_score = _distinct(trans_file,max_order,subword_option=subword_option)
elif metric.lower()[0:len('distinct_c')] == 'distinct_c':
max_order = int(metric.lower()[len('distinct_c')+1:])
evaluation_score = _distinct_c(trans_file,max_order,subword_option=subword_option)
else:
raise ValueError("Unknown metric %s" % metric)
return evaluation_score
def _clean(sentence, subword_option):
|
def _distinct(trans_file,max_order=1, subword_option=None):
"""Compute Distinct Score"""
translations = []
with codecs.getreader("utf-8")(tf.gfile.GFile(trans_file, "rb")) as fh:
for line in fh:
line = _clean(line, subword_option=subword_option)
translations.append(line.split(" "))
num_tokens = 0
unique_tokens = set()
for items in translations:
#print(items)
for i in range(0, len(items) - max_order + 1):
tmp = ' '.join(items[i:i+max_order])
unique_tokens.add(tmp)
num_tokens += 1
ratio = len(unique_tokens) / num_tokens
return 100 * ratio
def _distinct_c(trans_file,max_order=1, subword_option=None):
"""Compute Distinct Score"""
translations = []
with codecs.getreader("utf-8")(tf.gfile.GFile(trans_file, "rb")) as fh:
for line in fh:
line = _clean(line, subword_option=subword_option)
translations.append(line.split(" "))
num_tokens = 0
unique_tokens = set()
for items in translations:
#print(items)
for i in range(0, len(items) - max_order + 1):
tmp = ' '.join(items[i:i+max_order])
unique_tokens.add(tmp)
num_tokens += 1
ratio = len(unique_tokens)
return ratio
# Follow //transconsole/localization/machine_translation/metrics/bleu_calc.py
def _bleu(ref_file, trans_file,max_order=4, subword_option=None):
"""Compute BLEU scores and handling BPE."""
smooth = False
ref_files = [ref_file]
reference_text = []
for reference_filename in ref_files:
with codecs.getreader("utf-8")(
tf.gfile.GFile(reference_filename, "rb")) as fh:
reference_text.append(fh.readlines())
per_segment_references = []
for references in zip(*reference_text):
reference_list = []
for reference in references:
reference = _clean(reference, subword_option)
reference_list.append(reference.split(" "))
per_segment_references.append(reference_list)
print(per_segment_references[0:15])
translations = []
with codecs.getreader("utf-8")(tf.gfile.GFile(trans_file, "rb")) as fh:
for line in fh:
line = _clean(line, subword_option=subword_option)
translations.append(line.split(" "))
print(translations[0:15])
# bleu_score, precisions, bp, ratio, translation_length, reference_length
bleu_score, _, _, _, _, _ = bleu.compute_bleu(
per_segment_references, translations, max_order, smooth)
return 100 * bleu_score
def _rouge(ref_file, summarization_file, subword_option=None):
"""Compute ROUGE scores and handling BPE."""
references = []
with codecs.getreader("utf-8")(tf.gfile.GFile(ref_file, "rb")) as fh:
for line in fh:
references.append(_clean(line, subword_option))
hypotheses = []
with codecs.getreader("utf-8")(
tf.gfile.GFile(summarization_file, "rb")) as fh:
for line in fh:
hypotheses.append(_clean(line, subword_option=subword_option))
rouge_score_map = rouge.rouge(hypotheses, references)
return 100 * rouge_score_map["rouge_l/f_score"]
def _accuracy(label_file, pred_file,subword_option=None):
"""Compute accuracy, each line contains a label."""
with open(label_file, "r", encoding='utf-8') as label_fh:
with open(pred_file, "r", encoding='utf-8') as pred_fh:
count = 0.0
match = 0.0
for label in label_fh:
label = label.strip()
label = " ".join(_clean(label,subword_option))
pred = pred_fh.readline().strip()
pred = " ".join(_clean(pred,subword_option))
if label == pred:
match += 1
count += 1
return 100 * match / count
def _word_accuracy(label_file, pred_file,subword_option=None):
"""Compute accuracy on per word basis."""
with open(label_file, "r", encoding='utf-8') as label_fh:
with open(pred_file, "r", encoding='utf-8') as pred_fh:
total_acc, total_count = 0., 0.
for sentence in label_fh:
sentence = " ".join(_clean(sentence, subword_option))
labels = sentence.strip().split(" ")
preds = " ".join(_clean(pred_fh.readline(), subword_option))
preds = preds.strip().split(" ")
match = 0.0
for pos in range(min(len(labels), len(preds))):
label = labels[pos]
pred = preds[pos]
if label == pred:
match += 1
total_acc += 100 * match / max(len(labels), len(preds))
total_count += 1
return total_acc / total_count
def _moses_bleu(multi_bleu_script, tgt_test, trans_file, subword_option=None):
"""Compute BLEU scores using Moses multi-bleu.perl script."""
# TODO(thangluong): perform rewrite using python
# BPE
if subword_option == "bpe":
debpe_tgt_test = tgt_test + ".debpe"
if not os.path.exists(debpe_tgt_test):
# TODO(thangluong): not use shell=True, can be a security hazard
subprocess.call("cp %s %s" % (tgt_test, debpe_tgt_test), shell=True)
subprocess.call("sed s/@@ //g %s" % (debpe_tgt_test),
shell=True)
tgt_test = debpe_tgt_test
elif subword_option == "spm":
despm_tgt_test = tgt_test + ".despm"
if not os.path.exists(despm_tgt_test):
subprocess.call("cp %s %s" % (tgt_test, despm_tgt_test))
subprocess.call("sed s/ //g %s" % (despm_tgt_test))
subprocess.call(u"sed s/^\u2581/g %s" % (despm_tgt_test))
subprocess.call(u"sed s/\u2581/ /g %s" % (despm_tgt_test))
tgt_test = despm_tgt_test
cmd = "%s %s < %s" % (multi_bleu_script, tgt_test, trans_file)
# subprocess
# TODO(thangluong): not use shell=True, can be a security hazard
bleu_output = subprocess.check_output(cmd, shell=True)
# extract BLEU score
m = re.search("BLEU = (.+?),", bleu_output)
bleu_score = float(m.group(1))
return bleu_score
if __name__ == "__main__":
sys.path.append(os.path.dirname(sys.path[0]))
model_id = sys.argv[1]
ref_file = sys.argv[2] # r"D:\nmt\ref\dev.20000.response"
#ref_file = r"D:\nmt\ref\char2_dev.response"
trans_file = sys.argv[3] # r"D:\nmt\beam_search\word_4W_10_dev_f.inf.response"
out_path = sys.argv[4]
metrics = sys.argv[5].split(',')
subword = None
with open(out_path,'w+',encoding='utf-8') as fout:
for metric in metrics:
score = evaluate(ref_file, trans_file, metric, subword_option=subword)
fout.write(('%s\t%f\n') % (metric, score))
# print('res file: %s' % ref_file)
# print('trans_file:%s' % trans_file)
# scores = []
# for metric in metrics:
# score = evaluate(ref_file,trans_file,metric+'@hybrid',subword_option=subword)
# scores.append(str(score))
# print('\t'.join(scores))
# scores = []
# for metric in ['rouge', 'bleu-1', 'bleu-2', 'bleu-3', 'bleu-4']:
# score = evaluate(ref_file, trans_file, metric + '@char', subword_option=subword)
# scores.append(str(score))
# print('\t'.join(scores)) | """Clean and handle BPE or SPM outputs."""
sentence = sentence.strip()
if subword_option is not None and '@' in subword_option:
subword_option_0 = subword_option.split('@')[0]
subword_option_1 = subword_option.split('@')[1]
else:
subword_option_0 = None
subword_option_1 = None
# BPE
if subword_option_0 == "bpe":
sentence = re.sub("@@ ", "", sentence)
# SPM
elif subword_option_0 == "spm":
sentence = u"".join(sentence.split()).replace(u"\u2581", u" ").lstrip()
# speical for chinese
if subword_option_1 == 'bpe':
sentence = re.sub("@@ ", "", sentence)
if subword_option_1 == 'space':
sentence = sentence.replace(" ", "")
sentence = sentence.replace("<SPACE>"," ")
if subword_option_1 == 'char':
sentence = sentence.replace("<SPACE>", "")
sentence = sentence.replace("@@", "")
sentence = sentence.replace(" ","")
sentence = " ".join(sentence)
elif subword_option_1 == 'char2char':
sentence = sentence.replace(" ", "")
sentence = sentence.replace("@@", "")
sentence = " ".join(sentence)
elif subword_option_1 == 'char2word':
sentence = sentence.replace(" ", "")
sentence = sentence.replace("@@", " ")
# sentence = " ".join(sentence)
elif subword_option_1 == 'hybrid':
sentence = sentence.replace(" @@ ", "")
sentence = sentence.replace("@@ ", "")
sentence = sentence.replace(" @@", "")
elif subword_option_1 == 'hybrid2':
sentence = sentence.replace(" ", "")
sentence = sentence.replace("@@", " ")
return sentence | identifier_body |
evaluation_utils.py | # Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utility for evaluating various tasks, e.g., translation & summarization."""
import codecs
import os
import re
import subprocess
import sys
import tensorflow as tf
from ..scripts import bleu
from ..scripts import rouge
__all__ = ["evaluate"]
def evaluate(ref_file, trans_file, metric, subword_option=None):
"""Pick a metric and evaluate depending on task."""
# BLEU scores for translation task
if '@' in metric.lower():
pos = metric.lower().index('@')
if subword_option is None:
subword_option = 'None'
subword_option += metric[pos:]
metric = metric[0:pos]
if metric.lower() == "bleu":
evaluation_score = _bleu(ref_file, trans_file,
subword_option=subword_option)
elif len(metric.lower()) > 4 and metric.lower()[0:4]=='bleu':
max_order = int(metric.lower()[5:])
evaluation_score = _bleu(ref_file, trans_file,max_order=max_order,
subword_option=subword_option)
# ROUGE scores for summarization tasks
elif metric.lower() == "rouge":
evaluation_score = _rouge(ref_file, trans_file,
subword_option=subword_option)
elif metric.lower() == "accuracy":
evaluation_score = _accuracy(ref_file, trans_file,
subword_option=subword_option)
elif metric.lower() == "word_accuracy":
evaluation_score = _word_accuracy(ref_file, trans_file,
subword_option=subword_option)
elif metric.lower()[0:len('distinct')] == 'distinct':
|
elif metric.lower()[0:len('distinct_c')] == 'distinct_c':
max_order = int(metric.lower()[len('distinct_c')+1:])
evaluation_score = _distinct_c(trans_file,max_order,subword_option=subword_option)
else:
raise ValueError("Unknown metric %s" % metric)
return evaluation_score
def _clean(sentence, subword_option):
"""Clean and handle BPE or SPM outputs."""
sentence = sentence.strip()
if subword_option is not None and '@' in subword_option:
subword_option_0 = subword_option.split('@')[0]
subword_option_1 = subword_option.split('@')[1]
else:
subword_option_0 = None
subword_option_1 = None
# BPE
if subword_option_0 == "bpe":
sentence = re.sub("@@ ", "", sentence)
# SPM
elif subword_option_0 == "spm":
sentence = u"".join(sentence.split()).replace(u"\u2581", u" ").lstrip()
# speical for chinese
if subword_option_1 == 'bpe':
sentence = re.sub("@@ ", "", sentence)
if subword_option_1 == 'space':
sentence = sentence.replace(" ", "")
sentence = sentence.replace("<SPACE>"," ")
if subword_option_1 == 'char':
sentence = sentence.replace("<SPACE>", "")
sentence = sentence.replace("@@", "")
sentence = sentence.replace(" ","")
sentence = " ".join(sentence)
elif subword_option_1 == 'char2char':
sentence = sentence.replace(" ", "")
sentence = sentence.replace("@@", "")
sentence = " ".join(sentence)
elif subword_option_1 == 'char2word':
sentence = sentence.replace(" ", "")
sentence = sentence.replace("@@", " ")
# sentence = " ".join(sentence)
elif subword_option_1 == 'hybrid':
sentence = sentence.replace(" @@ ", "")
sentence = sentence.replace("@@ ", "")
sentence = sentence.replace(" @@", "")
elif subword_option_1 == 'hybrid2':
sentence = sentence.replace(" ", "")
sentence = sentence.replace("@@", " ")
return sentence
def _distinct(trans_file,max_order=1, subword_option=None):
"""Compute Distinct Score"""
translations = []
with codecs.getreader("utf-8")(tf.gfile.GFile(trans_file, "rb")) as fh:
for line in fh:
line = _clean(line, subword_option=subword_option)
translations.append(line.split(" "))
num_tokens = 0
unique_tokens = set()
for items in translations:
#print(items)
for i in range(0, len(items) - max_order + 1):
tmp = ' '.join(items[i:i+max_order])
unique_tokens.add(tmp)
num_tokens += 1
ratio = len(unique_tokens) / num_tokens
return 100 * ratio
def _distinct_c(trans_file,max_order=1, subword_option=None):
"""Compute Distinct Score"""
translations = []
with codecs.getreader("utf-8")(tf.gfile.GFile(trans_file, "rb")) as fh:
for line in fh:
line = _clean(line, subword_option=subword_option)
translations.append(line.split(" "))
num_tokens = 0
unique_tokens = set()
for items in translations:
#print(items)
for i in range(0, len(items) - max_order + 1):
tmp = ' '.join(items[i:i+max_order])
unique_tokens.add(tmp)
num_tokens += 1
ratio = len(unique_tokens)
return ratio
# Follow //transconsole/localization/machine_translation/metrics/bleu_calc.py
def _bleu(ref_file, trans_file,max_order=4, subword_option=None):
"""Compute BLEU scores and handling BPE."""
smooth = False
ref_files = [ref_file]
reference_text = []
for reference_filename in ref_files:
with codecs.getreader("utf-8")(
tf.gfile.GFile(reference_filename, "rb")) as fh:
reference_text.append(fh.readlines())
per_segment_references = []
for references in zip(*reference_text):
reference_list = []
for reference in references:
reference = _clean(reference, subword_option)
reference_list.append(reference.split(" "))
per_segment_references.append(reference_list)
print(per_segment_references[0:15])
translations = []
with codecs.getreader("utf-8")(tf.gfile.GFile(trans_file, "rb")) as fh:
for line in fh:
line = _clean(line, subword_option=subword_option)
translations.append(line.split(" "))
print(translations[0:15])
# bleu_score, precisions, bp, ratio, translation_length, reference_length
bleu_score, _, _, _, _, _ = bleu.compute_bleu(
per_segment_references, translations, max_order, smooth)
return 100 * bleu_score
def _rouge(ref_file, summarization_file, subword_option=None):
"""Compute ROUGE scores and handling BPE."""
references = []
with codecs.getreader("utf-8")(tf.gfile.GFile(ref_file, "rb")) as fh:
for line in fh:
references.append(_clean(line, subword_option))
hypotheses = []
with codecs.getreader("utf-8")(
tf.gfile.GFile(summarization_file, "rb")) as fh:
for line in fh:
hypotheses.append(_clean(line, subword_option=subword_option))
rouge_score_map = rouge.rouge(hypotheses, references)
return 100 * rouge_score_map["rouge_l/f_score"]
def _accuracy(label_file, pred_file,subword_option=None):
"""Compute accuracy, each line contains a label."""
with open(label_file, "r", encoding='utf-8') as label_fh:
with open(pred_file, "r", encoding='utf-8') as pred_fh:
count = 0.0
match = 0.0
for label in label_fh:
label = label.strip()
label = " ".join(_clean(label,subword_option))
pred = pred_fh.readline().strip()
pred = " ".join(_clean(pred,subword_option))
if label == pred:
match += 1
count += 1
return 100 * match / count
def _word_accuracy(label_file, pred_file,subword_option=None):
"""Compute accuracy on per word basis."""
with open(label_file, "r", encoding='utf-8') as label_fh:
with open(pred_file, "r", encoding='utf-8') as pred_fh:
total_acc, total_count = 0., 0.
for sentence in label_fh:
sentence = " ".join(_clean(sentence, subword_option))
labels = sentence.strip().split(" ")
preds = " ".join(_clean(pred_fh.readline(), subword_option))
preds = preds.strip().split(" ")
match = 0.0
for pos in range(min(len(labels), len(preds))):
label = labels[pos]
pred = preds[pos]
if label == pred:
match += 1
total_acc += 100 * match / max(len(labels), len(preds))
total_count += 1
return total_acc / total_count
def _moses_bleu(multi_bleu_script, tgt_test, trans_file, subword_option=None):
"""Compute BLEU scores using Moses multi-bleu.perl script."""
# TODO(thangluong): perform rewrite using python
# BPE
if subword_option == "bpe":
debpe_tgt_test = tgt_test + ".debpe"
if not os.path.exists(debpe_tgt_test):
# TODO(thangluong): not use shell=True, can be a security hazard
subprocess.call("cp %s %s" % (tgt_test, debpe_tgt_test), shell=True)
subprocess.call("sed s/@@ //g %s" % (debpe_tgt_test),
shell=True)
tgt_test = debpe_tgt_test
elif subword_option == "spm":
despm_tgt_test = tgt_test + ".despm"
if not os.path.exists(despm_tgt_test):
subprocess.call("cp %s %s" % (tgt_test, despm_tgt_test))
subprocess.call("sed s/ //g %s" % (despm_tgt_test))
subprocess.call(u"sed s/^\u2581/g %s" % (despm_tgt_test))
subprocess.call(u"sed s/\u2581/ /g %s" % (despm_tgt_test))
tgt_test = despm_tgt_test
cmd = "%s %s < %s" % (multi_bleu_script, tgt_test, trans_file)
# subprocess
# TODO(thangluong): not use shell=True, can be a security hazard
bleu_output = subprocess.check_output(cmd, shell=True)
# extract BLEU score
m = re.search("BLEU = (.+?),", bleu_output)
bleu_score = float(m.group(1))
return bleu_score
if __name__ == "__main__":
sys.path.append(os.path.dirname(sys.path[0]))
model_id = sys.argv[1]
ref_file = sys.argv[2] # r"D:\nmt\ref\dev.20000.response"
#ref_file = r"D:\nmt\ref\char2_dev.response"
trans_file = sys.argv[3] # r"D:\nmt\beam_search\word_4W_10_dev_f.inf.response"
out_path = sys.argv[4]
metrics = sys.argv[5].split(',')
subword = None
with open(out_path,'w+',encoding='utf-8') as fout:
for metric in metrics:
score = evaluate(ref_file, trans_file, metric, subword_option=subword)
fout.write(('%s\t%f\n') % (metric, score))
# print('res file: %s' % ref_file)
# print('trans_file:%s' % trans_file)
# scores = []
# for metric in metrics:
# score = evaluate(ref_file,trans_file,metric+'@hybrid',subword_option=subword)
# scores.append(str(score))
# print('\t'.join(scores))
# scores = []
# for metric in ['rouge', 'bleu-1', 'bleu-2', 'bleu-3', 'bleu-4']:
# score = evaluate(ref_file, trans_file, metric + '@char', subword_option=subword)
# scores.append(str(score))
# print('\t'.join(scores)) | max_order = int(metric.lower()[len('distinct')+1:])
evaluation_score = _distinct(trans_file,max_order,subword_option=subword_option) | conditional_block |
evaluation_utils.py | # Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utility for evaluating various tasks, e.g., translation & summarization."""
import codecs
import os
import re
import subprocess
import sys
import tensorflow as tf
from ..scripts import bleu
from ..scripts import rouge
__all__ = ["evaluate"]
def evaluate(ref_file, trans_file, metric, subword_option=None):
"""Pick a metric and evaluate depending on task."""
# BLEU scores for translation task
if '@' in metric.lower():
pos = metric.lower().index('@')
if subword_option is None:
subword_option = 'None'
subword_option += metric[pos:]
metric = metric[0:pos]
if metric.lower() == "bleu":
evaluation_score = _bleu(ref_file, trans_file,
subword_option=subword_option)
elif len(metric.lower()) > 4 and metric.lower()[0:4]=='bleu':
max_order = int(metric.lower()[5:])
evaluation_score = _bleu(ref_file, trans_file,max_order=max_order,
subword_option=subword_option)
# ROUGE scores for summarization tasks
elif metric.lower() == "rouge":
evaluation_score = _rouge(ref_file, trans_file,
subword_option=subword_option)
elif metric.lower() == "accuracy":
evaluation_score = _accuracy(ref_file, trans_file,
subword_option=subword_option)
elif metric.lower() == "word_accuracy":
evaluation_score = _word_accuracy(ref_file, trans_file,
subword_option=subword_option)
elif metric.lower()[0:len('distinct')] == 'distinct':
max_order = int(metric.lower()[len('distinct')+1:])
evaluation_score = _distinct(trans_file,max_order,subword_option=subword_option)
elif metric.lower()[0:len('distinct_c')] == 'distinct_c':
max_order = int(metric.lower()[len('distinct_c')+1:])
evaluation_score = _distinct_c(trans_file,max_order,subword_option=subword_option)
else:
raise ValueError("Unknown metric %s" % metric)
return evaluation_score
def _clean(sentence, subword_option):
"""Clean and handle BPE or SPM outputs."""
sentence = sentence.strip()
if subword_option is not None and '@' in subword_option:
subword_option_0 = subword_option.split('@')[0]
subword_option_1 = subword_option.split('@')[1]
else:
subword_option_0 = None
subword_option_1 = None
# BPE
if subword_option_0 == "bpe":
sentence = re.sub("@@ ", "", sentence)
# SPM
elif subword_option_0 == "spm":
sentence = u"".join(sentence.split()).replace(u"\u2581", u" ").lstrip()
# speical for chinese
if subword_option_1 == 'bpe':
sentence = re.sub("@@ ", "", sentence)
if subword_option_1 == 'space':
sentence = sentence.replace(" ", "")
sentence = sentence.replace("<SPACE>"," ")
if subword_option_1 == 'char':
sentence = sentence.replace("<SPACE>", "")
sentence = sentence.replace("@@", "")
sentence = sentence.replace(" ","")
sentence = " ".join(sentence)
elif subword_option_1 == 'char2char':
sentence = sentence.replace(" ", "")
sentence = sentence.replace("@@", "")
sentence = " ".join(sentence)
elif subword_option_1 == 'char2word':
sentence = sentence.replace(" ", "")
sentence = sentence.replace("@@", " ")
# sentence = " ".join(sentence)
elif subword_option_1 == 'hybrid':
sentence = sentence.replace(" @@ ", "")
sentence = sentence.replace("@@ ", "")
sentence = sentence.replace(" @@", "")
elif subword_option_1 == 'hybrid2':
sentence = sentence.replace(" ", "")
sentence = sentence.replace("@@", " ")
return sentence
def _distinct(trans_file,max_order=1, subword_option=None):
"""Compute Distinct Score"""
translations = []
with codecs.getreader("utf-8")(tf.gfile.GFile(trans_file, "rb")) as fh:
for line in fh:
line = _clean(line, subword_option=subword_option)
translations.append(line.split(" "))
num_tokens = 0
unique_tokens = set()
for items in translations:
#print(items)
for i in range(0, len(items) - max_order + 1):
tmp = ' '.join(items[i:i+max_order])
unique_tokens.add(tmp)
num_tokens += 1
ratio = len(unique_tokens) / num_tokens
return 100 * ratio
def _distinct_c(trans_file,max_order=1, subword_option=None):
"""Compute Distinct Score"""
translations = []
with codecs.getreader("utf-8")(tf.gfile.GFile(trans_file, "rb")) as fh:
for line in fh:
line = _clean(line, subword_option=subword_option)
translations.append(line.split(" "))
num_tokens = 0
unique_tokens = set()
for items in translations:
#print(items)
for i in range(0, len(items) - max_order + 1):
tmp = ' '.join(items[i:i+max_order])
unique_tokens.add(tmp)
num_tokens += 1
ratio = len(unique_tokens)
return ratio
# Follow //transconsole/localization/machine_translation/metrics/bleu_calc.py
def _bleu(ref_file, trans_file,max_order=4, subword_option=None):
"""Compute BLEU scores and handling BPE."""
smooth = False
ref_files = [ref_file]
reference_text = []
for reference_filename in ref_files:
with codecs.getreader("utf-8")(
tf.gfile.GFile(reference_filename, "rb")) as fh:
reference_text.append(fh.readlines())
per_segment_references = []
for references in zip(*reference_text):
reference_list = []
for reference in references:
reference = _clean(reference, subword_option)
reference_list.append(reference.split(" "))
per_segment_references.append(reference_list)
print(per_segment_references[0:15])
translations = []
with codecs.getreader("utf-8")(tf.gfile.GFile(trans_file, "rb")) as fh: | # bleu_score, precisions, bp, ratio, translation_length, reference_length
bleu_score, _, _, _, _, _ = bleu.compute_bleu(
per_segment_references, translations, max_order, smooth)
return 100 * bleu_score
def _rouge(ref_file, summarization_file, subword_option=None):
"""Compute ROUGE scores and handling BPE."""
references = []
with codecs.getreader("utf-8")(tf.gfile.GFile(ref_file, "rb")) as fh:
for line in fh:
references.append(_clean(line, subword_option))
hypotheses = []
with codecs.getreader("utf-8")(
tf.gfile.GFile(summarization_file, "rb")) as fh:
for line in fh:
hypotheses.append(_clean(line, subword_option=subword_option))
rouge_score_map = rouge.rouge(hypotheses, references)
return 100 * rouge_score_map["rouge_l/f_score"]
def _accuracy(label_file, pred_file,subword_option=None):
"""Compute accuracy, each line contains a label."""
with open(label_file, "r", encoding='utf-8') as label_fh:
with open(pred_file, "r", encoding='utf-8') as pred_fh:
count = 0.0
match = 0.0
for label in label_fh:
label = label.strip()
label = " ".join(_clean(label,subword_option))
pred = pred_fh.readline().strip()
pred = " ".join(_clean(pred,subword_option))
if label == pred:
match += 1
count += 1
return 100 * match / count
def _word_accuracy(label_file, pred_file,subword_option=None):
"""Compute accuracy on per word basis."""
with open(label_file, "r", encoding='utf-8') as label_fh:
with open(pred_file, "r", encoding='utf-8') as pred_fh:
total_acc, total_count = 0., 0.
for sentence in label_fh:
sentence = " ".join(_clean(sentence, subword_option))
labels = sentence.strip().split(" ")
preds = " ".join(_clean(pred_fh.readline(), subword_option))
preds = preds.strip().split(" ")
match = 0.0
for pos in range(min(len(labels), len(preds))):
label = labels[pos]
pred = preds[pos]
if label == pred:
match += 1
total_acc += 100 * match / max(len(labels), len(preds))
total_count += 1
return total_acc / total_count
def _moses_bleu(multi_bleu_script, tgt_test, trans_file, subword_option=None):
"""Compute BLEU scores using Moses multi-bleu.perl script."""
# TODO(thangluong): perform rewrite using python
# BPE
if subword_option == "bpe":
debpe_tgt_test = tgt_test + ".debpe"
if not os.path.exists(debpe_tgt_test):
# TODO(thangluong): not use shell=True, can be a security hazard
subprocess.call("cp %s %s" % (tgt_test, debpe_tgt_test), shell=True)
subprocess.call("sed s/@@ //g %s" % (debpe_tgt_test),
shell=True)
tgt_test = debpe_tgt_test
elif subword_option == "spm":
despm_tgt_test = tgt_test + ".despm"
if not os.path.exists(despm_tgt_test):
subprocess.call("cp %s %s" % (tgt_test, despm_tgt_test))
subprocess.call("sed s/ //g %s" % (despm_tgt_test))
subprocess.call(u"sed s/^\u2581/g %s" % (despm_tgt_test))
subprocess.call(u"sed s/\u2581/ /g %s" % (despm_tgt_test))
tgt_test = despm_tgt_test
cmd = "%s %s < %s" % (multi_bleu_script, tgt_test, trans_file)
# subprocess
# TODO(thangluong): not use shell=True, can be a security hazard
bleu_output = subprocess.check_output(cmd, shell=True)
# extract BLEU score
m = re.search("BLEU = (.+?),", bleu_output)
bleu_score = float(m.group(1))
return bleu_score
if __name__ == "__main__":
sys.path.append(os.path.dirname(sys.path[0]))
model_id = sys.argv[1]
ref_file = sys.argv[2] # r"D:\nmt\ref\dev.20000.response"
#ref_file = r"D:\nmt\ref\char2_dev.response"
trans_file = sys.argv[3] # r"D:\nmt\beam_search\word_4W_10_dev_f.inf.response"
out_path = sys.argv[4]
metrics = sys.argv[5].split(',')
subword = None
with open(out_path,'w+',encoding='utf-8') as fout:
for metric in metrics:
score = evaluate(ref_file, trans_file, metric, subword_option=subword)
fout.write(('%s\t%f\n') % (metric, score))
# print('res file: %s' % ref_file)
# print('trans_file:%s' % trans_file)
# scores = []
# for metric in metrics:
# score = evaluate(ref_file,trans_file,metric+'@hybrid',subword_option=subword)
# scores.append(str(score))
# print('\t'.join(scores))
# scores = []
# for metric in ['rouge', 'bleu-1', 'bleu-2', 'bleu-3', 'bleu-4']:
# score = evaluate(ref_file, trans_file, metric + '@char', subword_option=subword)
# scores.append(str(score))
# print('\t'.join(scores)) | for line in fh:
line = _clean(line, subword_option=subword_option)
translations.append(line.split(" "))
print(translations[0:15]) | random_line_split |
evaluation_utils.py | # Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utility for evaluating various tasks, e.g., translation & summarization."""
import codecs
import os
import re
import subprocess
import sys
import tensorflow as tf
from ..scripts import bleu
from ..scripts import rouge
__all__ = ["evaluate"]
def evaluate(ref_file, trans_file, metric, subword_option=None):
"""Pick a metric and evaluate depending on task."""
# BLEU scores for translation task
if '@' in metric.lower():
pos = metric.lower().index('@')
if subword_option is None:
subword_option = 'None'
subword_option += metric[pos:]
metric = metric[0:pos]
if metric.lower() == "bleu":
evaluation_score = _bleu(ref_file, trans_file,
subword_option=subword_option)
elif len(metric.lower()) > 4 and metric.lower()[0:4]=='bleu':
max_order = int(metric.lower()[5:])
evaluation_score = _bleu(ref_file, trans_file,max_order=max_order,
subword_option=subword_option)
# ROUGE scores for summarization tasks
elif metric.lower() == "rouge":
evaluation_score = _rouge(ref_file, trans_file,
subword_option=subword_option)
elif metric.lower() == "accuracy":
evaluation_score = _accuracy(ref_file, trans_file,
subword_option=subword_option)
elif metric.lower() == "word_accuracy":
evaluation_score = _word_accuracy(ref_file, trans_file,
subword_option=subword_option)
elif metric.lower()[0:len('distinct')] == 'distinct':
max_order = int(metric.lower()[len('distinct')+1:])
evaluation_score = _distinct(trans_file,max_order,subword_option=subword_option)
elif metric.lower()[0:len('distinct_c')] == 'distinct_c':
max_order = int(metric.lower()[len('distinct_c')+1:])
evaluation_score = _distinct_c(trans_file,max_order,subword_option=subword_option)
else:
raise ValueError("Unknown metric %s" % metric)
return evaluation_score
def _clean(sentence, subword_option):
"""Clean and handle BPE or SPM outputs."""
sentence = sentence.strip()
if subword_option is not None and '@' in subword_option:
subword_option_0 = subword_option.split('@')[0]
subword_option_1 = subword_option.split('@')[1]
else:
subword_option_0 = None
subword_option_1 = None
# BPE
if subword_option_0 == "bpe":
sentence = re.sub("@@ ", "", sentence)
# SPM
elif subword_option_0 == "spm":
sentence = u"".join(sentence.split()).replace(u"\u2581", u" ").lstrip()
# speical for chinese
if subword_option_1 == 'bpe':
sentence = re.sub("@@ ", "", sentence)
if subword_option_1 == 'space':
sentence = sentence.replace(" ", "")
sentence = sentence.replace("<SPACE>"," ")
if subword_option_1 == 'char':
sentence = sentence.replace("<SPACE>", "")
sentence = sentence.replace("@@", "")
sentence = sentence.replace(" ","")
sentence = " ".join(sentence)
elif subword_option_1 == 'char2char':
sentence = sentence.replace(" ", "")
sentence = sentence.replace("@@", "")
sentence = " ".join(sentence)
elif subword_option_1 == 'char2word':
sentence = sentence.replace(" ", "")
sentence = sentence.replace("@@", " ")
# sentence = " ".join(sentence)
elif subword_option_1 == 'hybrid':
sentence = sentence.replace(" @@ ", "")
sentence = sentence.replace("@@ ", "")
sentence = sentence.replace(" @@", "")
elif subword_option_1 == 'hybrid2':
sentence = sentence.replace(" ", "")
sentence = sentence.replace("@@", " ")
return sentence
def | (trans_file,max_order=1, subword_option=None):
"""Compute Distinct Score"""
translations = []
with codecs.getreader("utf-8")(tf.gfile.GFile(trans_file, "rb")) as fh:
for line in fh:
line = _clean(line, subword_option=subword_option)
translations.append(line.split(" "))
num_tokens = 0
unique_tokens = set()
for items in translations:
#print(items)
for i in range(0, len(items) - max_order + 1):
tmp = ' '.join(items[i:i+max_order])
unique_tokens.add(tmp)
num_tokens += 1
ratio = len(unique_tokens) / num_tokens
return 100 * ratio
def _distinct_c(trans_file,max_order=1, subword_option=None):
"""Compute Distinct Score"""
translations = []
with codecs.getreader("utf-8")(tf.gfile.GFile(trans_file, "rb")) as fh:
for line in fh:
line = _clean(line, subword_option=subword_option)
translations.append(line.split(" "))
num_tokens = 0
unique_tokens = set()
for items in translations:
#print(items)
for i in range(0, len(items) - max_order + 1):
tmp = ' '.join(items[i:i+max_order])
unique_tokens.add(tmp)
num_tokens += 1
ratio = len(unique_tokens)
return ratio
# Follow //transconsole/localization/machine_translation/metrics/bleu_calc.py
def _bleu(ref_file, trans_file,max_order=4, subword_option=None):
"""Compute BLEU scores and handling BPE."""
smooth = False
ref_files = [ref_file]
reference_text = []
for reference_filename in ref_files:
with codecs.getreader("utf-8")(
tf.gfile.GFile(reference_filename, "rb")) as fh:
reference_text.append(fh.readlines())
per_segment_references = []
for references in zip(*reference_text):
reference_list = []
for reference in references:
reference = _clean(reference, subword_option)
reference_list.append(reference.split(" "))
per_segment_references.append(reference_list)
print(per_segment_references[0:15])
translations = []
with codecs.getreader("utf-8")(tf.gfile.GFile(trans_file, "rb")) as fh:
for line in fh:
line = _clean(line, subword_option=subword_option)
translations.append(line.split(" "))
print(translations[0:15])
# bleu_score, precisions, bp, ratio, translation_length, reference_length
bleu_score, _, _, _, _, _ = bleu.compute_bleu(
per_segment_references, translations, max_order, smooth)
return 100 * bleu_score
def _rouge(ref_file, summarization_file, subword_option=None):
"""Compute ROUGE scores and handling BPE."""
references = []
with codecs.getreader("utf-8")(tf.gfile.GFile(ref_file, "rb")) as fh:
for line in fh:
references.append(_clean(line, subword_option))
hypotheses = []
with codecs.getreader("utf-8")(
tf.gfile.GFile(summarization_file, "rb")) as fh:
for line in fh:
hypotheses.append(_clean(line, subword_option=subword_option))
rouge_score_map = rouge.rouge(hypotheses, references)
return 100 * rouge_score_map["rouge_l/f_score"]
def _accuracy(label_file, pred_file,subword_option=None):
"""Compute accuracy, each line contains a label."""
with open(label_file, "r", encoding='utf-8') as label_fh:
with open(pred_file, "r", encoding='utf-8') as pred_fh:
count = 0.0
match = 0.0
for label in label_fh:
label = label.strip()
label = " ".join(_clean(label,subword_option))
pred = pred_fh.readline().strip()
pred = " ".join(_clean(pred,subword_option))
if label == pred:
match += 1
count += 1
return 100 * match / count
def _word_accuracy(label_file, pred_file,subword_option=None):
"""Compute accuracy on per word basis."""
with open(label_file, "r", encoding='utf-8') as label_fh:
with open(pred_file, "r", encoding='utf-8') as pred_fh:
total_acc, total_count = 0., 0.
for sentence in label_fh:
sentence = " ".join(_clean(sentence, subword_option))
labels = sentence.strip().split(" ")
preds = " ".join(_clean(pred_fh.readline(), subword_option))
preds = preds.strip().split(" ")
match = 0.0
for pos in range(min(len(labels), len(preds))):
label = labels[pos]
pred = preds[pos]
if label == pred:
match += 1
total_acc += 100 * match / max(len(labels), len(preds))
total_count += 1
return total_acc / total_count
def _moses_bleu(multi_bleu_script, tgt_test, trans_file, subword_option=None):
  """Compute BLEU scores using Moses multi-bleu.perl script.

  Args:
    multi_bleu_script: path to multi-bleu.perl.
    tgt_test: path to the reference file (de-subworded on the fly for
      "bpe"/"spm" options, cached next to the original).
    trans_file: path to the translations file.
    subword_option: None, "bpe" or "spm".

  Returns:
    The BLEU score parsed from the script output, as a float.

  Raises:
    ValueError: when the script output contains no "BLEU = ..." line.
  """
  # TODO(thangluong): perform rewrite using python
  # BPE
  if subword_option == "bpe":
    debpe_tgt_test = tgt_test + ".debpe"
    if not os.path.exists(debpe_tgt_test):
      # TODO(thangluong): not use shell=True, can be a security hazard
      subprocess.call("cp %s %s" % (tgt_test, debpe_tgt_test), shell=True)
      # NOTE(review): without `sed -i` (or output redirection) this prints to
      # stdout and leaves the copied file unchanged — presumably in-place
      # editing was intended; confirm against the upstream nmt scripts.
      subprocess.call("sed s/@@ //g %s" % (debpe_tgt_test),
                      shell=True)
    tgt_test = debpe_tgt_test
  elif subword_option == "spm":
    despm_tgt_test = tgt_test + ".despm"
    if not os.path.exists(despm_tgt_test):
      # shell=True added: the original passed the whole command string as a
      # single program name, which always fails without a shell.
      subprocess.call("cp %s %s" % (tgt_test, despm_tgt_test), shell=True)
      subprocess.call("sed s/ //g %s" % (despm_tgt_test), shell=True)
      subprocess.call(u"sed s/^\u2581/g %s" % (despm_tgt_test), shell=True)
      subprocess.call(u"sed s/\u2581/ /g %s" % (despm_tgt_test), shell=True)
    tgt_test = despm_tgt_test
  cmd = "%s %s < %s" % (multi_bleu_script, tgt_test, trans_file)
  # subprocess
  # TODO(thangluong): not use shell=True, can be a security hazard
  bleu_output = subprocess.check_output(cmd, shell=True)
  # check_output returns bytes on Python 3; decode before regex matching,
  # otherwise re.search raises TypeError (str pattern vs bytes subject).
  m = re.search("BLEU = (.+?),", bleu_output.decode("utf-8"))
  if m is None:
    raise ValueError(
        "cannot parse BLEU score from output: %r" % bleu_output)
  bleu_score = float(m.group(1))
  return bleu_score
if __name__ == "__main__":
    # Make the parent directory importable so sibling modules resolve.
    sys.path.append(os.path.dirname(sys.path[0]))
    # CLI: <model_id> <ref_file> <trans_file> <out_path> <metric1,metric2,...>
    model_id = sys.argv[1]
    ref_file = sys.argv[2]  # e.g. r"D:\nmt\ref\dev.20000.response"
    #ref_file = r"D:\nmt\ref\char2_dev.response"
    trans_file = sys.argv[3]  # e.g. r"D:\nmt\beam_search\word_4W_10_dev_f.inf.response"
    out_path = sys.argv[4]
    metrics = sys.argv[5].split(',')
    # No subword post-processing by default; see _clean/evaluate for options.
    subword = None
    # Write one "<metric>\t<score>" line per requested metric.
    with open(out_path,'w+',encoding='utf-8') as fout:
        for metric in metrics:
            score = evaluate(ref_file, trans_file, metric, subword_option=subword)
            fout.write(('%s\t%f\n') % (metric, score))
    # print('res file: %s' % ref_file)
    # print('trans_file:%s' % trans_file)
    # scores = []
    # for metric in metrics:
    #     score = evaluate(ref_file,trans_file,metric+'@hybrid',subword_option=subword)
    #     scores.append(str(score))
    # print('\t'.join(scores))
    # scores = []
    # for metric in ['rouge', 'bleu-1', 'bleu-2', 'bleu-3', 'bleu-4']:
    #     score = evaluate(ref_file, trans_file, metric + '@char', subword_option=subword)
    #     scores.append(str(score))
    # print('\t'.join(scores))
package message
import (
"context"
"fmt"
"runtime/debug"
"sync"
"time"
"github.com/pkg/errors"
"github.com/ThreeDotsLabs/watermill"
"github.com/ThreeDotsLabs/watermill/internal"
sync_internal "github.com/ThreeDotsLabs/watermill/pubsub/sync"
)
var (
// ErrOutputInNoPublisherHandler happens when a handler func returned some messages in a no-publisher handler.
// todo: maybe change the handler func signature in no-publisher handler so that there's no possibility for this
ErrOutputInNoPublisherHandler = errors.New("returned output messages in a handler without publisher")
)
// HandlerFunc is function called when message is received.
//
// msg.Ack() is called automatically when HandlerFunc doesn't return error.
// When HandlerFunc returns error, msg.Nack() is called.
// When msg.Ack() was called in handler and HandlerFunc returns error,
// msg.Nack() will be not sent because Ack was already sent.
//
// HandlerFunc's are executed parallel when multiple messages was received
// (because msg.Ack() was sent in HandlerFunc or Subscriber supports multiple consumers).
type HandlerFunc func(msg *Message) ([]*Message, error)
// NoPublishHandlerFunc is HandlerFunc alternative, which doesn't produce any messages.
type NoPublishHandlerFunc func(msg *Message) error
// PassthroughHandler is a handler that passes the message unchanged from the subscriber to the publisher.
var PassthroughHandler HandlerFunc = func(msg *Message) ([]*Message, error) {
return []*Message{msg}, nil
}
// HandlerMiddleware allows us to write something like decorators to HandlerFunc.
// It can execute something before handler (for example: modify consumed message)
// or after (modify produced messages, ack/nack on consumed message, handle errors, logging, etc.).
//
// It can be attached to the router by using `AddMiddleware` method.
//
// Example:
//
// func ExampleMiddleware(h message.HandlerFunc) message.HandlerFunc {
// return func(message *message.Message) ([]*message.Message, error) {
// fmt.Println("executed before handler")
// producedMessages, err := h(message)
// fmt.Println("executed after handler")
//
// return producedMessages, err
// }
// }
type HandlerMiddleware func(h HandlerFunc) HandlerFunc
// RouterPlugin is function which is executed on Router start.
type RouterPlugin func(*Router) error
// PublisherDecorator wraps the underlying Publisher, adding some functionality.
type PublisherDecorator func(pub Publisher) (Publisher, error)
// SubscriberDecorator wraps the underlying Subscriber, adding some functionality.
type SubscriberDecorator func(sub Subscriber) (Subscriber, error)
// RouterConfig holds the Router's configuration options.
type RouterConfig struct {
// CloseTimeout determines how long router should work for handlers when closing.
CloseTimeout time.Duration
}
func (c *RouterConfig) setDefaults() {
if c.CloseTimeout == 0 {
c.CloseTimeout = time.Second * 30
}
}
// Validate returns Router configuration error, if any.
//
// Currently every configuration is valid, so it always returns nil; the hook
// exists so future validation rules do not change the constructor's API.
func (c RouterConfig) Validate() error {
	return nil
}
// NewRouter creates a new Router with the given configuration.
//
// Zero-valued config fields are filled with defaults, and a nil logger is
// replaced with watermill.NopLogger. An error is returned only when the
// (defaulted) configuration fails validation.
func NewRouter(config RouterConfig, logger watermill.LoggerAdapter) (*Router, error) {
	config.setDefaults()
	if err := config.Validate(); err != nil {
		return nil, errors.Wrap(err, "invalid config")
	}

	if logger == nil {
		logger = watermill.NopLogger{}
	}

	r := &Router{
		config: config,
		logger: logger,

		handlers:     make(map[string]*handler),
		handlersLock: &sync.RWMutex{},
		handlerAdded: make(chan struct{}),

		handlersWg:            &sync.WaitGroup{},
		runningHandlersWg:     &sync.WaitGroup{},
		runningHandlersWgLock: &sync.Mutex{},

		closingInProgressCh: make(chan struct{}),
		closedCh:            make(chan struct{}),

		running: make(chan struct{}),
	}
	return r, nil
}
// middleware is the internal record of a registered HandlerMiddleware:
// router-level middlewares apply to every handler, handler-level ones only to
// the handler whose name equals HandlerName.
type middleware struct {
	Handler       HandlerMiddleware
	HandlerName   string
	IsRouterLevel bool
}

// Router is responsible for handling messages from subscribers using provided handler functions.
//
// If the handler function returns a message, the message is published with the publisher.
// You can use middlewares to wrap handlers with common logic like logging, instrumentation, etc.
type Router struct {
	config      RouterConfig
	middlewares []middleware
	plugins     []RouterPlugin

	// handlers is guarded by handlersLock (mutated in AddHandler,
	// RunHandlers and the per-handler cleanup goroutine).
	handlers     map[string]*handler
	handlersLock *sync.RWMutex

	// handlersWg counts registered handlers; runningHandlersWg counts
	// in-flight handleMessage calls. Both are awaited in waitForHandlers.
	handlersWg            *sync.WaitGroup
	runningHandlersWg     *sync.WaitGroup
	runningHandlersWgLock *sync.Mutex

	// handlerAdded signals closeWhenAllHandlersStopped that at least one
	// handler now exists (non-blocking send in AddHandler).
	handlerAdded chan struct{}

	// closingInProgressCh is closed when Close() begins; closedCh when it
	// finishes. closed/closedLock make Close idempotent.
	closingInProgressCh chan struct{}
	closedCh            chan struct{}
	closed              bool
	closedLock          sync.Mutex

	logger watermill.LoggerAdapter

	publisherDecorators  []PublisherDecorator
	subscriberDecorators []SubscriberDecorator

	// isRunning is set by Run(); running is closed once handlers started.
	isRunning bool
	running   chan struct{}
}
// Logger returns the Router's logger.
func (r *Router) Logger() watermill.LoggerAdapter {
return r.logger
}
// AddMiddleware adds a new middleware to the router.
//
// The order of middleware matters. Middleware added at the beginning is executed first.
func (r *Router) AddMiddleware(m ...HandlerMiddleware) |
// addRouterLevelMiddleware registers each middleware as router-level, i.e.
// applied to every handler regardless of its name.
func (r *Router) addRouterLevelMiddleware(m ...HandlerMiddleware) {
	for _, hm := range m {
		r.middlewares = append(r.middlewares, middleware{
			Handler:       hm,
			HandlerName:   "",
			IsRouterLevel: true,
		})
	}
}
// addHandlerLevelMiddleware registers each middleware for a single handler,
// identified by handlerName.
func (r *Router) addHandlerLevelMiddleware(handlerName string, m ...HandlerMiddleware) {
	for _, hm := range m {
		r.middlewares = append(r.middlewares, middleware{
			Handler:       hm,
			HandlerName:   handlerName,
			IsRouterLevel: false,
		})
	}
}
// AddPlugin adds a new plugin to the router.
// Plugins are executed during startup of the router.
//
// A plugin can, for example, close the router after SIGINT or SIGTERM is sent to the process (SignalsHandler plugin).
func (r *Router) AddPlugin(p ...RouterPlugin) {
	r.logger.Debug("Adding plugins", watermill.LogFields{"count": fmt.Sprintf("%d", len(p))})
	for _, plugin := range p {
		r.plugins = append(r.plugins, plugin)
	}
}
// AddPublisherDecorators wraps the router's Publisher.
// The first decorator is the innermost, i.e. calls the original publisher.
func (r *Router) AddPublisherDecorators(dec ...PublisherDecorator) {
	count := fmt.Sprintf("%d", len(dec))
	r.logger.Debug("Adding publisher decorators", watermill.LogFields{"count": count})

	r.publisherDecorators = append(r.publisherDecorators, dec...)
}
// AddSubscriberDecorators wraps the router's Subscriber.
// The first decorator is the innermost, i.e. calls the original subscriber.
func (r *Router) AddSubscriberDecorators(dec ...SubscriberDecorator) {
	count := fmt.Sprintf("%d", len(dec))
	r.logger.Debug("Adding subscriber decorators", watermill.LogFields{"count": count})

	r.subscriberDecorators = append(r.subscriberDecorators, dec...)
}
// Handlers returns all registered handlers.
//
// The returned map is a snapshot; mutating it does not affect the router.
func (r *Router) Handlers() map[string]HandlerFunc {
	// Guard the handlers map: AddHandler, RunHandlers and the per-handler
	// cleanup goroutine mutate it under handlersLock, so the previous
	// unsynchronized read here was a data race.
	r.handlersLock.RLock()
	defer r.handlersLock.RUnlock()

	handlers := make(map[string]HandlerFunc, len(r.handlers))
	for handlerName, h := range r.handlers {
		handlers[handlerName] = h.handlerFunc
	}
	return handlers
}
// DuplicateHandlerNameError is sent in a panic when you try to add a second handler with the same name.
type DuplicateHandlerNameError struct {
	HandlerName string
}

// Error implements the error interface.
func (d DuplicateHandlerNameError) Error() string {
	msg := fmt.Sprintf("handler with name %s already exists", d.HandlerName)
	return msg
}
// AddHandler adds a new handler.
//
// handlerName must be unique. For now, it is used only for debugging.
//
// subscribeTopic is a topic from which handler will receive messages.
//
// publishTopic is a topic to which router will produce messages returned by handlerFunc.
// When handler needs to publish to multiple topics,
// it is recommended to just inject Publisher to Handler or implement middleware
// which will catch messages and publish to topic based on metadata for example.
//
// If handler is added while router is already running, you need to explicitly call RunHandlers().
//
// AddHandler panics with DuplicateHandlerNameError when a handler with the
// same name is already registered.
func (r *Router) AddHandler(
	handlerName string,
	subscribeTopic string,
	subscriber Subscriber,
	publishTopic string,
	publisher Publisher,
	handlerFunc HandlerFunc,
) *Handler {
	r.logger.Info("Adding handler", watermill.LogFields{
		"handler_name": handlerName,
		"topic":        subscribeTopic,
	})
	r.handlersLock.Lock()
	defer r.handlersLock.Unlock()
	if _, ok := r.handlers[handlerName]; ok {
		panic(DuplicateHandlerNameError{handlerName})
	}
	publisherName, subscriberName := internal.StructName(publisher), internal.StructName(subscriber)
	newHandler := &handler{
		name:                  handlerName,
		logger:                r.logger,
		subscriber:            subscriber,
		subscribeTopic:        subscribeTopic,
		subscriberName:        subscriberName,
		publisher:             publisher,
		publishTopic:          publishTopic,
		publisherName:         publisherName,
		handlerFunc:           handlerFunc,
		runningHandlersWg:     r.runningHandlersWg,
		runningHandlersWgLock: r.runningHandlersWgLock,
		// messagesCh stays nil until RunHandlers subscribes.
		messagesCh:     nil,
		routersCloseCh: r.closingInProgressCh,
		startedCh:      make(chan struct{}),
	}
	// Incremented here (not in RunHandlers) so closeWhenAllHandlersStopped
	// also accounts for handlers that were added but not started yet.
	r.handlersWg.Add(1)
	r.handlers[handlerName] = newHandler
	// Non-blocking notification of closeWhenAllHandlersStopped.
	select {
	case r.handlerAdded <- struct{}{}:
	default:
		// closeWhenAllHandlersStopped is not always waiting for handlerAdded
	}
	return &Handler{
		router:  r,
		handler: newHandler,
	}
}
// AddNoPublisherHandler adds a new handler.
// This handler cannot return messages.
// When message is returned it will occur an error and Nack will be sent.
//
// handlerName must be unique. For now, it is used only for debugging.
//
// subscribeTopic is a topic from which handler will receive messages.
//
// subscriber is Subscriber from which messages will be consumed.
//
// If handler is added while router is already running, you need to explicitly call RunHandlers().
func (r *Router) AddNoPublisherHandler(
	handlerName string,
	subscribeTopic string,
	subscriber Subscriber,
	handlerFunc NoPublishHandlerFunc,
) *Handler {
	// Adapt the no-output signature to HandlerFunc; any returned messages
	// would hit disabledPublisher and fail with ErrOutputInNoPublisherHandler.
	adapter := func(msg *Message) ([]*Message, error) {
		if err := handlerFunc(msg); err != nil {
			return nil, err
		}
		return nil, nil
	}
	return r.AddHandler(handlerName, subscribeTopic, subscriber, "", disabledPublisher{}, adapter)
}
// Run runs all plugins and handlers and starts subscribing to provided topics.
// This call is blocking while the router is running.
//
// When all handlers have stopped (for example, because subscriptions were closed), the router will also stop.
//
// To stop Run() you should call Close() on the router.
//
// ctx will be propagated to all subscribers.
//
// When all handlers are stopped (for example: because of closed connection), Run() will be also stopped.
func (r *Router) Run(ctx context.Context) (err error) {
	// NOTE(review): isRunning is a plain bool checked and set without any
	// synchronization — presumably Run is meant to be called once from a
	// single goroutine; concurrent Run calls would race. Confirm with callers.
	if r.isRunning {
		return errors.New("router is already running")
	}
	r.isRunning = true
	ctx, cancel := context.WithCancel(ctx)
	defer cancel()
	r.logger.Debug("Loading plugins", nil)
	for _, plugin := range r.plugins {
		if err := plugin(r); err != nil {
			return errors.Wrapf(err, "cannot initialize plugin %v", plugin)
		}
	}
	if err := r.RunHandlers(ctx); err != nil {
		return err
	}
	// Signal Running(): observers may now assume handlers are subscribed.
	close(r.running)
	go r.closeWhenAllHandlersStopped(ctx)
	// Block until Close() (or all handlers stopping) initiates shutdown.
	<-r.closingInProgressCh
	cancel()
	r.logger.Info("Waiting for messages", watermill.LogFields{
		"timeout": r.config.CloseTimeout,
	})
	// Close() closes closedCh once in-flight handlers finished or timed out.
	<-r.closedCh
	r.logger.Info("All messages processed", nil)
	return nil
}
// RunHandlers runs all handlers that were added after Run().
// RunHandlers is idempotent, so can be called multiple times safely.
func (r *Router) RunHandlers(ctx context.Context) error {
	if !r.isRunning {
		return errors.New("you can't call RunHandlers on non-running router")
	}
	r.handlersLock.Lock()
	defer r.handlersLock.Unlock()
	r.logger.Info("Running router handlers", watermill.LogFields{"count": len(r.handlers)})
	for name, h := range r.handlers {
		// Per-iteration copies captured by the goroutine below.
		name := name
		h := h
		// Idempotency: handlers already running are skipped.
		if h.started {
			continue
		}
		if err := r.decorateHandlerPublisher(h); err != nil {
			return errors.Wrapf(err, "could not decorate publisher of handler %s", name)
		}
		if err := r.decorateHandlerSubscriber(h); err != nil {
			return errors.Wrapf(err, "could not decorate subscriber of handler %s", name)
		}
		r.logger.Debug("Subscribing to topic", watermill.LogFields{
			"subscriber_name": h.name,
			"topic":           h.subscribeTopic,
		})
		// `:=` deliberately shadows ctx inside the loop body: each handler
		// gets its own cancelable context derived from the outer run context.
		ctx, cancel := context.WithCancel(ctx)
		messages, err := h.subscriber.Subscribe(ctx, h.subscribeTopic)
		if err != nil {
			cancel()
			return errors.Wrapf(err, "cannot subscribe topic %s", h.subscribeTopic)
		}
		h.messagesCh = messages
		h.started = true
		close(h.startedCh)
		h.stopFn = cancel
		h.stopped = make(chan struct{})
		go func() {
			defer cancel()
			h.run(ctx, r.middlewares)
			// Handler finished: release its handlersWg slot and deregister it
			// so a handler with the same name can be added again.
			r.handlersWg.Done()
			r.logger.Info("Subscriber stopped", watermill.LogFields{
				"subscriber_name": h.name,
				"topic":           h.subscribeTopic,
			})
			r.handlersLock.Lock()
			delete(r.handlers, name)
			r.handlersLock.Unlock()
		}()
	}
	return nil
}
// closeWhenAllHandlersStopped closes the router when all handlers have
// stopped, because for example all subscriptions were closed.
func (r *Router) closeWhenAllHandlersStopped(ctx context.Context) {
	r.handlersLock.RLock()
	// Renamed from the misleading `hasHandlers`: this is true when there are
	// NO handlers registered yet.
	noHandlersYet := len(r.handlers) == 0
	r.handlersLock.RUnlock()
	if noHandlersYet {
		// With no handlers registered, handlersWg.Wait() would return
		// immediately and close the router before it did any work, so wait
		// until at least one handler is added (or the router is closed).
		select {
		case <-r.handlerAdded:
			// there is now a handler to track
		case <-r.closedCh:
			// router was closed; avoid leaking this goroutine
			return
		}
	}
	r.handlersWg.Wait()
	if r.IsClosed() {
		// already closed
		return
	}
	// Only log an error if the context was not canceled, but handlers were stopped.
	select {
	case <-ctx.Done():
	default:
		r.logger.Error("All handlers stopped, closing router", errors.New("all router handlers stopped"), nil)
	}
	if err := r.Close(); err != nil {
		r.logger.Error("Cannot close router", err, nil)
	}
}
// Running is closed when router is running.
// In other words: you can wait till router is running using
//
// fmt.Println("Starting router")
// go r.Run(ctx)
// <- r.Running()
// fmt.Println("Router is running")
//
// Warning: for historical reasons, this channel is not aware of router closing - the channel will be closed if the router has been running and closed.
func (r *Router) Running() chan struct{} {
return r.running
}
// IsRunning returns true when router is running.
//
// Warning: for historical reasons, this method is not aware of router closing.
// If you want to know if the router was closed, use IsClosed.
func (r *Router) IsRunning() bool {
	// Non-blocking receive: the running channel is closed by Run() once the
	// handlers are started, so a receive succeeds only after that point.
	select {
	case <-r.running:
	default:
		return false
	}
	return true
}
// Close gracefully closes the router, waiting up to the configured
// CloseTimeout for in-flight handlers. Calling Close on an already-closed
// router is a no-op; an error is returned only when the timeout is exceeded.
func (r *Router) Close() error {
	r.closedLock.Lock()
	defer r.closedLock.Unlock()

	r.handlersLock.Lock()
	defer r.handlersLock.Unlock()

	if r.closed {
		// idempotent: a second Close is a no-op
		return nil
	}
	r.closed = true

	r.logger.Info("Closing router", nil)
	defer r.logger.Info("Router closed", nil)

	// Signal handlers that shutdown started; closedCh is closed on the way
	// out (deferred), after waitForHandlers returned.
	close(r.closingInProgressCh)
	defer close(r.closedCh)

	if timedOut := r.waitForHandlers(); timedOut {
		return errors.New("router close timeout")
	}
	return nil
}
// waitForHandlers blocks until both the registered handlers and the in-flight
// message handlers finished, or the configured CloseTimeout elapsed.
// It reports true when the timeout was hit.
func (r *Router) waitForHandlers() bool {
	var wg sync.WaitGroup

	spawn := func(wait func()) {
		wg.Add(1)
		go func() {
			defer wg.Done()
			wait()
		}()
	}

	spawn(r.handlersWg.Wait)
	spawn(func() {
		r.runningHandlersWgLock.Lock()
		defer r.runningHandlersWgLock.Unlock()
		r.runningHandlersWg.Wait()
	})

	return sync_internal.WaitGroupTimeout(&wg, r.config.CloseTimeout)
}
// IsClosed reports whether Close was called on the router.
func (r *Router) IsClosed() bool {
	r.closedLock.Lock()
	closed := r.closed
	r.closedLock.Unlock()

	return closed
}
// handler is the router's internal representation of a single
// subscription -> handlerFunc -> (optional) publisher pipeline.
type handler struct {
	name   string
	logger watermill.LoggerAdapter

	subscriber     Subscriber
	subscribeTopic string
	subscriberName string

	publisher     Publisher
	publishTopic  string
	publisherName string

	handlerFunc HandlerFunc

	// runningHandlersWg counts in-flight handleMessage calls; both fields
	// are shared with (and owned by) the router.
	runningHandlersWg     *sync.WaitGroup
	runningHandlersWgLock *sync.Mutex

	// messagesCh is the subscription channel; nil until RunHandlers subscribes.
	messagesCh <-chan *Message

	started   bool
	startedCh chan struct{}

	// stopFn cancels this handler's context; stopped is closed when run() returns.
	stopFn  context.CancelFunc
	stopped chan struct{}

	// routersCloseCh is the router's closingInProgressCh.
	routersCloseCh chan struct{}
}
// run consumes messages from h.messagesCh until the channel is closed,
// dispatching each message to the middleware-wrapped handler func in its own
// goroutine. It blocks for the lifetime of the subscription.
func (h *handler) run(ctx context.Context, middlewares []middleware) {
	h.logger.Info("Starting handler", watermill.LogFields{
		"subscriber_name": h.name,
		"topic":           h.subscribeTopic,
	})
	middlewareHandler := h.handlerFunc
	// first added middlewares should be executed first (so should be at the top of call stack)
	for i := len(middlewares) - 1; i >= 0; i-- {
		currentMiddleware := middlewares[i]
		isValidHandlerLevelMiddleware := currentMiddleware.HandlerName == h.name
		if currentMiddleware.IsRouterLevel || isValidHandlerLevelMiddleware {
			middlewareHandler = currentMiddleware.Handler(middlewareHandler)
		}
	}
	// handleClose closes the subscriber on router shutdown, which closes
	// messagesCh and terminates the range loop below.
	go h.handleClose(ctx)
	for msg := range h.messagesCh {
		// Track the in-flight message so Close() can wait for it.
		h.runningHandlersWgLock.Lock()
		h.runningHandlersWg.Add(1)
		h.runningHandlersWgLock.Unlock()
		go h.handleMessage(msg, middlewareHandler)
	}
	if h.publisher != nil {
		h.logger.Debug("Waiting for publisher to close", nil)
		if err := h.publisher.Close(); err != nil {
			h.logger.Error("Failed to close publisher", err, nil)
		}
		h.logger.Debug("Publisher closed", nil)
	}
	h.logger.Debug("Router handler stopped", nil)
	close(h.stopped)
}
// Handler handles Messages.
type Handler struct {
	router  *Router
	handler *handler
}

// AddMiddleware adds new middleware to the specified handler in the router.
//
// The order of middleware matters. Middleware added at the beginning is executed first.
func (h *Handler) AddMiddleware(m ...HandlerMiddleware) {
	handler := h.handler
	handler.logger.Debug("Adding middleware to handler", watermill.LogFields{
		"count":       fmt.Sprintf("%d", len(m)),
		"handlerName": handler.name,
	})
	h.router.addHandlerLevelMiddleware(handler.name, m...)
}

// Started returns a channel which is closed when the handler is running.
func (h *Handler) Started() chan struct{} {
	return h.handler.startedCh
}

// Stop stops the handler.
// Stop is asynchronous.
// You can check if handler was stopped with Stopped() function.
//
// Stop panics when the handler was not started yet.
// NOTE(review): started is read here without the lock RunHandlers writes it
// under — presumably Stop is only called after Started() was observed;
// confirm before relying on it concurrently.
func (h *Handler) Stop() {
	if !h.handler.started {
		panic("handler is not started")
	}
	h.handler.stopFn()
}

// Stopped returns a channel which is closed when the handler did stop.
func (h *Handler) Stopped() chan struct{} {
	return h.handler.stopped
}
// decorateHandlerPublisher applies the registered publisher decorators to the
// handler's publisher. They are applied in reverse order, so that the later
// decorators use the result of former ones.
func (r *Router) decorateHandlerPublisher(h *handler) error {
	pub := h.publisher
	for i := len(r.publisherDecorators); i > 0; i-- {
		var err error
		pub, err = r.publisherDecorators[i-1](pub)
		if err != nil {
			return errors.Wrap(err, "could not apply publisher decorator")
		}
	}
	// Store the decorated publisher back on the registered handler.
	r.handlers[h.name].publisher = pub
	return nil
}
// decorateHandlerSubscriber applies the decorator chain to handler's subscriber.
// They are applied in regular order, so that the later decorators use the result of former ones.
func (r *Router) decorateHandlerSubscriber(h *handler) error {
	var err error
	sub := h.subscriber
	// add values to message context to subscriber
	// it goes before other decorators, so that they may take advantage of these values
	messageTransform := func(msg *Message) {
		if msg != nil {
			h.addHandlerContext(msg)
		}
	}
	sub, err = MessageTransformSubscriberDecorator(messageTransform)(sub)
	if err != nil {
		return errors.Wrapf(err, "cannot wrap subscriber with context decorator")
	}
	for _, decorator := range r.subscriberDecorators {
		sub, err = decorator(sub)
		if err != nil {
			return errors.Wrap(err, "could not apply subscriber decorator")
		}
	}
	// Store the decorated subscriber back on the registered handler.
	r.handlers[h.name].subscriber = sub
	return nil
}
// addHandlerContext enriches the context of each message with values that are
// relevant within this handler's context: the handler, publisher and
// subscriber names plus the subscribe/publish topics. Empty values are skipped.
func (h *handler) addHandlerContext(messages ...*Message) {
	for i, msg := range messages {
		ctx := msg.Context()
		if h.name != "" {
			ctx = context.WithValue(ctx, handlerNameKey, h.name)
		}
		if h.publisherName != "" {
			ctx = context.WithValue(ctx, publisherNameKey, h.publisherName)
		}
		if h.subscriberName != "" {
			ctx = context.WithValue(ctx, subscriberNameKey, h.subscriberName)
		}
		if h.subscribeTopic != "" {
			ctx = context.WithValue(ctx, subscribeTopicKey, h.subscribeTopic)
		}
		if h.publishTopic != "" {
			ctx = context.WithValue(ctx, publishTopicKey, h.publishTopic)
		}
		messages[i].SetContext(ctx)
	}
}
// handleClose stops the handler when either the whole router starts closing
// or the handler's own context is done. On router close it also closes the
// subscriber.
func (h *handler) handleClose(ctx context.Context) {
	select {
	case <-h.routersCloseCh:
		// for backward compatibility we are closing subscriber
		h.logger.Debug("Waiting for subscriber to close", nil)
		if err := h.subscriber.Close(); err != nil {
			h.logger.Error("Failed to close subscriber", err, nil)
		}
		h.logger.Debug("Subscriber closed", nil)
	case <-ctx.Done():
		// we are closing subscriber just when entire router is closed
	}
	h.stopFn()
}
// handleMessage runs the (middleware-wrapped) handler for a single message,
// publishes any produced messages and acks/nacks the consumed message.
// Panics in the handler are recovered, logged, and turned into a Nack.
func (h *handler) handleMessage(msg *Message, handler HandlerFunc) {
	defer h.runningHandlersWg.Done()
	msgFields := watermill.LogFields{"message_uuid": msg.UUID}
	defer func() {
		if recovered := recover(); recovered != nil {
			h.logger.Error(
				"Panic recovered in handler. Stack: "+string(debug.Stack()),
				errors.Errorf("%s", recovered),
				msgFields,
			)
			msg.Nack()
		}
	}()
	h.logger.Trace("Received message", msgFields)
	producedMessages, err := handler(msg)
	if err != nil {
		// msgFields instead of nil so the failing message's UUID is logged —
		// consistent with every other log call in this function.
		h.logger.Error("Handler returned error", err, msgFields)
		msg.Nack()
		return
	}
	h.addHandlerContext(producedMessages...)
	if err := h.publishProducedMessages(producedMessages, msgFields); err != nil {
		h.logger.Error("Publishing produced messages failed", err, msgFields)
		msg.Nack()
		return
	}
	msg.Ack()
	h.logger.Trace("Message acked", msgFields)
}
// publishProducedMessages publishes every produced message to the handler's
// publish topic. It fails with ErrOutputInNoPublisherHandler when messages
// were produced but no publisher is configured, and returns the first publish
// error otherwise.
func (h *handler) publishProducedMessages(producedMessages Messages, msgFields watermill.LogFields) error {
	if len(producedMessages) == 0 {
		return nil
	}
	if h.publisher == nil {
		return ErrOutputInNoPublisherHandler
	}

	h.logger.Trace("Sending produced messages", msgFields.Add(watermill.LogFields{
		"produced_messages_count": len(producedMessages),
		"publish_topic":           h.publishTopic,
	}))

	for _, producedMsg := range producedMessages {
		err := h.publisher.Publish(h.publishTopic, producedMsg)
		if err == nil {
			continue
		}
		// todo - how to deal with it better/transactional/retry?
		h.logger.Error("Cannot publish message", err, msgFields.Add(watermill.LogFields{
			"not_sent_message": fmt.Sprintf("%#v", producedMessages),
		}))
		return err
	}
	return nil
}
// disabledPublisher is the Publisher installed by AddNoPublisherHandler.
// Any attempt to publish through it fails with ErrOutputInNoPublisherHandler.
type disabledPublisher struct{}

// Publish always fails: no-publisher handlers must not produce messages.
func (disabledPublisher) Publish(topic string, messages ...*Message) error {
	return ErrOutputInNoPublisherHandler
}

// Close is a no-op; there is nothing to release.
func (disabledPublisher) Close() error {
	return nil
}
| {
r.logger.Debug("Adding middleware", watermill.LogFields{"count": fmt.Sprintf("%d", len(m))})
r.addRouterLevelMiddleware(m...)
} | identifier_body |
router.go | package message
import (
"context"
"fmt"
"runtime/debug"
"sync"
"time"
"github.com/pkg/errors"
"github.com/ThreeDotsLabs/watermill"
"github.com/ThreeDotsLabs/watermill/internal"
sync_internal "github.com/ThreeDotsLabs/watermill/pubsub/sync"
)
var (
// ErrOutputInNoPublisherHandler happens when a handler func returned some messages in a no-publisher handler.
// todo: maybe change the handler func signature in no-publisher handler so that there's no possibility for this
ErrOutputInNoPublisherHandler = errors.New("returned output messages in a handler without publisher")
)
// HandlerFunc is function called when message is received.
//
// msg.Ack() is called automatically when HandlerFunc doesn't return error.
// When HandlerFunc returns error, msg.Nack() is called.
// When msg.Ack() was called in handler and HandlerFunc returns error,
// msg.Nack() will be not sent because Ack was already sent.
//
// HandlerFunc's are executed parallel when multiple messages was received
// (because msg.Ack() was sent in HandlerFunc or Subscriber supports multiple consumers).
type HandlerFunc func(msg *Message) ([]*Message, error)
// NoPublishHandlerFunc is HandlerFunc alternative, which doesn't produce any messages.
type NoPublishHandlerFunc func(msg *Message) error
// PassthroughHandler is a handler that passes the message unchanged from the subscriber to the publisher.
var PassthroughHandler HandlerFunc = func(msg *Message) ([]*Message, error) {
return []*Message{msg}, nil
}
// HandlerMiddleware allows us to write something like decorators to HandlerFunc.
// It can execute something before handler (for example: modify consumed message)
// or after (modify produced messages, ack/nack on consumed message, handle errors, logging, etc.).
//
// It can be attached to the router by using `AddMiddleware` method.
//
// Example:
//
// func ExampleMiddleware(h message.HandlerFunc) message.HandlerFunc {
// return func(message *message.Message) ([]*message.Message, error) {
// fmt.Println("executed before handler")
// producedMessages, err := h(message)
// fmt.Println("executed after handler")
//
// return producedMessages, err
// }
// }
type HandlerMiddleware func(h HandlerFunc) HandlerFunc
// RouterPlugin is function which is executed on Router start.
type RouterPlugin func(*Router) error
// PublisherDecorator wraps the underlying Publisher, adding some functionality.
type PublisherDecorator func(pub Publisher) (Publisher, error)
// SubscriberDecorator wraps the underlying Subscriber, adding some functionality.
type SubscriberDecorator func(sub Subscriber) (Subscriber, error)
// RouterConfig holds the Router's configuration options.
type RouterConfig struct {
// CloseTimeout determines how long router should work for handlers when closing.
CloseTimeout time.Duration
}
func (c *RouterConfig) setDefaults() {
if c.CloseTimeout == 0 {
c.CloseTimeout = time.Second * 30
}
}
// Validate returns Router configuration error, if any.
func (c RouterConfig) Validate() error {
return nil
}
// NewRouter creates a new Router with given configuration.
func NewRouter(config RouterConfig, logger watermill.LoggerAdapter) (*Router, error) {
config.setDefaults()
if err := config.Validate(); err != nil {
return nil, errors.Wrap(err, "invalid config")
}
if logger == nil {
logger = watermill.NopLogger{}
}
return &Router{
config: config,
handlers: map[string]*handler{},
handlersWg: &sync.WaitGroup{},
runningHandlersWg: &sync.WaitGroup{},
runningHandlersWgLock: &sync.Mutex{},
handlerAdded: make(chan struct{}),
handlersLock: &sync.RWMutex{},
closingInProgressCh: make(chan struct{}),
closedCh: make(chan struct{}),
logger: logger,
running: make(chan struct{}),
}, nil
}
type middleware struct {
Handler HandlerMiddleware
HandlerName string
IsRouterLevel bool
}
// Router is responsible for handling messages from subscribers using provided handler functions.
//
// If the handler function returns a message, the message is published with the publisher.
// You can use middlewares to wrap handlers with common logic like logging, instrumentation, etc.
type Router struct {
config RouterConfig
middlewares []middleware
plugins []RouterPlugin
handlers map[string]*handler
handlersLock *sync.RWMutex
handlersWg *sync.WaitGroup
runningHandlersWg *sync.WaitGroup
runningHandlersWgLock *sync.Mutex
handlerAdded chan struct{}
closingInProgressCh chan struct{}
closedCh chan struct{}
closed bool
closedLock sync.Mutex
logger watermill.LoggerAdapter
publisherDecorators []PublisherDecorator
subscriberDecorators []SubscriberDecorator
isRunning bool
running chan struct{}
}
// Logger returns the Router's logger.
func (r *Router) Logger() watermill.LoggerAdapter {
return r.logger
}
// AddMiddleware adds a new middleware to the router.
//
// The order of middleware matters. Middleware added at the beginning is executed first.
func (r *Router) AddMiddleware(m ...HandlerMiddleware) {
r.logger.Debug("Adding middleware", watermill.LogFields{"count": fmt.Sprintf("%d", len(m))})
r.addRouterLevelMiddleware(m...)
}
func (r *Router) addRouterLevelMiddleware(m ...HandlerMiddleware) {
for _, handlerMiddleware := range m {
middleware := middleware{
Handler: handlerMiddleware,
HandlerName: "",
IsRouterLevel: true,
}
r.middlewares = append(r.middlewares, middleware)
}
}
func (r *Router) addHandlerLevelMiddleware(handlerName string, m ...HandlerMiddleware) {
for _, handlerMiddleware := range m {
middleware := middleware{
Handler: handlerMiddleware,
HandlerName: handlerName,
IsRouterLevel: false,
}
r.middlewares = append(r.middlewares, middleware)
}
}
// AddPlugin adds a new plugin to the router.
// Plugins are executed during startup of the router.
//
// A plugin can, for example, close the router after SIGINT or SIGTERM is sent to the process (SignalsHandler plugin).
func (r *Router) AddPlugin(p ...RouterPlugin) {
r.logger.Debug("Adding plugins", watermill.LogFields{"count": fmt.Sprintf("%d", len(p))})
r.plugins = append(r.plugins, p...)
}
// AddPublisherDecorators wraps the router's Publisher.
// The first decorator is the innermost, i.e. calls the original publisher.
func (r *Router) AddPublisherDecorators(dec ...PublisherDecorator) {
r.logger.Debug("Adding publisher decorators", watermill.LogFields{"count": fmt.Sprintf("%d", len(dec))})
r.publisherDecorators = append(r.publisherDecorators, dec...)
}
// AddSubscriberDecorators wraps the router's Subscriber.
// The first decorator is the innermost, i.e. calls the original subscriber.
func (r *Router) AddSubscriberDecorators(dec ...SubscriberDecorator) {
r.logger.Debug("Adding subscriber decorators", watermill.LogFields{"count": fmt.Sprintf("%d", len(dec))})
r.subscriberDecorators = append(r.subscriberDecorators, dec...)
}
// Handlers returns all registered handlers.
//
// The returned map is a snapshot; mutating it does not affect the router.
func (r *Router) Handlers() map[string]HandlerFunc {
	// Guard the handlers map: AddHandler, RunHandlers and the per-handler
	// cleanup goroutine mutate it under handlersLock, so the previous
	// unsynchronized read here was a data race.
	r.handlersLock.RLock()
	defer r.handlersLock.RUnlock()

	handlers := make(map[string]HandlerFunc, len(r.handlers))
	for handlerName, h := range r.handlers {
		handlers[handlerName] = h.handlerFunc
	}
	return handlers
}
// DuplicateHandlerNameError is sent in a panic when you try to add a second handler with the same name.
type DuplicateHandlerNameError struct {
HandlerName string
}
func (d DuplicateHandlerNameError) Error() string {
return fmt.Sprintf("handler with name %s already exists", d.HandlerName)
}
// AddHandler adds a new handler.
//
// handlerName must be unique. For now, it is used only for debugging.
//
// subscribeTopic is a topic from which handler will receive messages.
//
// publishTopic is a topic to which router will produce messages returned by handlerFunc.
// When handler needs to publish to multiple topics,
// it is recommended to just inject Publisher to Handler or implement middleware
// which will catch messages and publish to topic based on metadata for example.
//
// If handler is added while router is already running, you need to explicitly call RunHandlers().
func (r *Router) AddHandler(
handlerName string,
subscribeTopic string,
subscriber Subscriber,
publishTopic string,
publisher Publisher,
handlerFunc HandlerFunc,
) *Handler {
r.logger.Info("Adding handler", watermill.LogFields{
"handler_name": handlerName,
"topic": subscribeTopic,
})
r.handlersLock.Lock()
defer r.handlersLock.Unlock()
if _, ok := r.handlers[handlerName]; ok {
panic(DuplicateHandlerNameError{handlerName})
}
publisherName, subscriberName := internal.StructName(publisher), internal.StructName(subscriber)
newHandler := &handler{
name: handlerName,
logger: r.logger,
subscriber: subscriber,
subscribeTopic: subscribeTopic,
subscriberName: subscriberName,
publisher: publisher,
publishTopic: publishTopic,
publisherName: publisherName,
handlerFunc: handlerFunc,
runningHandlersWg: r.runningHandlersWg,
runningHandlersWgLock: r.runningHandlersWgLock,
messagesCh: nil,
routersCloseCh: r.closingInProgressCh,
startedCh: make(chan struct{}),
}
r.handlersWg.Add(1)
r.handlers[handlerName] = newHandler
select {
case r.handlerAdded <- struct{}{}:
default:
// closeWhenAllHandlersStopped is not always waiting for handlerAdded
}
return &Handler{
router: r,
handler: newHandler,
}
}
// AddNoPublisherHandler adds a new handler.
// This handler cannot return messages.
// When message is returned it will occur an error and Nack will be sent.
//
// handlerName must be unique. For now, it is used only for debugging.
//
// subscribeTopic is a topic from which handler will receive messages.
//
// subscriber is Subscriber from which messages will be consumed.
//
// If handler is added while router is already running, you need to explicitly call RunHandlers().
func (r *Router) AddNoPublisherHandler(
handlerName string,
subscribeTopic string,
subscriber Subscriber,
handlerFunc NoPublishHandlerFunc,
) *Handler {
handlerFuncAdapter := func(msg *Message) ([]*Message, error) {
return nil, handlerFunc(msg)
}
return r.AddHandler(handlerName, subscribeTopic, subscriber, "", disabledPublisher{}, handlerFuncAdapter)
}
// Run runs all plugins and handlers and starts subscribing to provided topics.
// This call is blocking while the router is running.
//
// When all handlers have stopped (for example, because subscriptions were closed), the router will also stop.
//
// To stop Run() you should call Close() on the router.
//
// ctx will be propagated to all subscribers.
//
// When all handlers are stopped (for example: because of closed connection), Run() will be also stopped.
func (r *Router) Run(ctx context.Context) (err error) {
if r.isRunning {
return errors.New("router is already running")
}
r.isRunning = true
ctx, cancel := context.WithCancel(ctx)
defer cancel()
r.logger.Debug("Loading plugins", nil)
for _, plugin := range r.plugins {
if err := plugin(r); err != nil {
return errors.Wrapf(err, "cannot initialize plugin %v", plugin)
}
}
if err := r.RunHandlers(ctx); err != nil {
return err
}
close(r.running)
go r.closeWhenAllHandlersStopped(ctx)
<-r.closingInProgressCh
cancel()
r.logger.Info("Waiting for messages", watermill.LogFields{
"timeout": r.config.CloseTimeout,
})
<-r.closedCh
r.logger.Info("All messages processed", nil)
return nil
}
// RunHandlers runs all handlers that were added after Run().
// RunHandlers is idempotent, so can be called multiple times safely.
func (r *Router) RunHandlers(ctx context.Context) error {
if !r.isRunning {
return errors.New("you can't call RunHandlers on non-running router")
}
r.handlersLock.Lock()
defer r.handlersLock.Unlock()
r.logger.Info("Running router handlers", watermill.LogFields{"count": len(r.handlers)})
for name, h := range r.handlers {
name := name
h := h
if h.started {
continue
}
if err := r.decorateHandlerPublisher(h); err != nil {
return errors.Wrapf(err, "could not decorate publisher of handler %s", name)
}
if err := r.decorateHandlerSubscriber(h); err != nil {
return errors.Wrapf(err, "could not decorate subscriber of handler %s", name)
}
r.logger.Debug("Subscribing to topic", watermill.LogFields{
"subscriber_name": h.name,
"topic": h.subscribeTopic,
})
ctx, cancel := context.WithCancel(ctx)
messages, err := h.subscriber.Subscribe(ctx, h.subscribeTopic)
if err != nil {
cancel()
return errors.Wrapf(err, "cannot subscribe topic %s", h.subscribeTopic)
}
h.messagesCh = messages
h.started = true
close(h.startedCh)
h.stopFn = cancel
h.stopped = make(chan struct{})
go func() {
defer cancel()
h.run(ctx, r.middlewares)
r.handlersWg.Done()
r.logger.Info("Subscriber stopped", watermill.LogFields{
"subscriber_name": h.name,
"topic": h.subscribeTopic,
})
r.handlersLock.Lock()
delete(r.handlers, name)
r.handlersLock.Unlock()
}()
}
return nil
}
// closeWhenAllHandlersStopped closed router, when all handlers has stopped,
// because for example all subscriptions are closed.
func (r *Router) closeWhenAllHandlersStopped(ctx context.Context) {
r.handlersLock.RLock()
hasHandlers := len(r.handlers) == 0
r.handlersLock.RUnlock()
if hasHandlers {
// in that situation router will be closed immediately (even if they are no routers)
// let's wait for
select {
case <-r.handlerAdded:
// it should be some handler to track
case <-r.closedCh:
// let's avoid goroutine leak
return
}
}
r.handlersWg.Wait()
if r.IsClosed() {
// already closed
return
}
// Only log an error if the context was not canceled, but handlers were stopped.
select {
case <-ctx.Done():
default:
r.logger.Error("All handlers stopped, closing router", errors.New("all router handlers stopped"), nil)
}
if err := r.Close(); err != nil {
r.logger.Error("Cannot close router", err, nil)
}
}
// Running is closed when router is running.
// In other words: you can wait till router is running using
//
// fmt.Println("Starting router")
// go r.Run(ctx)
// <- r.Running()
// fmt.Println("Router is running")
//
// Warning: for historical reasons, this channel is not aware of router closing - the channel will be closed if the router has been running and closed.
func (r *Router) Running() chan struct{} {
return r.running
}
// IsRunning returns true when router is running.
//
// Warning: for historical reasons, this method is not aware of router closing.
// If you want to know if the router was closed, use IsClosed.
func (r *Router) IsRunning() bool {
select {
case <-r.running:
return true
default:
return false
}
}
// Close gracefully closes the router with a timeout provided in the configuration.
func (r *Router) Close() error {
r.closedLock.Lock()
defer r.closedLock.Unlock()
r.handlersLock.Lock()
defer r.handlersLock.Unlock()
if r.closed {
return nil
}
r.closed = true
r.logger.Info("Closing router", nil)
defer r.logger.Info("Router closed", nil)
close(r.closingInProgressCh)
defer close(r.closedCh)
timeouted := r.waitForHandlers()
if timeouted {
return errors.New("router close timeout")
}
return nil
}
func (r *Router) waitForHandlers() bool {
var waitGroup sync.WaitGroup
waitGroup.Add(1)
go func() {
defer waitGroup.Done()
r.handlersWg.Wait()
}()
waitGroup.Add(1)
go func() {
defer waitGroup.Done()
r.runningHandlersWgLock.Lock()
defer r.runningHandlersWgLock.Unlock()
r.runningHandlersWg.Wait()
}()
return sync_internal.WaitGroupTimeout(&waitGroup, r.config.CloseTimeout)
}
func (r *Router) IsClosed() bool {
r.closedLock.Lock()
defer r.closedLock.Unlock()
return r.closed
}
type handler struct {
name string
logger watermill.LoggerAdapter
subscriber Subscriber
subscribeTopic string
subscriberName string
publisher Publisher
publishTopic string
publisherName string
handlerFunc HandlerFunc
runningHandlersWg *sync.WaitGroup
runningHandlersWgLock *sync.Mutex
messagesCh <-chan *Message
started bool
startedCh chan struct{}
stopFn context.CancelFunc
stopped chan struct{}
routersCloseCh chan struct{}
}
func (h *handler) run(ctx context.Context, middlewares []middleware) {
h.logger.Info("Starting handler", watermill.LogFields{
"subscriber_name": h.name,
"topic": h.subscribeTopic,
})
middlewareHandler := h.handlerFunc
// first added middlewares should be executed first (so should be at the top of call stack)
for i := len(middlewares) - 1; i >= 0; i-- {
currentMiddleware := middlewares[i]
isValidHandlerLevelMiddleware := currentMiddleware.HandlerName == h.name
if currentMiddleware.IsRouterLevel || isValidHandlerLevelMiddleware {
middlewareHandler = currentMiddleware.Handler(middlewareHandler)
}
}
go h.handleClose(ctx)
for msg := range h.messagesCh {
h.runningHandlersWgLock.Lock()
h.runningHandlersWg.Add(1)
h.runningHandlersWgLock.Unlock()
go h.handleMessage(msg, middlewareHandler)
}
if h.publisher != nil {
h.logger.Debug("Waiting for publisher to close", nil)
if err := h.publisher.Close(); err != nil {
h.logger.Error("Failed to close publisher", err, nil)
}
h.logger.Debug("Publisher closed", nil)
}
h.logger.Debug("Router handler stopped", nil)
close(h.stopped)
}
// Handler handles Messages.
type Handler struct {
router *Router
handler *handler
}
// AddMiddleware adds new middleware to the specified handler in the router.
//
// The order of middleware matters. Middleware added at the beginning is executed first.
func (h *Handler) AddMiddleware(m ...HandlerMiddleware) {
handler := h.handler
handler.logger.Debug("Adding middleware to handler", watermill.LogFields{
"count": fmt.Sprintf("%d", len(m)),
"handlerName": handler.name,
})
h.router.addHandlerLevelMiddleware(handler.name, m...)
}
// Started returns channel which is stopped when handler is running.
func (h *Handler) Started() chan struct{} {
return h.handler.startedCh
}
// Stop stops the handler.
// Stop is asynchronous.
// You can check if handler was stopped with Stopped() function.
func (h *Handler) Stop() {
if !h.handler.started {
panic("handler is not started")
}
h.handler.stopFn()
}
// Stopped returns channel which is stopped when handler did stop.
func (h *Handler) | () chan struct{} {
return h.handler.stopped
}
// decorateHandlerPublisher applies the decorator chain to handler's publisher.
// They are applied in reverse order, so that the later decorators use the result of former ones.
func (r *Router) decorateHandlerPublisher(h *handler) error {
var err error
pub := h.publisher
for i := len(r.publisherDecorators) - 1; i >= 0; i-- {
decorator := r.publisherDecorators[i]
pub, err = decorator(pub)
if err != nil {
return errors.Wrap(err, "could not apply publisher decorator")
}
}
r.handlers[h.name].publisher = pub
return nil
}
// decorateHandlerSubscriber applies the decorator chain to handler's subscriber.
// They are applied in regular order, so that the later decorators use the result of former ones.
func (r *Router) decorateHandlerSubscriber(h *handler) error {
var err error
sub := h.subscriber
// add values to message context to subscriber
// it goes before other decorators, so that they may take advantage of these values
messageTransform := func(msg *Message) {
if msg != nil {
h.addHandlerContext(msg)
}
}
sub, err = MessageTransformSubscriberDecorator(messageTransform)(sub)
if err != nil {
return errors.Wrapf(err, "cannot wrap subscriber with context decorator")
}
for _, decorator := range r.subscriberDecorators {
sub, err = decorator(sub)
if err != nil {
return errors.Wrap(err, "could not apply subscriber decorator")
}
}
r.handlers[h.name].subscriber = sub
return nil
}
// addHandlerContext enriches the contex with values that are relevant within this handler's context.
func (h *handler) addHandlerContext(messages ...*Message) {
for i, msg := range messages {
ctx := msg.Context()
if h.name != "" {
ctx = context.WithValue(ctx, handlerNameKey, h.name)
}
if h.publisherName != "" {
ctx = context.WithValue(ctx, publisherNameKey, h.publisherName)
}
if h.subscriberName != "" {
ctx = context.WithValue(ctx, subscriberNameKey, h.subscriberName)
}
if h.subscribeTopic != "" {
ctx = context.WithValue(ctx, subscribeTopicKey, h.subscribeTopic)
}
if h.publishTopic != "" {
ctx = context.WithValue(ctx, publishTopicKey, h.publishTopic)
}
messages[i].SetContext(ctx)
}
}
func (h *handler) handleClose(ctx context.Context) {
select {
case <-h.routersCloseCh:
// for backward compatibility we are closing subscriber
h.logger.Debug("Waiting for subscriber to close", nil)
if err := h.subscriber.Close(); err != nil {
h.logger.Error("Failed to close subscriber", err, nil)
}
h.logger.Debug("Subscriber closed", nil)
case <-ctx.Done():
// we are closing subscriber just when entire router is closed
}
h.stopFn()
}
func (h *handler) handleMessage(msg *Message, handler HandlerFunc) {
defer h.runningHandlersWg.Done()
msgFields := watermill.LogFields{"message_uuid": msg.UUID}
defer func() {
if recovered := recover(); recovered != nil {
h.logger.Error(
"Panic recovered in handler. Stack: "+string(debug.Stack()),
errors.Errorf("%s", recovered),
msgFields,
)
msg.Nack()
}
}()
h.logger.Trace("Received message", msgFields)
producedMessages, err := handler(msg)
if err != nil {
h.logger.Error("Handler returned error", err, nil)
msg.Nack()
return
}
h.addHandlerContext(producedMessages...)
if err := h.publishProducedMessages(producedMessages, msgFields); err != nil {
h.logger.Error("Publishing produced messages failed", err, nil)
msg.Nack()
return
}
msg.Ack()
h.logger.Trace("Message acked", msgFields)
}
func (h *handler) publishProducedMessages(producedMessages Messages, msgFields watermill.LogFields) error {
if len(producedMessages) == 0 {
return nil
}
if h.publisher == nil {
return ErrOutputInNoPublisherHandler
}
h.logger.Trace("Sending produced messages", msgFields.Add(watermill.LogFields{
"produced_messages_count": len(producedMessages),
"publish_topic": h.publishTopic,
}))
for _, msg := range producedMessages {
if err := h.publisher.Publish(h.publishTopic, msg); err != nil {
// todo - how to deal with it better/transactional/retry?
h.logger.Error("Cannot publish message", err, msgFields.Add(watermill.LogFields{
"not_sent_message": fmt.Sprintf("%#v", producedMessages),
}))
return err
}
}
return nil
}
type disabledPublisher struct{}
func (disabledPublisher) Publish(topic string, messages ...*Message) error {
return ErrOutputInNoPublisherHandler
}
func (disabledPublisher) Close() error {
return nil
}
| Stopped | identifier_name |
router.go | package message
import (
"context"
"fmt"
"runtime/debug"
"sync"
"time"
"github.com/pkg/errors"
"github.com/ThreeDotsLabs/watermill"
"github.com/ThreeDotsLabs/watermill/internal"
sync_internal "github.com/ThreeDotsLabs/watermill/pubsub/sync"
)
var (
// ErrOutputInNoPublisherHandler happens when a handler func returned some messages in a no-publisher handler.
// todo: maybe change the handler func signature in no-publisher handler so that there's no possibility for this
ErrOutputInNoPublisherHandler = errors.New("returned output messages in a handler without publisher")
)
// HandlerFunc is function called when message is received.
//
// msg.Ack() is called automatically when HandlerFunc doesn't return error.
// When HandlerFunc returns error, msg.Nack() is called.
// When msg.Ack() was called in handler and HandlerFunc returns error,
// msg.Nack() will be not sent because Ack was already sent.
//
// HandlerFunc's are executed parallel when multiple messages was received
// (because msg.Ack() was sent in HandlerFunc or Subscriber supports multiple consumers).
type HandlerFunc func(msg *Message) ([]*Message, error)
// NoPublishHandlerFunc is HandlerFunc alternative, which doesn't produce any messages.
type NoPublishHandlerFunc func(msg *Message) error
// PassthroughHandler is a handler that passes the message unchanged from the subscriber to the publisher.
var PassthroughHandler HandlerFunc = func(msg *Message) ([]*Message, error) {
return []*Message{msg}, nil
}
// HandlerMiddleware allows us to write something like decorators to HandlerFunc.
// It can execute something before handler (for example: modify consumed message)
// or after (modify produced messages, ack/nack on consumed message, handle errors, logging, etc.).
//
// It can be attached to the router by using `AddMiddleware` method.
//
// Example:
//
// func ExampleMiddleware(h message.HandlerFunc) message.HandlerFunc {
// return func(message *message.Message) ([]*message.Message, error) {
// fmt.Println("executed before handler")
// producedMessages, err := h(message)
// fmt.Println("executed after handler")
//
// return producedMessages, err
// }
// }
type HandlerMiddleware func(h HandlerFunc) HandlerFunc
// RouterPlugin is function which is executed on Router start.
type RouterPlugin func(*Router) error
// PublisherDecorator wraps the underlying Publisher, adding some functionality.
type PublisherDecorator func(pub Publisher) (Publisher, error)
// SubscriberDecorator wraps the underlying Subscriber, adding some functionality.
type SubscriberDecorator func(sub Subscriber) (Subscriber, error)
// RouterConfig holds the Router's configuration options.
type RouterConfig struct {
// CloseTimeout determines how long router should work for handlers when closing.
CloseTimeout time.Duration
}
func (c *RouterConfig) setDefaults() {
if c.CloseTimeout == 0 {
c.CloseTimeout = time.Second * 30
}
}
// Validate returns Router configuration error, if any.
func (c RouterConfig) Validate() error {
return nil
}
// NewRouter creates a new Router with given configuration.
func NewRouter(config RouterConfig, logger watermill.LoggerAdapter) (*Router, error) {
config.setDefaults()
if err := config.Validate(); err != nil {
return nil, errors.Wrap(err, "invalid config")
}
if logger == nil {
logger = watermill.NopLogger{}
}
return &Router{
config: config,
handlers: map[string]*handler{},
handlersWg: &sync.WaitGroup{},
runningHandlersWg: &sync.WaitGroup{},
runningHandlersWgLock: &sync.Mutex{},
handlerAdded: make(chan struct{}),
handlersLock: &sync.RWMutex{},
closingInProgressCh: make(chan struct{}),
closedCh: make(chan struct{}),
logger: logger,
running: make(chan struct{}),
}, nil
}
type middleware struct {
Handler HandlerMiddleware
HandlerName string
IsRouterLevel bool
}
// Router is responsible for handling messages from subscribers using provided handler functions.
//
// If the handler function returns a message, the message is published with the publisher.
// You can use middlewares to wrap handlers with common logic like logging, instrumentation, etc.
type Router struct {
config RouterConfig
middlewares []middleware
plugins []RouterPlugin
handlers map[string]*handler
handlersLock *sync.RWMutex
handlersWg *sync.WaitGroup
runningHandlersWg *sync.WaitGroup
runningHandlersWgLock *sync.Mutex
handlerAdded chan struct{}
closingInProgressCh chan struct{}
closedCh chan struct{}
closed bool
closedLock sync.Mutex
logger watermill.LoggerAdapter
publisherDecorators []PublisherDecorator
subscriberDecorators []SubscriberDecorator
isRunning bool
running chan struct{}
}
// Logger returns the Router's logger.
func (r *Router) Logger() watermill.LoggerAdapter {
return r.logger
}
// AddMiddleware adds a new middleware to the router.
//
// The order of middleware matters. Middleware added at the beginning is executed first.
func (r *Router) AddMiddleware(m ...HandlerMiddleware) {
r.logger.Debug("Adding middleware", watermill.LogFields{"count": fmt.Sprintf("%d", len(m))})
r.addRouterLevelMiddleware(m...)
}
func (r *Router) addRouterLevelMiddleware(m ...HandlerMiddleware) {
for _, handlerMiddleware := range m {
middleware := middleware{
Handler: handlerMiddleware,
HandlerName: "",
IsRouterLevel: true,
}
r.middlewares = append(r.middlewares, middleware)
}
}
func (r *Router) addHandlerLevelMiddleware(handlerName string, m ...HandlerMiddleware) {
for _, handlerMiddleware := range m {
middleware := middleware{
Handler: handlerMiddleware,
HandlerName: handlerName,
IsRouterLevel: false,
}
r.middlewares = append(r.middlewares, middleware)
}
}
// AddPlugin adds a new plugin to the router.
// Plugins are executed during startup of the router.
//
// A plugin can, for example, close the router after SIGINT or SIGTERM is sent to the process (SignalsHandler plugin).
func (r *Router) AddPlugin(p ...RouterPlugin) {
r.logger.Debug("Adding plugins", watermill.LogFields{"count": fmt.Sprintf("%d", len(p))})
r.plugins = append(r.plugins, p...)
}
// AddPublisherDecorators wraps the router's Publisher.
// The first decorator is the innermost, i.e. calls the original publisher.
func (r *Router) AddPublisherDecorators(dec ...PublisherDecorator) {
r.logger.Debug("Adding publisher decorators", watermill.LogFields{"count": fmt.Sprintf("%d", len(dec))})
r.publisherDecorators = append(r.publisherDecorators, dec...)
}
// AddSubscriberDecorators wraps the router's Subscriber.
// The first decorator is the innermost, i.e. calls the original subscriber.
func (r *Router) AddSubscriberDecorators(dec ...SubscriberDecorator) {
r.logger.Debug("Adding subscriber decorators", watermill.LogFields{"count": fmt.Sprintf("%d", len(dec))}) | // Handlers returns all registered handlers.
func (r *Router) Handlers() map[string]HandlerFunc {
handlers := map[string]HandlerFunc{}
for handlerName, handler := range r.handlers {
handlers[handlerName] = handler.handlerFunc
}
return handlers
}
// DuplicateHandlerNameError is sent in a panic when you try to add a second handler with the same name.
type DuplicateHandlerNameError struct {
HandlerName string
}
func (d DuplicateHandlerNameError) Error() string {
return fmt.Sprintf("handler with name %s already exists", d.HandlerName)
}
// AddHandler adds a new handler.
//
// handlerName must be unique. For now, it is used only for debugging.
//
// subscribeTopic is a topic from which handler will receive messages.
//
// publishTopic is a topic to which router will produce messages returned by handlerFunc.
// When handler needs to publish to multiple topics,
// it is recommended to just inject Publisher to Handler or implement middleware
// which will catch messages and publish to topic based on metadata for example.
//
// If handler is added while router is already running, you need to explicitly call RunHandlers().
func (r *Router) AddHandler(
handlerName string,
subscribeTopic string,
subscriber Subscriber,
publishTopic string,
publisher Publisher,
handlerFunc HandlerFunc,
) *Handler {
r.logger.Info("Adding handler", watermill.LogFields{
"handler_name": handlerName,
"topic": subscribeTopic,
})
r.handlersLock.Lock()
defer r.handlersLock.Unlock()
if _, ok := r.handlers[handlerName]; ok {
panic(DuplicateHandlerNameError{handlerName})
}
publisherName, subscriberName := internal.StructName(publisher), internal.StructName(subscriber)
newHandler := &handler{
name: handlerName,
logger: r.logger,
subscriber: subscriber,
subscribeTopic: subscribeTopic,
subscriberName: subscriberName,
publisher: publisher,
publishTopic: publishTopic,
publisherName: publisherName,
handlerFunc: handlerFunc,
runningHandlersWg: r.runningHandlersWg,
runningHandlersWgLock: r.runningHandlersWgLock,
messagesCh: nil,
routersCloseCh: r.closingInProgressCh,
startedCh: make(chan struct{}),
}
r.handlersWg.Add(1)
r.handlers[handlerName] = newHandler
select {
case r.handlerAdded <- struct{}{}:
default:
// closeWhenAllHandlersStopped is not always waiting for handlerAdded
}
return &Handler{
router: r,
handler: newHandler,
}
}
// AddNoPublisherHandler adds a new handler.
// This handler cannot return messages.
// When message is returned it will occur an error and Nack will be sent.
//
// handlerName must be unique. For now, it is used only for debugging.
//
// subscribeTopic is a topic from which handler will receive messages.
//
// subscriber is Subscriber from which messages will be consumed.
//
// If handler is added while router is already running, you need to explicitly call RunHandlers().
func (r *Router) AddNoPublisherHandler(
handlerName string,
subscribeTopic string,
subscriber Subscriber,
handlerFunc NoPublishHandlerFunc,
) *Handler {
handlerFuncAdapter := func(msg *Message) ([]*Message, error) {
return nil, handlerFunc(msg)
}
return r.AddHandler(handlerName, subscribeTopic, subscriber, "", disabledPublisher{}, handlerFuncAdapter)
}
// Run runs all plugins and handlers and starts subscribing to provided topics.
// This call is blocking while the router is running.
//
// When all handlers have stopped (for example, because subscriptions were closed), the router will also stop.
//
// To stop Run() you should call Close() on the router.
//
// ctx will be propagated to all subscribers.
//
// When all handlers are stopped (for example: because of closed connection), Run() will be also stopped.
func (r *Router) Run(ctx context.Context) (err error) {
if r.isRunning {
return errors.New("router is already running")
}
r.isRunning = true
ctx, cancel := context.WithCancel(ctx)
defer cancel()
r.logger.Debug("Loading plugins", nil)
for _, plugin := range r.plugins {
if err := plugin(r); err != nil {
return errors.Wrapf(err, "cannot initialize plugin %v", plugin)
}
}
if err := r.RunHandlers(ctx); err != nil {
return err
}
close(r.running)
go r.closeWhenAllHandlersStopped(ctx)
<-r.closingInProgressCh
cancel()
r.logger.Info("Waiting for messages", watermill.LogFields{
"timeout": r.config.CloseTimeout,
})
<-r.closedCh
r.logger.Info("All messages processed", nil)
return nil
}
// RunHandlers runs all handlers that were added after Run().
// RunHandlers is idempotent, so can be called multiple times safely.
func (r *Router) RunHandlers(ctx context.Context) error {
if !r.isRunning {
return errors.New("you can't call RunHandlers on non-running router")
}
r.handlersLock.Lock()
defer r.handlersLock.Unlock()
r.logger.Info("Running router handlers", watermill.LogFields{"count": len(r.handlers)})
for name, h := range r.handlers {
name := name
h := h
if h.started {
continue
}
if err := r.decorateHandlerPublisher(h); err != nil {
return errors.Wrapf(err, "could not decorate publisher of handler %s", name)
}
if err := r.decorateHandlerSubscriber(h); err != nil {
return errors.Wrapf(err, "could not decorate subscriber of handler %s", name)
}
r.logger.Debug("Subscribing to topic", watermill.LogFields{
"subscriber_name": h.name,
"topic": h.subscribeTopic,
})
ctx, cancel := context.WithCancel(ctx)
messages, err := h.subscriber.Subscribe(ctx, h.subscribeTopic)
if err != nil {
cancel()
return errors.Wrapf(err, "cannot subscribe topic %s", h.subscribeTopic)
}
h.messagesCh = messages
h.started = true
close(h.startedCh)
h.stopFn = cancel
h.stopped = make(chan struct{})
go func() {
defer cancel()
h.run(ctx, r.middlewares)
r.handlersWg.Done()
r.logger.Info("Subscriber stopped", watermill.LogFields{
"subscriber_name": h.name,
"topic": h.subscribeTopic,
})
r.handlersLock.Lock()
delete(r.handlers, name)
r.handlersLock.Unlock()
}()
}
return nil
}
// closeWhenAllHandlersStopped closed router, when all handlers has stopped,
// because for example all subscriptions are closed.
func (r *Router) closeWhenAllHandlersStopped(ctx context.Context) {
r.handlersLock.RLock()
hasHandlers := len(r.handlers) == 0
r.handlersLock.RUnlock()
if hasHandlers {
// in that situation router will be closed immediately (even if they are no routers)
// let's wait for
select {
case <-r.handlerAdded:
// it should be some handler to track
case <-r.closedCh:
// let's avoid goroutine leak
return
}
}
r.handlersWg.Wait()
if r.IsClosed() {
// already closed
return
}
// Only log an error if the context was not canceled, but handlers were stopped.
select {
case <-ctx.Done():
default:
r.logger.Error("All handlers stopped, closing router", errors.New("all router handlers stopped"), nil)
}
if err := r.Close(); err != nil {
r.logger.Error("Cannot close router", err, nil)
}
}
// Running is closed when router is running.
// In other words: you can wait till router is running using
//
// fmt.Println("Starting router")
// go r.Run(ctx)
// <- r.Running()
// fmt.Println("Router is running")
//
// Warning: for historical reasons, this channel is not aware of router closing - the channel will be closed if the router has been running and closed.
func (r *Router) Running() chan struct{} {
return r.running
}
// IsRunning returns true when router is running.
//
// Warning: for historical reasons, this method is not aware of router closing.
// If you want to know if the router was closed, use IsClosed.
func (r *Router) IsRunning() bool {
select {
case <-r.running:
return true
default:
return false
}
}
// Close gracefully closes the router with a timeout provided in the configuration.
func (r *Router) Close() error {
r.closedLock.Lock()
defer r.closedLock.Unlock()
r.handlersLock.Lock()
defer r.handlersLock.Unlock()
if r.closed {
return nil
}
r.closed = true
r.logger.Info("Closing router", nil)
defer r.logger.Info("Router closed", nil)
close(r.closingInProgressCh)
defer close(r.closedCh)
timeouted := r.waitForHandlers()
if timeouted {
return errors.New("router close timeout")
}
return nil
}
func (r *Router) waitForHandlers() bool {
var waitGroup sync.WaitGroup
waitGroup.Add(1)
go func() {
defer waitGroup.Done()
r.handlersWg.Wait()
}()
waitGroup.Add(1)
go func() {
defer waitGroup.Done()
r.runningHandlersWgLock.Lock()
defer r.runningHandlersWgLock.Unlock()
r.runningHandlersWg.Wait()
}()
return sync_internal.WaitGroupTimeout(&waitGroup, r.config.CloseTimeout)
}
func (r *Router) IsClosed() bool {
r.closedLock.Lock()
defer r.closedLock.Unlock()
return r.closed
}
type handler struct {
name string
logger watermill.LoggerAdapter
subscriber Subscriber
subscribeTopic string
subscriberName string
publisher Publisher
publishTopic string
publisherName string
handlerFunc HandlerFunc
runningHandlersWg *sync.WaitGroup
runningHandlersWgLock *sync.Mutex
messagesCh <-chan *Message
started bool
startedCh chan struct{}
stopFn context.CancelFunc
stopped chan struct{}
routersCloseCh chan struct{}
}
func (h *handler) run(ctx context.Context, middlewares []middleware) {
h.logger.Info("Starting handler", watermill.LogFields{
"subscriber_name": h.name,
"topic": h.subscribeTopic,
})
middlewareHandler := h.handlerFunc
// first added middlewares should be executed first (so should be at the top of call stack)
for i := len(middlewares) - 1; i >= 0; i-- {
currentMiddleware := middlewares[i]
isValidHandlerLevelMiddleware := currentMiddleware.HandlerName == h.name
if currentMiddleware.IsRouterLevel || isValidHandlerLevelMiddleware {
middlewareHandler = currentMiddleware.Handler(middlewareHandler)
}
}
go h.handleClose(ctx)
for msg := range h.messagesCh {
h.runningHandlersWgLock.Lock()
h.runningHandlersWg.Add(1)
h.runningHandlersWgLock.Unlock()
go h.handleMessage(msg, middlewareHandler)
}
if h.publisher != nil {
h.logger.Debug("Waiting for publisher to close", nil)
if err := h.publisher.Close(); err != nil {
h.logger.Error("Failed to close publisher", err, nil)
}
h.logger.Debug("Publisher closed", nil)
}
h.logger.Debug("Router handler stopped", nil)
close(h.stopped)
}
// Handler handles Messages.
type Handler struct {
router *Router
handler *handler
}
// AddMiddleware adds new middleware to the specified handler in the router.
//
// The order of middleware matters. Middleware added at the beginning is executed first.
func (h *Handler) AddMiddleware(m ...HandlerMiddleware) {
handler := h.handler
handler.logger.Debug("Adding middleware to handler", watermill.LogFields{
"count": fmt.Sprintf("%d", len(m)),
"handlerName": handler.name,
})
h.router.addHandlerLevelMiddleware(handler.name, m...)
}
// Started returns channel which is stopped when handler is running.
func (h *Handler) Started() chan struct{} {
return h.handler.startedCh
}
// Stop stops the handler.
// Stop is asynchronous.
// You can check if handler was stopped with Stopped() function.
func (h *Handler) Stop() {
if !h.handler.started {
panic("handler is not started")
}
h.handler.stopFn()
}
// Stopped returns channel which is stopped when handler did stop.
func (h *Handler) Stopped() chan struct{} {
return h.handler.stopped
}
// decorateHandlerPublisher applies the decorator chain to handler's publisher.
// They are applied in reverse order, so that the later decorators use the result of former ones.
func (r *Router) decorateHandlerPublisher(h *handler) error {
var err error
pub := h.publisher
for i := len(r.publisherDecorators) - 1; i >= 0; i-- {
decorator := r.publisherDecorators[i]
pub, err = decorator(pub)
if err != nil {
return errors.Wrap(err, "could not apply publisher decorator")
}
}
r.handlers[h.name].publisher = pub
return nil
}
// decorateHandlerSubscriber applies the decorator chain to handler's subscriber.
// They are applied in regular order, so that the later decorators use the result of former ones.
func (r *Router) decorateHandlerSubscriber(h *handler) error {
var err error
sub := h.subscriber
// add values to message context to subscriber
// it goes before other decorators, so that they may take advantage of these values
messageTransform := func(msg *Message) {
if msg != nil {
h.addHandlerContext(msg)
}
}
sub, err = MessageTransformSubscriberDecorator(messageTransform)(sub)
if err != nil {
return errors.Wrapf(err, "cannot wrap subscriber with context decorator")
}
for _, decorator := range r.subscriberDecorators {
sub, err = decorator(sub)
if err != nil {
return errors.Wrap(err, "could not apply subscriber decorator")
}
}
r.handlers[h.name].subscriber = sub
return nil
}
// addHandlerContext enriches the contex with values that are relevant within this handler's context.
func (h *handler) addHandlerContext(messages ...*Message) {
for i, msg := range messages {
ctx := msg.Context()
if h.name != "" {
ctx = context.WithValue(ctx, handlerNameKey, h.name)
}
if h.publisherName != "" {
ctx = context.WithValue(ctx, publisherNameKey, h.publisherName)
}
if h.subscriberName != "" {
ctx = context.WithValue(ctx, subscriberNameKey, h.subscriberName)
}
if h.subscribeTopic != "" {
ctx = context.WithValue(ctx, subscribeTopicKey, h.subscribeTopic)
}
if h.publishTopic != "" {
ctx = context.WithValue(ctx, publishTopicKey, h.publishTopic)
}
messages[i].SetContext(ctx)
}
}
func (h *handler) handleClose(ctx context.Context) {
select {
case <-h.routersCloseCh:
// for backward compatibility we are closing subscriber
h.logger.Debug("Waiting for subscriber to close", nil)
if err := h.subscriber.Close(); err != nil {
h.logger.Error("Failed to close subscriber", err, nil)
}
h.logger.Debug("Subscriber closed", nil)
case <-ctx.Done():
// we are closing subscriber just when entire router is closed
}
h.stopFn()
}
func (h *handler) handleMessage(msg *Message, handler HandlerFunc) {
defer h.runningHandlersWg.Done()
msgFields := watermill.LogFields{"message_uuid": msg.UUID}
defer func() {
if recovered := recover(); recovered != nil {
h.logger.Error(
"Panic recovered in handler. Stack: "+string(debug.Stack()),
errors.Errorf("%s", recovered),
msgFields,
)
msg.Nack()
}
}()
h.logger.Trace("Received message", msgFields)
producedMessages, err := handler(msg)
if err != nil {
h.logger.Error("Handler returned error", err, nil)
msg.Nack()
return
}
h.addHandlerContext(producedMessages...)
if err := h.publishProducedMessages(producedMessages, msgFields); err != nil {
h.logger.Error("Publishing produced messages failed", err, nil)
msg.Nack()
return
}
msg.Ack()
h.logger.Trace("Message acked", msgFields)
}
func (h *handler) publishProducedMessages(producedMessages Messages, msgFields watermill.LogFields) error {
if len(producedMessages) == 0 {
return nil
}
if h.publisher == nil {
return ErrOutputInNoPublisherHandler
}
h.logger.Trace("Sending produced messages", msgFields.Add(watermill.LogFields{
"produced_messages_count": len(producedMessages),
"publish_topic": h.publishTopic,
}))
for _, msg := range producedMessages {
if err := h.publisher.Publish(h.publishTopic, msg); err != nil {
// todo - how to deal with it better/transactional/retry?
h.logger.Error("Cannot publish message", err, msgFields.Add(watermill.LogFields{
"not_sent_message": fmt.Sprintf("%#v", producedMessages),
}))
return err
}
}
return nil
}
type disabledPublisher struct{}
func (disabledPublisher) Publish(topic string, messages ...*Message) error {
return ErrOutputInNoPublisherHandler
}
func (disabledPublisher) Close() error {
return nil
} |
r.subscriberDecorators = append(r.subscriberDecorators, dec...)
}
| random_line_split |
router.go | package message
import (
"context"
"fmt"
"runtime/debug"
"sync"
"time"
"github.com/pkg/errors"
"github.com/ThreeDotsLabs/watermill"
"github.com/ThreeDotsLabs/watermill/internal"
sync_internal "github.com/ThreeDotsLabs/watermill/pubsub/sync"
)
var (
// ErrOutputInNoPublisherHandler happens when a handler func returned some messages in a no-publisher handler.
// todo: maybe change the handler func signature in no-publisher handler so that there's no possibility for this
ErrOutputInNoPublisherHandler = errors.New("returned output messages in a handler without publisher")
)
// HandlerFunc is function called when message is received.
//
// msg.Ack() is called automatically when HandlerFunc doesn't return error.
// When HandlerFunc returns error, msg.Nack() is called.
// When msg.Ack() was called in handler and HandlerFunc returns error,
// msg.Nack() will be not sent because Ack was already sent.
//
// HandlerFunc's are executed parallel when multiple messages was received
// (because msg.Ack() was sent in HandlerFunc or Subscriber supports multiple consumers).
type HandlerFunc func(msg *Message) ([]*Message, error)
// NoPublishHandlerFunc is HandlerFunc alternative, which doesn't produce any messages.
type NoPublishHandlerFunc func(msg *Message) error
// PassthroughHandler is a handler that passes the message unchanged from the subscriber to the publisher.
var PassthroughHandler HandlerFunc = func(msg *Message) ([]*Message, error) {
return []*Message{msg}, nil
}
// HandlerMiddleware allows us to write something like decorators to HandlerFunc.
// It can execute something before handler (for example: modify consumed message)
// or after (modify produced messages, ack/nack on consumed message, handle errors, logging, etc.).
//
// It can be attached to the router by using `AddMiddleware` method.
//
// Example:
//
// func ExampleMiddleware(h message.HandlerFunc) message.HandlerFunc {
// return func(message *message.Message) ([]*message.Message, error) {
// fmt.Println("executed before handler")
// producedMessages, err := h(message)
// fmt.Println("executed after handler")
//
// return producedMessages, err
// }
// }
type HandlerMiddleware func(h HandlerFunc) HandlerFunc
// RouterPlugin is function which is executed on Router start.
type RouterPlugin func(*Router) error
// PublisherDecorator wraps the underlying Publisher, adding some functionality.
type PublisherDecorator func(pub Publisher) (Publisher, error)
// SubscriberDecorator wraps the underlying Subscriber, adding some functionality.
type SubscriberDecorator func(sub Subscriber) (Subscriber, error)
// RouterConfig holds the Router's configuration options.
type RouterConfig struct {
// CloseTimeout determines how long router should work for handlers when closing.
CloseTimeout time.Duration
}
func (c *RouterConfig) setDefaults() {
if c.CloseTimeout == 0 {
c.CloseTimeout = time.Second * 30
}
}
// Validate returns Router configuration error, if any.
func (c RouterConfig) Validate() error {
return nil
}
// NewRouter creates a new Router with given configuration.
func NewRouter(config RouterConfig, logger watermill.LoggerAdapter) (*Router, error) {
config.setDefaults()
if err := config.Validate(); err != nil {
return nil, errors.Wrap(err, "invalid config")
}
if logger == nil {
logger = watermill.NopLogger{}
}
return &Router{
config: config,
handlers: map[string]*handler{},
handlersWg: &sync.WaitGroup{},
runningHandlersWg: &sync.WaitGroup{},
runningHandlersWgLock: &sync.Mutex{},
handlerAdded: make(chan struct{}),
handlersLock: &sync.RWMutex{},
closingInProgressCh: make(chan struct{}),
closedCh: make(chan struct{}),
logger: logger,
running: make(chan struct{}),
}, nil
}
type middleware struct {
Handler HandlerMiddleware
HandlerName string
IsRouterLevel bool
}
// Router is responsible for handling messages from subscribers using provided handler functions.
//
// If the handler function returns a message, the message is published with the publisher.
// You can use middlewares to wrap handlers with common logic like logging, instrumentation, etc.
type Router struct {
config RouterConfig
middlewares []middleware
plugins []RouterPlugin
handlers map[string]*handler
handlersLock *sync.RWMutex
handlersWg *sync.WaitGroup
runningHandlersWg *sync.WaitGroup
runningHandlersWgLock *sync.Mutex
handlerAdded chan struct{}
closingInProgressCh chan struct{}
closedCh chan struct{}
closed bool
closedLock sync.Mutex
logger watermill.LoggerAdapter
publisherDecorators []PublisherDecorator
subscriberDecorators []SubscriberDecorator
isRunning bool
running chan struct{}
}
// Logger returns the Router's logger.
func (r *Router) Logger() watermill.LoggerAdapter {
return r.logger
}
// AddMiddleware adds a new middleware to the router.
//
// The order of middleware matters. Middleware added at the beginning is executed first.
func (r *Router) AddMiddleware(m ...HandlerMiddleware) {
r.logger.Debug("Adding middleware", watermill.LogFields{"count": fmt.Sprintf("%d", len(m))})
r.addRouterLevelMiddleware(m...)
}
func (r *Router) addRouterLevelMiddleware(m ...HandlerMiddleware) {
for _, handlerMiddleware := range m {
middleware := middleware{
Handler: handlerMiddleware,
HandlerName: "",
IsRouterLevel: true,
}
r.middlewares = append(r.middlewares, middleware)
}
}
func (r *Router) addHandlerLevelMiddleware(handlerName string, m ...HandlerMiddleware) {
for _, handlerMiddleware := range m {
middleware := middleware{
Handler: handlerMiddleware,
HandlerName: handlerName,
IsRouterLevel: false,
}
r.middlewares = append(r.middlewares, middleware)
}
}
// AddPlugin adds a new plugin to the router.
// Plugins are executed during startup of the router.
//
// A plugin can, for example, close the router after SIGINT or SIGTERM is sent to the process (SignalsHandler plugin).
func (r *Router) AddPlugin(p ...RouterPlugin) {
r.logger.Debug("Adding plugins", watermill.LogFields{"count": fmt.Sprintf("%d", len(p))})
r.plugins = append(r.plugins, p...)
}
// AddPublisherDecorators wraps the router's Publisher.
// The first decorator is the innermost, i.e. calls the original publisher.
func (r *Router) AddPublisherDecorators(dec ...PublisherDecorator) {
r.logger.Debug("Adding publisher decorators", watermill.LogFields{"count": fmt.Sprintf("%d", len(dec))})
r.publisherDecorators = append(r.publisherDecorators, dec...)
}
// AddSubscriberDecorators wraps the router's Subscriber.
// The first decorator is the innermost, i.e. calls the original subscriber.
func (r *Router) AddSubscriberDecorators(dec ...SubscriberDecorator) {
r.logger.Debug("Adding subscriber decorators", watermill.LogFields{"count": fmt.Sprintf("%d", len(dec))})
r.subscriberDecorators = append(r.subscriberDecorators, dec...)
}
// Handlers returns all registered handlers.
func (r *Router) Handlers() map[string]HandlerFunc {
handlers := map[string]HandlerFunc{}
for handlerName, handler := range r.handlers {
handlers[handlerName] = handler.handlerFunc
}
return handlers
}
// DuplicateHandlerNameError is sent in a panic when you try to add a second handler with the same name.
type DuplicateHandlerNameError struct {
HandlerName string
}
func (d DuplicateHandlerNameError) Error() string {
return fmt.Sprintf("handler with name %s already exists", d.HandlerName)
}
// AddHandler adds a new handler.
//
// handlerName must be unique. For now, it is used only for debugging.
//
// subscribeTopic is a topic from which handler will receive messages.
//
// publishTopic is a topic to which router will produce messages returned by handlerFunc.
// When handler needs to publish to multiple topics,
// it is recommended to just inject Publisher to Handler or implement middleware
// which will catch messages and publish to topic based on metadata for example.
//
// If handler is added while router is already running, you need to explicitly call RunHandlers().
func (r *Router) AddHandler(
handlerName string,
subscribeTopic string,
subscriber Subscriber,
publishTopic string,
publisher Publisher,
handlerFunc HandlerFunc,
) *Handler {
r.logger.Info("Adding handler", watermill.LogFields{
"handler_name": handlerName,
"topic": subscribeTopic,
})
r.handlersLock.Lock()
defer r.handlersLock.Unlock()
if _, ok := r.handlers[handlerName]; ok {
panic(DuplicateHandlerNameError{handlerName})
}
publisherName, subscriberName := internal.StructName(publisher), internal.StructName(subscriber)
newHandler := &handler{
name: handlerName,
logger: r.logger,
subscriber: subscriber,
subscribeTopic: subscribeTopic,
subscriberName: subscriberName,
publisher: publisher,
publishTopic: publishTopic,
publisherName: publisherName,
handlerFunc: handlerFunc,
runningHandlersWg: r.runningHandlersWg,
runningHandlersWgLock: r.runningHandlersWgLock,
messagesCh: nil,
routersCloseCh: r.closingInProgressCh,
startedCh: make(chan struct{}),
}
r.handlersWg.Add(1)
r.handlers[handlerName] = newHandler
select {
case r.handlerAdded <- struct{}{}:
default:
// closeWhenAllHandlersStopped is not always waiting for handlerAdded
}
return &Handler{
router: r,
handler: newHandler,
}
}
// AddNoPublisherHandler adds a new handler.
// This handler cannot return messages.
// When message is returned it will occur an error and Nack will be sent.
//
// handlerName must be unique. For now, it is used only for debugging.
//
// subscribeTopic is a topic from which handler will receive messages.
//
// subscriber is Subscriber from which messages will be consumed.
//
// If handler is added while router is already running, you need to explicitly call RunHandlers().
func (r *Router) AddNoPublisherHandler(
handlerName string,
subscribeTopic string,
subscriber Subscriber,
handlerFunc NoPublishHandlerFunc,
) *Handler {
handlerFuncAdapter := func(msg *Message) ([]*Message, error) {
return nil, handlerFunc(msg)
}
return r.AddHandler(handlerName, subscribeTopic, subscriber, "", disabledPublisher{}, handlerFuncAdapter)
}
// Run runs all plugins and handlers and starts subscribing to provided topics.
// This call is blocking while the router is running.
//
// When all handlers have stopped (for example, because subscriptions were closed), the router will also stop.
//
// To stop Run() you should call Close() on the router.
//
// ctx will be propagated to all subscribers.
//
// When all handlers are stopped (for example: because of closed connection), Run() will be also stopped.
func (r *Router) Run(ctx context.Context) (err error) {
if r.isRunning {
return errors.New("router is already running")
}
r.isRunning = true
ctx, cancel := context.WithCancel(ctx)
defer cancel()
r.logger.Debug("Loading plugins", nil)
for _, plugin := range r.plugins {
if err := plugin(r); err != nil {
return errors.Wrapf(err, "cannot initialize plugin %v", plugin)
}
}
if err := r.RunHandlers(ctx); err != nil {
return err
}
close(r.running)
go r.closeWhenAllHandlersStopped(ctx)
<-r.closingInProgressCh
cancel()
r.logger.Info("Waiting for messages", watermill.LogFields{
"timeout": r.config.CloseTimeout,
})
<-r.closedCh
r.logger.Info("All messages processed", nil)
return nil
}
// RunHandlers runs all handlers that were added after Run().
// RunHandlers is idempotent, so can be called multiple times safely.
func (r *Router) RunHandlers(ctx context.Context) error {
if !r.isRunning {
return errors.New("you can't call RunHandlers on non-running router")
}
r.handlersLock.Lock()
defer r.handlersLock.Unlock()
r.logger.Info("Running router handlers", watermill.LogFields{"count": len(r.handlers)})
for name, h := range r.handlers {
name := name
h := h
if h.started {
continue
}
if err := r.decorateHandlerPublisher(h); err != nil {
return errors.Wrapf(err, "could not decorate publisher of handler %s", name)
}
if err := r.decorateHandlerSubscriber(h); err != nil {
return errors.Wrapf(err, "could not decorate subscriber of handler %s", name)
}
r.logger.Debug("Subscribing to topic", watermill.LogFields{
"subscriber_name": h.name,
"topic": h.subscribeTopic,
})
ctx, cancel := context.WithCancel(ctx)
messages, err := h.subscriber.Subscribe(ctx, h.subscribeTopic)
if err != nil {
cancel()
return errors.Wrapf(err, "cannot subscribe topic %s", h.subscribeTopic)
}
h.messagesCh = messages
h.started = true
close(h.startedCh)
h.stopFn = cancel
h.stopped = make(chan struct{})
go func() {
defer cancel()
h.run(ctx, r.middlewares)
r.handlersWg.Done()
r.logger.Info("Subscriber stopped", watermill.LogFields{
"subscriber_name": h.name,
"topic": h.subscribeTopic,
})
r.handlersLock.Lock()
delete(r.handlers, name)
r.handlersLock.Unlock()
}()
}
return nil
}
// closeWhenAllHandlersStopped closed router, when all handlers has stopped,
// because for example all subscriptions are closed.
func (r *Router) closeWhenAllHandlersStopped(ctx context.Context) {
r.handlersLock.RLock()
hasHandlers := len(r.handlers) == 0
r.handlersLock.RUnlock()
if hasHandlers {
// in that situation router will be closed immediately (even if they are no routers)
// let's wait for
select {
case <-r.handlerAdded:
// it should be some handler to track
case <-r.closedCh:
// let's avoid goroutine leak
return
}
}
r.handlersWg.Wait()
if r.IsClosed() {
// already closed
return
}
// Only log an error if the context was not canceled, but handlers were stopped.
select {
case <-ctx.Done():
default:
r.logger.Error("All handlers stopped, closing router", errors.New("all router handlers stopped"), nil)
}
if err := r.Close(); err != nil {
r.logger.Error("Cannot close router", err, nil)
}
}
// Running is closed when router is running.
// In other words: you can wait till router is running using
//
// fmt.Println("Starting router")
// go r.Run(ctx)
// <- r.Running()
// fmt.Println("Router is running")
//
// Warning: for historical reasons, this channel is not aware of router closing - the channel will be closed if the router has been running and closed.
func (r *Router) Running() chan struct{} {
return r.running
}
// IsRunning returns true when router is running.
//
// Warning: for historical reasons, this method is not aware of router closing.
// If you want to know if the router was closed, use IsClosed.
func (r *Router) IsRunning() bool {
select {
case <-r.running:
return true
default:
return false
}
}
// Close gracefully closes the router with a timeout provided in the configuration.
func (r *Router) Close() error {
r.closedLock.Lock()
defer r.closedLock.Unlock()
r.handlersLock.Lock()
defer r.handlersLock.Unlock()
if r.closed {
return nil
}
r.closed = true
r.logger.Info("Closing router", nil)
defer r.logger.Info("Router closed", nil)
close(r.closingInProgressCh)
defer close(r.closedCh)
timeouted := r.waitForHandlers()
if timeouted {
return errors.New("router close timeout")
}
return nil
}
func (r *Router) waitForHandlers() bool {
var waitGroup sync.WaitGroup
waitGroup.Add(1)
go func() {
defer waitGroup.Done()
r.handlersWg.Wait()
}()
waitGroup.Add(1)
go func() {
defer waitGroup.Done()
r.runningHandlersWgLock.Lock()
defer r.runningHandlersWgLock.Unlock()
r.runningHandlersWg.Wait()
}()
return sync_internal.WaitGroupTimeout(&waitGroup, r.config.CloseTimeout)
}
func (r *Router) IsClosed() bool {
r.closedLock.Lock()
defer r.closedLock.Unlock()
return r.closed
}
type handler struct {
name string
logger watermill.LoggerAdapter
subscriber Subscriber
subscribeTopic string
subscriberName string
publisher Publisher
publishTopic string
publisherName string
handlerFunc HandlerFunc
runningHandlersWg *sync.WaitGroup
runningHandlersWgLock *sync.Mutex
messagesCh <-chan *Message
started bool
startedCh chan struct{}
stopFn context.CancelFunc
stopped chan struct{}
routersCloseCh chan struct{}
}
func (h *handler) run(ctx context.Context, middlewares []middleware) {
h.logger.Info("Starting handler", watermill.LogFields{
"subscriber_name": h.name,
"topic": h.subscribeTopic,
})
middlewareHandler := h.handlerFunc
// first added middlewares should be executed first (so should be at the top of call stack)
for i := len(middlewares) - 1; i >= 0; i-- {
currentMiddleware := middlewares[i]
isValidHandlerLevelMiddleware := currentMiddleware.HandlerName == h.name
if currentMiddleware.IsRouterLevel || isValidHandlerLevelMiddleware {
middlewareHandler = currentMiddleware.Handler(middlewareHandler)
}
}
go h.handleClose(ctx)
for msg := range h.messagesCh {
h.runningHandlersWgLock.Lock()
h.runningHandlersWg.Add(1)
h.runningHandlersWgLock.Unlock()
go h.handleMessage(msg, middlewareHandler)
}
if h.publisher != nil {
h.logger.Debug("Waiting for publisher to close", nil)
if err := h.publisher.Close(); err != nil {
h.logger.Error("Failed to close publisher", err, nil)
}
h.logger.Debug("Publisher closed", nil)
}
h.logger.Debug("Router handler stopped", nil)
close(h.stopped)
}
// Handler handles Messages.
type Handler struct {
router *Router
handler *handler
}
// AddMiddleware adds new middleware to the specified handler in the router.
//
// The order of middleware matters. Middleware added at the beginning is executed first.
func (h *Handler) AddMiddleware(m ...HandlerMiddleware) {
handler := h.handler
handler.logger.Debug("Adding middleware to handler", watermill.LogFields{
"count": fmt.Sprintf("%d", len(m)),
"handlerName": handler.name,
})
h.router.addHandlerLevelMiddleware(handler.name, m...)
}
// Started returns channel which is stopped when handler is running.
func (h *Handler) Started() chan struct{} {
return h.handler.startedCh
}
// Stop stops the handler.
// Stop is asynchronous.
// You can check if handler was stopped with Stopped() function.
func (h *Handler) Stop() {
if !h.handler.started {
panic("handler is not started")
}
h.handler.stopFn()
}
// Stopped returns channel which is stopped when handler did stop.
func (h *Handler) Stopped() chan struct{} {
return h.handler.stopped
}
// decorateHandlerPublisher applies the decorator chain to handler's publisher.
// They are applied in reverse order, so that the later decorators use the result of former ones.
func (r *Router) decorateHandlerPublisher(h *handler) error {
var err error
pub := h.publisher
for i := len(r.publisherDecorators) - 1; i >= 0; i-- {
decorator := r.publisherDecorators[i]
pub, err = decorator(pub)
if err != nil {
return errors.Wrap(err, "could not apply publisher decorator")
}
}
r.handlers[h.name].publisher = pub
return nil
}
// decorateHandlerSubscriber applies the decorator chain to handler's subscriber.
// They are applied in regular order, so that the later decorators use the result of former ones.
func (r *Router) decorateHandlerSubscriber(h *handler) error {
var err error
sub := h.subscriber
// add values to message context to subscriber
// it goes before other decorators, so that they may take advantage of these values
messageTransform := func(msg *Message) {
if msg != nil {
h.addHandlerContext(msg)
}
}
sub, err = MessageTransformSubscriberDecorator(messageTransform)(sub)
if err != nil {
return errors.Wrapf(err, "cannot wrap subscriber with context decorator")
}
for _, decorator := range r.subscriberDecorators {
sub, err = decorator(sub)
if err != nil {
return errors.Wrap(err, "could not apply subscriber decorator")
}
}
r.handlers[h.name].subscriber = sub
return nil
}
// addHandlerContext enriches the contex with values that are relevant within this handler's context.
func (h *handler) addHandlerContext(messages ...*Message) {
for i, msg := range messages |
}
func (h *handler) handleClose(ctx context.Context) {
select {
case <-h.routersCloseCh:
// for backward compatibility we are closing subscriber
h.logger.Debug("Waiting for subscriber to close", nil)
if err := h.subscriber.Close(); err != nil {
h.logger.Error("Failed to close subscriber", err, nil)
}
h.logger.Debug("Subscriber closed", nil)
case <-ctx.Done():
// we are closing subscriber just when entire router is closed
}
h.stopFn()
}
func (h *handler) handleMessage(msg *Message, handler HandlerFunc) {
defer h.runningHandlersWg.Done()
msgFields := watermill.LogFields{"message_uuid": msg.UUID}
defer func() {
if recovered := recover(); recovered != nil {
h.logger.Error(
"Panic recovered in handler. Stack: "+string(debug.Stack()),
errors.Errorf("%s", recovered),
msgFields,
)
msg.Nack()
}
}()
h.logger.Trace("Received message", msgFields)
producedMessages, err := handler(msg)
if err != nil {
h.logger.Error("Handler returned error", err, nil)
msg.Nack()
return
}
h.addHandlerContext(producedMessages...)
if err := h.publishProducedMessages(producedMessages, msgFields); err != nil {
h.logger.Error("Publishing produced messages failed", err, nil)
msg.Nack()
return
}
msg.Ack()
h.logger.Trace("Message acked", msgFields)
}
func (h *handler) publishProducedMessages(producedMessages Messages, msgFields watermill.LogFields) error {
if len(producedMessages) == 0 {
return nil
}
if h.publisher == nil {
return ErrOutputInNoPublisherHandler
}
h.logger.Trace("Sending produced messages", msgFields.Add(watermill.LogFields{
"produced_messages_count": len(producedMessages),
"publish_topic": h.publishTopic,
}))
for _, msg := range producedMessages {
if err := h.publisher.Publish(h.publishTopic, msg); err != nil {
// todo - how to deal with it better/transactional/retry?
h.logger.Error("Cannot publish message", err, msgFields.Add(watermill.LogFields{
"not_sent_message": fmt.Sprintf("%#v", producedMessages),
}))
return err
}
}
return nil
}
type disabledPublisher struct{}
func (disabledPublisher) Publish(topic string, messages ...*Message) error {
return ErrOutputInNoPublisherHandler
}
func (disabledPublisher) Close() error {
return nil
}
| {
ctx := msg.Context()
if h.name != "" {
ctx = context.WithValue(ctx, handlerNameKey, h.name)
}
if h.publisherName != "" {
ctx = context.WithValue(ctx, publisherNameKey, h.publisherName)
}
if h.subscriberName != "" {
ctx = context.WithValue(ctx, subscriberNameKey, h.subscriberName)
}
if h.subscribeTopic != "" {
ctx = context.WithValue(ctx, subscribeTopicKey, h.subscribeTopic)
}
if h.publishTopic != "" {
ctx = context.WithValue(ctx, publishTopicKey, h.publishTopic)
}
messages[i].SetContext(ctx)
} | conditional_block |
entry_query.go | // Code generated by ent, DO NOT EDIT.
package ent
import (
"context"
"fmt"
"math"
"entgo.io/ent/dialect/sql"
"entgo.io/ent/dialect/sql/sqlgraph"
"entgo.io/ent/schema/field"
"github.com/y-yagi/doco/ent/entry"
"github.com/y-yagi/doco/ent/predicate"
)
// EntryQuery is the builder for querying Entry entities.
type EntryQuery struct {
config
ctx *QueryContext
order []entry.OrderOption
inters []Interceptor
predicates []predicate.Entry
// intermediate query (i.e. traversal path).
sql *sql.Selector
path func(context.Context) (*sql.Selector, error)
}
// Where adds a new predicate for the EntryQuery builder.
func (eq *EntryQuery) Where(ps ...predicate.Entry) *EntryQuery {
eq.predicates = append(eq.predicates, ps...)
return eq
}
// Limit the number of records to be returned by this query.
func (eq *EntryQuery) Limit(limit int) *EntryQuery {
eq.ctx.Limit = &limit
return eq
}
// Offset to start from.
func (eq *EntryQuery) Offset(offset int) *EntryQuery {
eq.ctx.Offset = &offset
return eq
}
// Unique configures the query builder to filter duplicate records on query.
// By default, unique is set to true, and can be disabled using this method.
func (eq *EntryQuery) Unique(unique bool) *EntryQuery {
eq.ctx.Unique = &unique
return eq
}
// Order specifies how the records should be ordered.
func (eq *EntryQuery) Order(o ...entry.OrderOption) *EntryQuery {
eq.order = append(eq.order, o...)
return eq
}
// First returns the first Entry entity from the query.
// Returns a *NotFoundError when no Entry was found.
func (eq *EntryQuery) First(ctx context.Context) (*Entry, error) {
nodes, err := eq.Limit(1).All(setContextOp(ctx, eq.ctx, "First"))
if err != nil {
return nil, err
}
if len(nodes) == 0 {
return nil, &NotFoundError{entry.Label}
}
return nodes[0], nil
}
// FirstX is like First, but panics if an error occurs.
func (eq *EntryQuery) FirstX(ctx context.Context) *Entry {
node, err := eq.First(ctx)
if err != nil && !IsNotFound(err) {
panic(err)
}
return node
}
// FirstID returns the first Entry ID from the query.
// Returns a *NotFoundError when no Entry ID was found.
func (eq *EntryQuery) FirstID(ctx context.Context) (id int, err error) {
var ids []int
if ids, err = eq.Limit(1).IDs(setContextOp(ctx, eq.ctx, "FirstID")); err != nil {
return
}
if len(ids) == 0 {
err = &NotFoundError{entry.Label}
return
}
return ids[0], nil
}
// FirstIDX is like FirstID, but panics if an error occurs.
func (eq *EntryQuery) FirstIDX(ctx context.Context) int {
id, err := eq.FirstID(ctx)
if err != nil && !IsNotFound(err) {
panic(err)
}
return id
}
// Only returns a single Entry entity found by the query, ensuring it only returns one.
// Returns a *NotSingularError when more than one Entry entity is found.
// Returns a *NotFoundError when no Entry entities are found.
func (eq *EntryQuery) Only(ctx context.Context) (*Entry, error) {
nodes, err := eq.Limit(2).All(setContextOp(ctx, eq.ctx, "Only"))
if err != nil {
return nil, err
}
switch len(nodes) {
case 1:
return nodes[0], nil
case 0:
return nil, &NotFoundError{entry.Label}
default:
return nil, &NotSingularError{entry.Label}
}
}
// OnlyX is like Only, but panics if an error occurs.
func (eq *EntryQuery) OnlyX(ctx context.Context) *Entry {
node, err := eq.Only(ctx)
if err != nil {
panic(err)
}
return node
}
// OnlyID is like Only, but returns the only Entry ID in the query.
// Returns a *NotSingularError when more than one Entry ID is found.
// Returns a *NotFoundError when no entities are found.
func (eq *EntryQuery) OnlyID(ctx context.Context) (id int, err error) {
var ids []int
if ids, err = eq.Limit(2).IDs(setContextOp(ctx, eq.ctx, "OnlyID")); err != nil {
return
}
switch len(ids) {
case 1:
id = ids[0]
case 0:
err = &NotFoundError{entry.Label}
default:
err = &NotSingularError{entry.Label}
}
return
}
// OnlyIDX is like OnlyID, but panics if an error occurs.
func (eq *EntryQuery) OnlyIDX(ctx context.Context) int {
id, err := eq.OnlyID(ctx)
if err != nil {
panic(err)
}
return id
}
// All executes the query and returns a list of Entries.
func (eq *EntryQuery) All(ctx context.Context) ([]*Entry, error) {
ctx = setContextOp(ctx, eq.ctx, "All")
if err := eq.prepareQuery(ctx); err != nil {
return nil, err
}
qr := querierAll[[]*Entry, *EntryQuery]()
return withInterceptors[[]*Entry](ctx, eq, qr, eq.inters)
}
// AllX is like All, but panics if an error occurs.
func (eq *EntryQuery) AllX(ctx context.Context) []*Entry {
nodes, err := eq.All(ctx)
if err != nil {
panic(err)
}
return nodes
}
// IDs executes the query and returns a list of Entry IDs.
func (eq *EntryQuery) IDs(ctx context.Context) (ids []int, err error) {
if eq.ctx.Unique == nil && eq.path != nil {
eq.Unique(true)
}
ctx = setContextOp(ctx, eq.ctx, "IDs")
if err = eq.Select(entry.FieldID).Scan(ctx, &ids); err != nil {
return nil, err
}
return ids, nil
}
// IDsX is like IDs, but panics if an error occurs.
func (eq *EntryQuery) IDsX(ctx context.Context) []int {
ids, err := eq.IDs(ctx)
if err != nil {
panic(err)
}
return ids
}
// Count returns the count of the given query.
func (eq *EntryQuery) | (ctx context.Context) (int, error) {
ctx = setContextOp(ctx, eq.ctx, "Count")
if err := eq.prepareQuery(ctx); err != nil {
return 0, err
}
return withInterceptors[int](ctx, eq, querierCount[*EntryQuery](), eq.inters)
}
// CountX is like Count, but panics if an error occurs.
func (eq *EntryQuery) CountX(ctx context.Context) int {
count, err := eq.Count(ctx)
if err != nil {
panic(err)
}
return count
}
// Exist returns true if the query has elements in the graph.
func (eq *EntryQuery) Exist(ctx context.Context) (bool, error) {
ctx = setContextOp(ctx, eq.ctx, "Exist")
switch _, err := eq.FirstID(ctx); {
case IsNotFound(err):
return false, nil
case err != nil:
return false, fmt.Errorf("ent: check existence: %w", err)
default:
return true, nil
}
}
// ExistX is like Exist, but panics if an error occurs.
func (eq *EntryQuery) ExistX(ctx context.Context) bool {
exist, err := eq.Exist(ctx)
if err != nil {
panic(err)
}
return exist
}
// Clone returns a duplicate of the EntryQuery builder, including all associated steps. It can be
// used to prepare common query builders and use them differently after the clone is made.
func (eq *EntryQuery) Clone() *EntryQuery {
if eq == nil {
return nil
}
return &EntryQuery{
config: eq.config,
ctx: eq.ctx.Clone(),
order: append([]entry.OrderOption{}, eq.order...),
inters: append([]Interceptor{}, eq.inters...),
predicates: append([]predicate.Entry{}, eq.predicates...),
// clone intermediate query.
sql: eq.sql.Clone(),
path: eq.path,
}
}
// GroupBy is used to group vertices by one or more fields/columns.
// It is often used with aggregate functions, like: count, max, mean, min, sum.
//
// Example:
//
// var v []struct {
// Title string `json:"title,omitempty"`
// Count int `json:"count,omitempty"`
// }
//
// client.Entry.Query().
// GroupBy(entry.FieldTitle).
// Aggregate(ent.Count()).
// Scan(ctx, &v)
func (eq *EntryQuery) GroupBy(field string, fields ...string) *EntryGroupBy {
eq.ctx.Fields = append([]string{field}, fields...)
grbuild := &EntryGroupBy{build: eq}
grbuild.flds = &eq.ctx.Fields
grbuild.label = entry.Label
grbuild.scan = grbuild.Scan
return grbuild
}
// Select allows the selection one or more fields/columns for the given query,
// instead of selecting all fields in the entity.
//
// Example:
//
// var v []struct {
// Title string `json:"title,omitempty"`
// }
//
// client.Entry.Query().
// Select(entry.FieldTitle).
// Scan(ctx, &v)
func (eq *EntryQuery) Select(fields ...string) *EntrySelect {
eq.ctx.Fields = append(eq.ctx.Fields, fields...)
sbuild := &EntrySelect{EntryQuery: eq}
sbuild.label = entry.Label
sbuild.flds, sbuild.scan = &eq.ctx.Fields, sbuild.Scan
return sbuild
}
// Aggregate returns a EntrySelect configured with the given aggregations.
func (eq *EntryQuery) Aggregate(fns ...AggregateFunc) *EntrySelect {
return eq.Select().Aggregate(fns...)
}
func (eq *EntryQuery) prepareQuery(ctx context.Context) error {
for _, inter := range eq.inters {
if inter == nil {
return fmt.Errorf("ent: uninitialized interceptor (forgotten import ent/runtime?)")
}
if trv, ok := inter.(Traverser); ok {
if err := trv.Traverse(ctx, eq); err != nil {
return err
}
}
}
for _, f := range eq.ctx.Fields {
if !entry.ValidColumn(f) {
return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)}
}
}
if eq.path != nil {
prev, err := eq.path(ctx)
if err != nil {
return err
}
eq.sql = prev
}
return nil
}
func (eq *EntryQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*Entry, error) {
var (
nodes = []*Entry{}
_spec = eq.querySpec()
)
_spec.ScanValues = func(columns []string) ([]any, error) {
return (*Entry).scanValues(nil, columns)
}
_spec.Assign = func(columns []string, values []any) error {
node := &Entry{config: eq.config}
nodes = append(nodes, node)
return node.assignValues(columns, values)
}
for i := range hooks {
hooks[i](ctx, _spec)
}
if err := sqlgraph.QueryNodes(ctx, eq.driver, _spec); err != nil {
return nil, err
}
if len(nodes) == 0 {
return nodes, nil
}
return nodes, nil
}
func (eq *EntryQuery) sqlCount(ctx context.Context) (int, error) {
_spec := eq.querySpec()
_spec.Node.Columns = eq.ctx.Fields
if len(eq.ctx.Fields) > 0 {
_spec.Unique = eq.ctx.Unique != nil && *eq.ctx.Unique
}
return sqlgraph.CountNodes(ctx, eq.driver, _spec)
}
func (eq *EntryQuery) querySpec() *sqlgraph.QuerySpec {
_spec := sqlgraph.NewQuerySpec(entry.Table, entry.Columns, sqlgraph.NewFieldSpec(entry.FieldID, field.TypeInt))
_spec.From = eq.sql
if unique := eq.ctx.Unique; unique != nil {
_spec.Unique = *unique
} else if eq.path != nil {
_spec.Unique = true
}
if fields := eq.ctx.Fields; len(fields) > 0 {
_spec.Node.Columns = make([]string, 0, len(fields))
_spec.Node.Columns = append(_spec.Node.Columns, entry.FieldID)
for i := range fields {
if fields[i] != entry.FieldID {
_spec.Node.Columns = append(_spec.Node.Columns, fields[i])
}
}
}
if ps := eq.predicates; len(ps) > 0 {
_spec.Predicate = func(selector *sql.Selector) {
for i := range ps {
ps[i](selector)
}
}
}
if limit := eq.ctx.Limit; limit != nil {
_spec.Limit = *limit
}
if offset := eq.ctx.Offset; offset != nil {
_spec.Offset = *offset
}
if ps := eq.order; len(ps) > 0 {
_spec.Order = func(selector *sql.Selector) {
for i := range ps {
ps[i](selector)
}
}
}
return _spec
}
func (eq *EntryQuery) sqlQuery(ctx context.Context) *sql.Selector {
builder := sql.Dialect(eq.driver.Dialect())
t1 := builder.Table(entry.Table)
columns := eq.ctx.Fields
if len(columns) == 0 {
columns = entry.Columns
}
selector := builder.Select(t1.Columns(columns...)...).From(t1)
if eq.sql != nil {
selector = eq.sql
selector.Select(selector.Columns(columns...)...)
}
if eq.ctx.Unique != nil && *eq.ctx.Unique {
selector.Distinct()
}
for _, p := range eq.predicates {
p(selector)
}
for _, p := range eq.order {
p(selector)
}
if offset := eq.ctx.Offset; offset != nil {
// limit is mandatory for offset clause. We start
// with default value, and override it below if needed.
selector.Offset(*offset).Limit(math.MaxInt32)
}
if limit := eq.ctx.Limit; limit != nil {
selector.Limit(*limit)
}
return selector
}
// EntryGroupBy is the group-by builder for Entry entities.
type EntryGroupBy struct {
selector
build *EntryQuery
}
// Aggregate adds the given aggregation functions to the group-by query.
func (egb *EntryGroupBy) Aggregate(fns ...AggregateFunc) *EntryGroupBy {
egb.fns = append(egb.fns, fns...)
return egb
}
// Scan applies the selector query and scans the result into the given value.
func (egb *EntryGroupBy) Scan(ctx context.Context, v any) error {
ctx = setContextOp(ctx, egb.build.ctx, "GroupBy")
if err := egb.build.prepareQuery(ctx); err != nil {
return err
}
return scanWithInterceptors[*EntryQuery, *EntryGroupBy](ctx, egb.build, egb, egb.build.inters, v)
}
func (egb *EntryGroupBy) sqlScan(ctx context.Context, root *EntryQuery, v any) error {
selector := root.sqlQuery(ctx).Select()
aggregation := make([]string, 0, len(egb.fns))
for _, fn := range egb.fns {
aggregation = append(aggregation, fn(selector))
}
if len(selector.SelectedColumns()) == 0 {
columns := make([]string, 0, len(*egb.flds)+len(egb.fns))
for _, f := range *egb.flds {
columns = append(columns, selector.C(f))
}
columns = append(columns, aggregation...)
selector.Select(columns...)
}
selector.GroupBy(selector.Columns(*egb.flds...)...)
if err := selector.Err(); err != nil {
return err
}
rows := &sql.Rows{}
query, args := selector.Query()
if err := egb.build.driver.Query(ctx, query, args, rows); err != nil {
return err
}
defer rows.Close()
return sql.ScanSlice(rows, v)
}
// EntrySelect is the builder for selecting fields of Entry entities.
type EntrySelect struct {
*EntryQuery
selector
}
// Aggregate adds the given aggregation functions to the selector query.
func (es *EntrySelect) Aggregate(fns ...AggregateFunc) *EntrySelect {
es.fns = append(es.fns, fns...)
return es
}
// Scan applies the selector query and scans the result into the given value.
func (es *EntrySelect) Scan(ctx context.Context, v any) error {
ctx = setContextOp(ctx, es.ctx, "Select")
if err := es.prepareQuery(ctx); err != nil {
return err
}
return scanWithInterceptors[*EntryQuery, *EntrySelect](ctx, es.EntryQuery, es, es.inters, v)
}
func (es *EntrySelect) sqlScan(ctx context.Context, root *EntryQuery, v any) error {
selector := root.sqlQuery(ctx)
aggregation := make([]string, 0, len(es.fns))
for _, fn := range es.fns {
aggregation = append(aggregation, fn(selector))
}
switch n := len(*es.selector.flds); {
case n == 0 && len(aggregation) > 0:
selector.Select(aggregation...)
case n != 0 && len(aggregation) > 0:
selector.AppendSelect(aggregation...)
}
rows := &sql.Rows{}
query, args := selector.Query()
if err := es.driver.Query(ctx, query, args, rows); err != nil {
return err
}
defer rows.Close()
return sql.ScanSlice(rows, v)
}
| Count | identifier_name |
entry_query.go | // Code generated by ent, DO NOT EDIT.
package ent
import (
"context"
"fmt"
"math"
"entgo.io/ent/dialect/sql"
"entgo.io/ent/dialect/sql/sqlgraph"
"entgo.io/ent/schema/field"
"github.com/y-yagi/doco/ent/entry"
"github.com/y-yagi/doco/ent/predicate"
)
// EntryQuery is the builder for querying Entry entities.
type EntryQuery struct {
config
ctx *QueryContext
order []entry.OrderOption
inters []Interceptor
predicates []predicate.Entry
// intermediate query (i.e. traversal path).
sql *sql.Selector
path func(context.Context) (*sql.Selector, error)
}
// Where adds a new predicate for the EntryQuery builder.
func (eq *EntryQuery) Where(ps ...predicate.Entry) *EntryQuery {
eq.predicates = append(eq.predicates, ps...)
return eq
}
// Limit the number of records to be returned by this query.
func (eq *EntryQuery) Limit(limit int) *EntryQuery {
eq.ctx.Limit = &limit
return eq
}
// Offset to start from.
func (eq *EntryQuery) Offset(offset int) *EntryQuery {
eq.ctx.Offset = &offset
return eq
}
// Unique configures the query builder to filter duplicate records on query.
// By default, unique is set to true, and can be disabled using this method.
func (eq *EntryQuery) Unique(unique bool) *EntryQuery {
eq.ctx.Unique = &unique
return eq
}
// Order specifies how the records should be ordered.
func (eq *EntryQuery) Order(o ...entry.OrderOption) *EntryQuery {
eq.order = append(eq.order, o...)
return eq
}
// First returns the first Entry entity from the query.
// Returns a *NotFoundError when no Entry was found.
func (eq *EntryQuery) First(ctx context.Context) (*Entry, error) {
nodes, err := eq.Limit(1).All(setContextOp(ctx, eq.ctx, "First"))
if err != nil {
return nil, err
}
if len(nodes) == 0 {
return nil, &NotFoundError{entry.Label}
}
return nodes[0], nil
}
// FirstX is like First, but panics if an error occurs.
func (eq *EntryQuery) FirstX(ctx context.Context) *Entry {
node, err := eq.First(ctx)
if err != nil && !IsNotFound(err) {
panic(err)
}
return node
}
// FirstID returns the first Entry ID from the query.
// Returns a *NotFoundError when no Entry ID was found.
func (eq *EntryQuery) FirstID(ctx context.Context) (id int, err error) {
var ids []int
if ids, err = eq.Limit(1).IDs(setContextOp(ctx, eq.ctx, "FirstID")); err != nil {
return
}
if len(ids) == 0 {
err = &NotFoundError{entry.Label}
return
}
return ids[0], nil
}
// FirstIDX is like FirstID, but panics if an error occurs.
func (eq *EntryQuery) FirstIDX(ctx context.Context) int {
id, err := eq.FirstID(ctx)
if err != nil && !IsNotFound(err) {
panic(err)
}
return id
}
// Only returns a single Entry entity found by the query, ensuring it only returns one.
// Returns a *NotSingularError when more than one Entry entity is found.
// Returns a *NotFoundError when no Entry entities are found.
func (eq *EntryQuery) Only(ctx context.Context) (*Entry, error) {
nodes, err := eq.Limit(2).All(setContextOp(ctx, eq.ctx, "Only"))
if err != nil {
return nil, err
}
switch len(nodes) {
case 1:
return nodes[0], nil
case 0:
return nil, &NotFoundError{entry.Label}
default:
return nil, &NotSingularError{entry.Label}
}
}
// OnlyX is like Only, but panics if an error occurs.
func (eq *EntryQuery) OnlyX(ctx context.Context) *Entry {
node, err := eq.Only(ctx)
if err != nil {
panic(err)
}
return node
}
// OnlyID is like Only, but returns the only Entry ID in the query.
// Returns a *NotSingularError when more than one Entry ID is found.
// Returns a *NotFoundError when no entities are found.
func (eq *EntryQuery) OnlyID(ctx context.Context) (id int, err error) {
var ids []int
if ids, err = eq.Limit(2).IDs(setContextOp(ctx, eq.ctx, "OnlyID")); err != nil {
return
}
switch len(ids) {
case 1:
id = ids[0]
case 0:
err = &NotFoundError{entry.Label}
default:
err = &NotSingularError{entry.Label}
} | id, err := eq.OnlyID(ctx)
if err != nil {
panic(err)
}
return id
}
// All executes the query and returns a list of Entries.
func (eq *EntryQuery) All(ctx context.Context) ([]*Entry, error) {
ctx = setContextOp(ctx, eq.ctx, "All")
if err := eq.prepareQuery(ctx); err != nil {
return nil, err
}
qr := querierAll[[]*Entry, *EntryQuery]()
return withInterceptors[[]*Entry](ctx, eq, qr, eq.inters)
}
// AllX is like All, but panics if an error occurs.
func (eq *EntryQuery) AllX(ctx context.Context) []*Entry {
nodes, err := eq.All(ctx)
if err != nil {
panic(err)
}
return nodes
}
// IDs executes the query and returns a list of Entry IDs.
func (eq *EntryQuery) IDs(ctx context.Context) (ids []int, err error) {
if eq.ctx.Unique == nil && eq.path != nil {
eq.Unique(true)
}
ctx = setContextOp(ctx, eq.ctx, "IDs")
if err = eq.Select(entry.FieldID).Scan(ctx, &ids); err != nil {
return nil, err
}
return ids, nil
}
// IDsX is like IDs, but panics if an error occurs.
func (eq *EntryQuery) IDsX(ctx context.Context) []int {
ids, err := eq.IDs(ctx)
if err != nil {
panic(err)
}
return ids
}
// Count returns the count of the given query.
func (eq *EntryQuery) Count(ctx context.Context) (int, error) {
ctx = setContextOp(ctx, eq.ctx, "Count")
if err := eq.prepareQuery(ctx); err != nil {
return 0, err
}
return withInterceptors[int](ctx, eq, querierCount[*EntryQuery](), eq.inters)
}
// CountX is like Count, but panics if an error occurs.
func (eq *EntryQuery) CountX(ctx context.Context) int {
count, err := eq.Count(ctx)
if err != nil {
panic(err)
}
return count
}
// Exist returns true if the query has elements in the graph.
func (eq *EntryQuery) Exist(ctx context.Context) (bool, error) {
ctx = setContextOp(ctx, eq.ctx, "Exist")
switch _, err := eq.FirstID(ctx); {
case IsNotFound(err):
return false, nil
case err != nil:
return false, fmt.Errorf("ent: check existence: %w", err)
default:
return true, nil
}
}
// ExistX is like Exist, but panics if an error occurs.
func (eq *EntryQuery) ExistX(ctx context.Context) bool {
exist, err := eq.Exist(ctx)
if err != nil {
panic(err)
}
return exist
}
// Clone returns a duplicate of the EntryQuery builder, including all associated steps. It can be
// used to prepare common query builders and use them differently after the clone is made.
func (eq *EntryQuery) Clone() *EntryQuery {
if eq == nil {
return nil
}
return &EntryQuery{
config: eq.config,
ctx: eq.ctx.Clone(),
order: append([]entry.OrderOption{}, eq.order...),
inters: append([]Interceptor{}, eq.inters...),
predicates: append([]predicate.Entry{}, eq.predicates...),
// clone intermediate query.
sql: eq.sql.Clone(),
path: eq.path,
}
}
// GroupBy is used to group vertices by one or more fields/columns.
// It is often used with aggregate functions, like: count, max, mean, min, sum.
//
// Example:
//
// var v []struct {
// Title string `json:"title,omitempty"`
// Count int `json:"count,omitempty"`
// }
//
// client.Entry.Query().
// GroupBy(entry.FieldTitle).
// Aggregate(ent.Count()).
// Scan(ctx, &v)
func (eq *EntryQuery) GroupBy(field string, fields ...string) *EntryGroupBy {
eq.ctx.Fields = append([]string{field}, fields...)
grbuild := &EntryGroupBy{build: eq}
grbuild.flds = &eq.ctx.Fields
grbuild.label = entry.Label
grbuild.scan = grbuild.Scan
return grbuild
}
// Select allows the selection one or more fields/columns for the given query,
// instead of selecting all fields in the entity.
//
// Example:
//
// var v []struct {
// Title string `json:"title,omitempty"`
// }
//
// client.Entry.Query().
// Select(entry.FieldTitle).
// Scan(ctx, &v)
func (eq *EntryQuery) Select(fields ...string) *EntrySelect {
eq.ctx.Fields = append(eq.ctx.Fields, fields...)
sbuild := &EntrySelect{EntryQuery: eq}
sbuild.label = entry.Label
sbuild.flds, sbuild.scan = &eq.ctx.Fields, sbuild.Scan
return sbuild
}
// Aggregate returns a EntrySelect configured with the given aggregations.
func (eq *EntryQuery) Aggregate(fns ...AggregateFunc) *EntrySelect {
return eq.Select().Aggregate(fns...)
}
func (eq *EntryQuery) prepareQuery(ctx context.Context) error {
for _, inter := range eq.inters {
if inter == nil {
return fmt.Errorf("ent: uninitialized interceptor (forgotten import ent/runtime?)")
}
if trv, ok := inter.(Traverser); ok {
if err := trv.Traverse(ctx, eq); err != nil {
return err
}
}
}
for _, f := range eq.ctx.Fields {
if !entry.ValidColumn(f) {
return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)}
}
}
if eq.path != nil {
prev, err := eq.path(ctx)
if err != nil {
return err
}
eq.sql = prev
}
return nil
}
func (eq *EntryQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*Entry, error) {
var (
nodes = []*Entry{}
_spec = eq.querySpec()
)
_spec.ScanValues = func(columns []string) ([]any, error) {
return (*Entry).scanValues(nil, columns)
}
_spec.Assign = func(columns []string, values []any) error {
node := &Entry{config: eq.config}
nodes = append(nodes, node)
return node.assignValues(columns, values)
}
for i := range hooks {
hooks[i](ctx, _spec)
}
if err := sqlgraph.QueryNodes(ctx, eq.driver, _spec); err != nil {
return nil, err
}
if len(nodes) == 0 {
return nodes, nil
}
return nodes, nil
}
func (eq *EntryQuery) sqlCount(ctx context.Context) (int, error) {
_spec := eq.querySpec()
_spec.Node.Columns = eq.ctx.Fields
if len(eq.ctx.Fields) > 0 {
_spec.Unique = eq.ctx.Unique != nil && *eq.ctx.Unique
}
return sqlgraph.CountNodes(ctx, eq.driver, _spec)
}
func (eq *EntryQuery) querySpec() *sqlgraph.QuerySpec {
_spec := sqlgraph.NewQuerySpec(entry.Table, entry.Columns, sqlgraph.NewFieldSpec(entry.FieldID, field.TypeInt))
_spec.From = eq.sql
if unique := eq.ctx.Unique; unique != nil {
_spec.Unique = *unique
} else if eq.path != nil {
_spec.Unique = true
}
if fields := eq.ctx.Fields; len(fields) > 0 {
_spec.Node.Columns = make([]string, 0, len(fields))
_spec.Node.Columns = append(_spec.Node.Columns, entry.FieldID)
for i := range fields {
if fields[i] != entry.FieldID {
_spec.Node.Columns = append(_spec.Node.Columns, fields[i])
}
}
}
if ps := eq.predicates; len(ps) > 0 {
_spec.Predicate = func(selector *sql.Selector) {
for i := range ps {
ps[i](selector)
}
}
}
if limit := eq.ctx.Limit; limit != nil {
_spec.Limit = *limit
}
if offset := eq.ctx.Offset; offset != nil {
_spec.Offset = *offset
}
if ps := eq.order; len(ps) > 0 {
_spec.Order = func(selector *sql.Selector) {
for i := range ps {
ps[i](selector)
}
}
}
return _spec
}
func (eq *EntryQuery) sqlQuery(ctx context.Context) *sql.Selector {
builder := sql.Dialect(eq.driver.Dialect())
t1 := builder.Table(entry.Table)
columns := eq.ctx.Fields
if len(columns) == 0 {
columns = entry.Columns
}
selector := builder.Select(t1.Columns(columns...)...).From(t1)
if eq.sql != nil {
selector = eq.sql
selector.Select(selector.Columns(columns...)...)
}
if eq.ctx.Unique != nil && *eq.ctx.Unique {
selector.Distinct()
}
for _, p := range eq.predicates {
p(selector)
}
for _, p := range eq.order {
p(selector)
}
if offset := eq.ctx.Offset; offset != nil {
// limit is mandatory for offset clause. We start
// with default value, and override it below if needed.
selector.Offset(*offset).Limit(math.MaxInt32)
}
if limit := eq.ctx.Limit; limit != nil {
selector.Limit(*limit)
}
return selector
}
// EntryGroupBy is the group-by builder for Entry entities.
type EntryGroupBy struct {
selector
build *EntryQuery
}
// Aggregate adds the given aggregation functions to the group-by query.
func (egb *EntryGroupBy) Aggregate(fns ...AggregateFunc) *EntryGroupBy {
egb.fns = append(egb.fns, fns...)
return egb
}
// Scan applies the selector query and scans the result into the given value.
func (egb *EntryGroupBy) Scan(ctx context.Context, v any) error {
ctx = setContextOp(ctx, egb.build.ctx, "GroupBy")
if err := egb.build.prepareQuery(ctx); err != nil {
return err
}
return scanWithInterceptors[*EntryQuery, *EntryGroupBy](ctx, egb.build, egb, egb.build.inters, v)
}
func (egb *EntryGroupBy) sqlScan(ctx context.Context, root *EntryQuery, v any) error {
selector := root.sqlQuery(ctx).Select()
aggregation := make([]string, 0, len(egb.fns))
for _, fn := range egb.fns {
aggregation = append(aggregation, fn(selector))
}
if len(selector.SelectedColumns()) == 0 {
columns := make([]string, 0, len(*egb.flds)+len(egb.fns))
for _, f := range *egb.flds {
columns = append(columns, selector.C(f))
}
columns = append(columns, aggregation...)
selector.Select(columns...)
}
selector.GroupBy(selector.Columns(*egb.flds...)...)
if err := selector.Err(); err != nil {
return err
}
rows := &sql.Rows{}
query, args := selector.Query()
if err := egb.build.driver.Query(ctx, query, args, rows); err != nil {
return err
}
defer rows.Close()
return sql.ScanSlice(rows, v)
}
// EntrySelect is the builder for selecting fields of Entry entities.
type EntrySelect struct {
*EntryQuery
selector
}
// Aggregate adds the given aggregation functions to the selector query.
func (es *EntrySelect) Aggregate(fns ...AggregateFunc) *EntrySelect {
es.fns = append(es.fns, fns...)
return es
}
// Scan applies the selector query and scans the result into the given value.
func (es *EntrySelect) Scan(ctx context.Context, v any) error {
ctx = setContextOp(ctx, es.ctx, "Select")
if err := es.prepareQuery(ctx); err != nil {
return err
}
return scanWithInterceptors[*EntryQuery, *EntrySelect](ctx, es.EntryQuery, es, es.inters, v)
}
func (es *EntrySelect) sqlScan(ctx context.Context, root *EntryQuery, v any) error {
selector := root.sqlQuery(ctx)
aggregation := make([]string, 0, len(es.fns))
for _, fn := range es.fns {
aggregation = append(aggregation, fn(selector))
}
switch n := len(*es.selector.flds); {
case n == 0 && len(aggregation) > 0:
selector.Select(aggregation...)
case n != 0 && len(aggregation) > 0:
selector.AppendSelect(aggregation...)
}
rows := &sql.Rows{}
query, args := selector.Query()
if err := es.driver.Query(ctx, query, args, rows); err != nil {
return err
}
defer rows.Close()
return sql.ScanSlice(rows, v)
} | return
}
// OnlyIDX is like OnlyID, but panics if an error occurs.
func (eq *EntryQuery) OnlyIDX(ctx context.Context) int { | random_line_split |
entry_query.go | // Code generated by ent, DO NOT EDIT.
package ent
import (
"context"
"fmt"
"math"
"entgo.io/ent/dialect/sql"
"entgo.io/ent/dialect/sql/sqlgraph"
"entgo.io/ent/schema/field"
"github.com/y-yagi/doco/ent/entry"
"github.com/y-yagi/doco/ent/predicate"
)
// EntryQuery is the builder for querying Entry entities.
type EntryQuery struct {
config
ctx *QueryContext
order []entry.OrderOption
inters []Interceptor
predicates []predicate.Entry
// intermediate query (i.e. traversal path).
sql *sql.Selector
path func(context.Context) (*sql.Selector, error)
}
// Where adds a new predicate for the EntryQuery builder.
func (eq *EntryQuery) Where(ps ...predicate.Entry) *EntryQuery {
eq.predicates = append(eq.predicates, ps...)
return eq
}
// Limit the number of records to be returned by this query.
func (eq *EntryQuery) Limit(limit int) *EntryQuery {
eq.ctx.Limit = &limit
return eq
}
// Offset to start from.
func (eq *EntryQuery) Offset(offset int) *EntryQuery {
eq.ctx.Offset = &offset
return eq
}
// Unique configures the query builder to filter duplicate records on query.
// By default, unique is set to true, and can be disabled using this method.
func (eq *EntryQuery) Unique(unique bool) *EntryQuery {
eq.ctx.Unique = &unique
return eq
}
// Order specifies how the records should be ordered.
func (eq *EntryQuery) Order(o ...entry.OrderOption) *EntryQuery {
eq.order = append(eq.order, o...)
return eq
}
// First returns the first Entry entity from the query.
// Returns a *NotFoundError when no Entry was found.
func (eq *EntryQuery) First(ctx context.Context) (*Entry, error) {
nodes, err := eq.Limit(1).All(setContextOp(ctx, eq.ctx, "First"))
if err != nil {
return nil, err
}
if len(nodes) == 0 {
return nil, &NotFoundError{entry.Label}
}
return nodes[0], nil
}
// FirstX is like First, but panics if an error occurs.
func (eq *EntryQuery) FirstX(ctx context.Context) *Entry {
node, err := eq.First(ctx)
if err != nil && !IsNotFound(err) {
panic(err)
}
return node
}
// FirstID returns the first Entry ID from the query.
// Returns a *NotFoundError when no Entry ID was found.
func (eq *EntryQuery) FirstID(ctx context.Context) (id int, err error) {
var ids []int
if ids, err = eq.Limit(1).IDs(setContextOp(ctx, eq.ctx, "FirstID")); err != nil {
return
}
if len(ids) == 0 {
err = &NotFoundError{entry.Label}
return
}
return ids[0], nil
}
// FirstIDX is like FirstID, but panics if an error occurs.
func (eq *EntryQuery) FirstIDX(ctx context.Context) int {
id, err := eq.FirstID(ctx)
if err != nil && !IsNotFound(err) {
panic(err)
}
return id
}
// Only returns a single Entry entity found by the query, ensuring it only returns one.
// Returns a *NotSingularError when more than one Entry entity is found.
// Returns a *NotFoundError when no Entry entities are found.
func (eq *EntryQuery) Only(ctx context.Context) (*Entry, error) {
nodes, err := eq.Limit(2).All(setContextOp(ctx, eq.ctx, "Only"))
if err != nil {
return nil, err
}
switch len(nodes) {
case 1:
return nodes[0], nil
case 0:
return nil, &NotFoundError{entry.Label}
default:
return nil, &NotSingularError{entry.Label}
}
}
// OnlyX is like Only, but panics if an error occurs.
func (eq *EntryQuery) OnlyX(ctx context.Context) *Entry {
node, err := eq.Only(ctx)
if err != nil {
panic(err)
}
return node
}
// OnlyID is like Only, but returns the only Entry ID in the query.
// Returns a *NotSingularError when more than one Entry ID is found.
// Returns a *NotFoundError when no entities are found.
func (eq *EntryQuery) OnlyID(ctx context.Context) (id int, err error) {
var ids []int
if ids, err = eq.Limit(2).IDs(setContextOp(ctx, eq.ctx, "OnlyID")); err != nil {
return
}
switch len(ids) {
case 1:
id = ids[0]
case 0:
err = &NotFoundError{entry.Label}
default:
err = &NotSingularError{entry.Label}
}
return
}
// OnlyIDX is like OnlyID, but panics if an error occurs.
func (eq *EntryQuery) OnlyIDX(ctx context.Context) int {
id, err := eq.OnlyID(ctx)
if err != nil {
panic(err)
}
return id
}
// All executes the query and returns a list of Entries.
func (eq *EntryQuery) All(ctx context.Context) ([]*Entry, error) {
ctx = setContextOp(ctx, eq.ctx, "All")
if err := eq.prepareQuery(ctx); err != nil {
return nil, err
}
qr := querierAll[[]*Entry, *EntryQuery]()
return withInterceptors[[]*Entry](ctx, eq, qr, eq.inters)
}
// AllX is like All, but panics if an error occurs.
func (eq *EntryQuery) AllX(ctx context.Context) []*Entry {
nodes, err := eq.All(ctx)
if err != nil {
panic(err)
}
return nodes
}
// IDs executes the query and returns a list of Entry IDs.
func (eq *EntryQuery) IDs(ctx context.Context) (ids []int, err error) {
if eq.ctx.Unique == nil && eq.path != nil {
eq.Unique(true)
}
ctx = setContextOp(ctx, eq.ctx, "IDs")
if err = eq.Select(entry.FieldID).Scan(ctx, &ids); err != nil {
return nil, err
}
return ids, nil
}
// IDsX is like IDs, but panics if an error occurs.
func (eq *EntryQuery) IDsX(ctx context.Context) []int {
ids, err := eq.IDs(ctx)
if err != nil {
panic(err)
}
return ids
}
// Count returns the count of the given query.
func (eq *EntryQuery) Count(ctx context.Context) (int, error) {
ctx = setContextOp(ctx, eq.ctx, "Count")
if err := eq.prepareQuery(ctx); err != nil {
return 0, err
}
return withInterceptors[int](ctx, eq, querierCount[*EntryQuery](), eq.inters)
}
// CountX is like Count, but panics if an error occurs.
func (eq *EntryQuery) CountX(ctx context.Context) int {
count, err := eq.Count(ctx)
if err != nil {
panic(err)
}
return count
}
// Exist returns true if the query has elements in the graph.
func (eq *EntryQuery) Exist(ctx context.Context) (bool, error) {
ctx = setContextOp(ctx, eq.ctx, "Exist")
switch _, err := eq.FirstID(ctx); {
case IsNotFound(err):
return false, nil
case err != nil:
return false, fmt.Errorf("ent: check existence: %w", err)
default:
return true, nil
}
}
// ExistX is like Exist, but panics if an error occurs.
func (eq *EntryQuery) ExistX(ctx context.Context) bool {
exist, err := eq.Exist(ctx)
if err != nil {
panic(err)
}
return exist
}
// Clone returns a duplicate of the EntryQuery builder, including all associated steps. It can be
// used to prepare common query builders and use them differently after the clone is made.
func (eq *EntryQuery) Clone() *EntryQuery {
if eq == nil {
return nil
}
return &EntryQuery{
config: eq.config,
ctx: eq.ctx.Clone(),
order: append([]entry.OrderOption{}, eq.order...),
inters: append([]Interceptor{}, eq.inters...),
predicates: append([]predicate.Entry{}, eq.predicates...),
// clone intermediate query.
sql: eq.sql.Clone(),
path: eq.path,
}
}
// GroupBy is used to group vertices by one or more fields/columns.
// It is often used with aggregate functions, like: count, max, mean, min, sum.
//
// Example:
//
// var v []struct {
// Title string `json:"title,omitempty"`
// Count int `json:"count,omitempty"`
// }
//
// client.Entry.Query().
// GroupBy(entry.FieldTitle).
// Aggregate(ent.Count()).
// Scan(ctx, &v)
func (eq *EntryQuery) GroupBy(field string, fields ...string) *EntryGroupBy {
eq.ctx.Fields = append([]string{field}, fields...)
grbuild := &EntryGroupBy{build: eq}
grbuild.flds = &eq.ctx.Fields
grbuild.label = entry.Label
grbuild.scan = grbuild.Scan
return grbuild
}
// Select allows the selection one or more fields/columns for the given query,
// instead of selecting all fields in the entity.
//
// Example:
//
// var v []struct {
// Title string `json:"title,omitempty"`
// }
//
// client.Entry.Query().
// Select(entry.FieldTitle).
// Scan(ctx, &v)
func (eq *EntryQuery) Select(fields ...string) *EntrySelect {
eq.ctx.Fields = append(eq.ctx.Fields, fields...)
sbuild := &EntrySelect{EntryQuery: eq}
sbuild.label = entry.Label
sbuild.flds, sbuild.scan = &eq.ctx.Fields, sbuild.Scan
return sbuild
}
// Aggregate returns a EntrySelect configured with the given aggregations.
func (eq *EntryQuery) Aggregate(fns ...AggregateFunc) *EntrySelect {
return eq.Select().Aggregate(fns...)
}
func (eq *EntryQuery) prepareQuery(ctx context.Context) error {
for _, inter := range eq.inters {
if inter == nil {
return fmt.Errorf("ent: uninitialized interceptor (forgotten import ent/runtime?)")
}
if trv, ok := inter.(Traverser); ok {
if err := trv.Traverse(ctx, eq); err != nil |
}
}
for _, f := range eq.ctx.Fields {
if !entry.ValidColumn(f) {
return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)}
}
}
if eq.path != nil {
prev, err := eq.path(ctx)
if err != nil {
return err
}
eq.sql = prev
}
return nil
}
func (eq *EntryQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*Entry, error) {
var (
nodes = []*Entry{}
_spec = eq.querySpec()
)
_spec.ScanValues = func(columns []string) ([]any, error) {
return (*Entry).scanValues(nil, columns)
}
_spec.Assign = func(columns []string, values []any) error {
node := &Entry{config: eq.config}
nodes = append(nodes, node)
return node.assignValues(columns, values)
}
for i := range hooks {
hooks[i](ctx, _spec)
}
if err := sqlgraph.QueryNodes(ctx, eq.driver, _spec); err != nil {
return nil, err
}
if len(nodes) == 0 {
return nodes, nil
}
return nodes, nil
}
func (eq *EntryQuery) sqlCount(ctx context.Context) (int, error) {
_spec := eq.querySpec()
_spec.Node.Columns = eq.ctx.Fields
if len(eq.ctx.Fields) > 0 {
_spec.Unique = eq.ctx.Unique != nil && *eq.ctx.Unique
}
return sqlgraph.CountNodes(ctx, eq.driver, _spec)
}
func (eq *EntryQuery) querySpec() *sqlgraph.QuerySpec {
_spec := sqlgraph.NewQuerySpec(entry.Table, entry.Columns, sqlgraph.NewFieldSpec(entry.FieldID, field.TypeInt))
_spec.From = eq.sql
if unique := eq.ctx.Unique; unique != nil {
_spec.Unique = *unique
} else if eq.path != nil {
_spec.Unique = true
}
if fields := eq.ctx.Fields; len(fields) > 0 {
_spec.Node.Columns = make([]string, 0, len(fields))
_spec.Node.Columns = append(_spec.Node.Columns, entry.FieldID)
for i := range fields {
if fields[i] != entry.FieldID {
_spec.Node.Columns = append(_spec.Node.Columns, fields[i])
}
}
}
if ps := eq.predicates; len(ps) > 0 {
_spec.Predicate = func(selector *sql.Selector) {
for i := range ps {
ps[i](selector)
}
}
}
if limit := eq.ctx.Limit; limit != nil {
_spec.Limit = *limit
}
if offset := eq.ctx.Offset; offset != nil {
_spec.Offset = *offset
}
if ps := eq.order; len(ps) > 0 {
_spec.Order = func(selector *sql.Selector) {
for i := range ps {
ps[i](selector)
}
}
}
return _spec
}
func (eq *EntryQuery) sqlQuery(ctx context.Context) *sql.Selector {
builder := sql.Dialect(eq.driver.Dialect())
t1 := builder.Table(entry.Table)
columns := eq.ctx.Fields
if len(columns) == 0 {
columns = entry.Columns
}
selector := builder.Select(t1.Columns(columns...)...).From(t1)
if eq.sql != nil {
selector = eq.sql
selector.Select(selector.Columns(columns...)...)
}
if eq.ctx.Unique != nil && *eq.ctx.Unique {
selector.Distinct()
}
for _, p := range eq.predicates {
p(selector)
}
for _, p := range eq.order {
p(selector)
}
if offset := eq.ctx.Offset; offset != nil {
// limit is mandatory for offset clause. We start
// with default value, and override it below if needed.
selector.Offset(*offset).Limit(math.MaxInt32)
}
if limit := eq.ctx.Limit; limit != nil {
selector.Limit(*limit)
}
return selector
}
// EntryGroupBy is the group-by builder for Entry entities.
type EntryGroupBy struct {
selector
build *EntryQuery
}
// Aggregate adds the given aggregation functions to the group-by query.
func (egb *EntryGroupBy) Aggregate(fns ...AggregateFunc) *EntryGroupBy {
egb.fns = append(egb.fns, fns...)
return egb
}
// Scan applies the selector query and scans the result into the given value.
func (egb *EntryGroupBy) Scan(ctx context.Context, v any) error {
ctx = setContextOp(ctx, egb.build.ctx, "GroupBy")
if err := egb.build.prepareQuery(ctx); err != nil {
return err
}
return scanWithInterceptors[*EntryQuery, *EntryGroupBy](ctx, egb.build, egb, egb.build.inters, v)
}
func (egb *EntryGroupBy) sqlScan(ctx context.Context, root *EntryQuery, v any) error {
selector := root.sqlQuery(ctx).Select()
aggregation := make([]string, 0, len(egb.fns))
for _, fn := range egb.fns {
aggregation = append(aggregation, fn(selector))
}
if len(selector.SelectedColumns()) == 0 {
columns := make([]string, 0, len(*egb.flds)+len(egb.fns))
for _, f := range *egb.flds {
columns = append(columns, selector.C(f))
}
columns = append(columns, aggregation...)
selector.Select(columns...)
}
selector.GroupBy(selector.Columns(*egb.flds...)...)
if err := selector.Err(); err != nil {
return err
}
rows := &sql.Rows{}
query, args := selector.Query()
if err := egb.build.driver.Query(ctx, query, args, rows); err != nil {
return err
}
defer rows.Close()
return sql.ScanSlice(rows, v)
}
// EntrySelect is the builder for selecting fields of Entry entities.
type EntrySelect struct {
*EntryQuery
selector
}
// Aggregate adds the given aggregation functions to the selector query.
func (es *EntrySelect) Aggregate(fns ...AggregateFunc) *EntrySelect {
es.fns = append(es.fns, fns...)
return es
}
// Scan applies the selector query and scans the result into the given value.
func (es *EntrySelect) Scan(ctx context.Context, v any) error {
ctx = setContextOp(ctx, es.ctx, "Select")
if err := es.prepareQuery(ctx); err != nil {
return err
}
return scanWithInterceptors[*EntryQuery, *EntrySelect](ctx, es.EntryQuery, es, es.inters, v)
}
func (es *EntrySelect) sqlScan(ctx context.Context, root *EntryQuery, v any) error {
selector := root.sqlQuery(ctx)
aggregation := make([]string, 0, len(es.fns))
for _, fn := range es.fns {
aggregation = append(aggregation, fn(selector))
}
switch n := len(*es.selector.flds); {
case n == 0 && len(aggregation) > 0:
selector.Select(aggregation...)
case n != 0 && len(aggregation) > 0:
selector.AppendSelect(aggregation...)
}
rows := &sql.Rows{}
query, args := selector.Query()
if err := es.driver.Query(ctx, query, args, rows); err != nil {
return err
}
defer rows.Close()
return sql.ScanSlice(rows, v)
}
| {
return err
} | conditional_block |
entry_query.go | // Code generated by ent, DO NOT EDIT.
package ent
import (
"context"
"fmt"
"math"
"entgo.io/ent/dialect/sql"
"entgo.io/ent/dialect/sql/sqlgraph"
"entgo.io/ent/schema/field"
"github.com/y-yagi/doco/ent/entry"
"github.com/y-yagi/doco/ent/predicate"
)
// EntryQuery is the builder for querying Entry entities.
type EntryQuery struct {
config
ctx *QueryContext
order []entry.OrderOption
inters []Interceptor
predicates []predicate.Entry
// intermediate query (i.e. traversal path).
sql *sql.Selector
path func(context.Context) (*sql.Selector, error)
}
// Where adds a new predicate for the EntryQuery builder.
func (eq *EntryQuery) Where(ps ...predicate.Entry) *EntryQuery {
eq.predicates = append(eq.predicates, ps...)
return eq
}
// Limit the number of records to be returned by this query.
func (eq *EntryQuery) Limit(limit int) *EntryQuery {
eq.ctx.Limit = &limit
return eq
}
// Offset to start from.
func (eq *EntryQuery) Offset(offset int) *EntryQuery {
eq.ctx.Offset = &offset
return eq
}
// Unique configures the query builder to filter duplicate records on query.
// By default, unique is set to true, and can be disabled using this method.
func (eq *EntryQuery) Unique(unique bool) *EntryQuery {
eq.ctx.Unique = &unique
return eq
}
// Order specifies how the records should be ordered.
func (eq *EntryQuery) Order(o ...entry.OrderOption) *EntryQuery {
eq.order = append(eq.order, o...)
return eq
}
// First returns the first Entry entity from the query.
// Returns a *NotFoundError when no Entry was found.
func (eq *EntryQuery) First(ctx context.Context) (*Entry, error) {
nodes, err := eq.Limit(1).All(setContextOp(ctx, eq.ctx, "First"))
if err != nil {
return nil, err
}
if len(nodes) == 0 {
return nil, &NotFoundError{entry.Label}
}
return nodes[0], nil
}
// FirstX is like First, but panics if an error occurs.
func (eq *EntryQuery) FirstX(ctx context.Context) *Entry {
node, err := eq.First(ctx)
if err != nil && !IsNotFound(err) {
panic(err)
}
return node
}
// FirstID returns the first Entry ID from the query.
// Returns a *NotFoundError when no Entry ID was found.
func (eq *EntryQuery) FirstID(ctx context.Context) (id int, err error) {
var ids []int
if ids, err = eq.Limit(1).IDs(setContextOp(ctx, eq.ctx, "FirstID")); err != nil {
return
}
if len(ids) == 0 {
err = &NotFoundError{entry.Label}
return
}
return ids[0], nil
}
// FirstIDX is like FirstID, but panics if an error occurs.
func (eq *EntryQuery) FirstIDX(ctx context.Context) int {
id, err := eq.FirstID(ctx)
if err != nil && !IsNotFound(err) {
panic(err)
}
return id
}
// Only returns a single Entry entity found by the query, ensuring it only returns one.
// Returns a *NotSingularError when more than one Entry entity is found.
// Returns a *NotFoundError when no Entry entities are found.
func (eq *EntryQuery) Only(ctx context.Context) (*Entry, error) {
nodes, err := eq.Limit(2).All(setContextOp(ctx, eq.ctx, "Only"))
if err != nil {
return nil, err
}
switch len(nodes) {
case 1:
return nodes[0], nil
case 0:
return nil, &NotFoundError{entry.Label}
default:
return nil, &NotSingularError{entry.Label}
}
}
// OnlyX is like Only, but panics if an error occurs.
func (eq *EntryQuery) OnlyX(ctx context.Context) *Entry {
node, err := eq.Only(ctx)
if err != nil {
panic(err)
}
return node
}
// OnlyID is like Only, but returns the only Entry ID in the query.
// Returns a *NotSingularError when more than one Entry ID is found.
// Returns a *NotFoundError when no entities are found.
func (eq *EntryQuery) OnlyID(ctx context.Context) (id int, err error) {
var ids []int
if ids, err = eq.Limit(2).IDs(setContextOp(ctx, eq.ctx, "OnlyID")); err != nil {
return
}
switch len(ids) {
case 1:
id = ids[0]
case 0:
err = &NotFoundError{entry.Label}
default:
err = &NotSingularError{entry.Label}
}
return
}
// OnlyIDX is like OnlyID, but panics if an error occurs.
func (eq *EntryQuery) OnlyIDX(ctx context.Context) int {
id, err := eq.OnlyID(ctx)
if err != nil {
panic(err)
}
return id
}
// All executes the query and returns a list of Entries.
func (eq *EntryQuery) All(ctx context.Context) ([]*Entry, error) {
ctx = setContextOp(ctx, eq.ctx, "All")
if err := eq.prepareQuery(ctx); err != nil {
return nil, err
}
qr := querierAll[[]*Entry, *EntryQuery]()
return withInterceptors[[]*Entry](ctx, eq, qr, eq.inters)
}
// AllX is like All, but panics if an error occurs.
func (eq *EntryQuery) AllX(ctx context.Context) []*Entry {
nodes, err := eq.All(ctx)
if err != nil {
panic(err)
}
return nodes
}
// IDs executes the query and returns a list of Entry IDs.
func (eq *EntryQuery) IDs(ctx context.Context) (ids []int, err error) {
if eq.ctx.Unique == nil && eq.path != nil {
eq.Unique(true)
}
ctx = setContextOp(ctx, eq.ctx, "IDs")
if err = eq.Select(entry.FieldID).Scan(ctx, &ids); err != nil {
return nil, err
}
return ids, nil
}
// IDsX is like IDs, but panics if an error occurs.
func (eq *EntryQuery) IDsX(ctx context.Context) []int {
ids, err := eq.IDs(ctx)
if err != nil {
panic(err)
}
return ids
}
// Count returns the count of the given query.
func (eq *EntryQuery) Count(ctx context.Context) (int, error) {
ctx = setContextOp(ctx, eq.ctx, "Count")
if err := eq.prepareQuery(ctx); err != nil {
return 0, err
}
return withInterceptors[int](ctx, eq, querierCount[*EntryQuery](), eq.inters)
}
// CountX is like Count, but panics if an error occurs.
func (eq *EntryQuery) CountX(ctx context.Context) int {
count, err := eq.Count(ctx)
if err != nil {
panic(err)
}
return count
}
// Exist returns true if the query has elements in the graph.
func (eq *EntryQuery) Exist(ctx context.Context) (bool, error) {
ctx = setContextOp(ctx, eq.ctx, "Exist")
switch _, err := eq.FirstID(ctx); {
case IsNotFound(err):
return false, nil
case err != nil:
return false, fmt.Errorf("ent: check existence: %w", err)
default:
return true, nil
}
}
// ExistX is like Exist, but panics if an error occurs.
func (eq *EntryQuery) ExistX(ctx context.Context) bool {
exist, err := eq.Exist(ctx)
if err != nil {
panic(err)
}
return exist
}
// Clone returns a duplicate of the EntryQuery builder, including all associated steps. It can be
// used to prepare common query builders and use them differently after the clone is made.
func (eq *EntryQuery) Clone() *EntryQuery {
if eq == nil {
return nil
}
return &EntryQuery{
config: eq.config,
ctx: eq.ctx.Clone(),
order: append([]entry.OrderOption{}, eq.order...),
inters: append([]Interceptor{}, eq.inters...),
predicates: append([]predicate.Entry{}, eq.predicates...),
// clone intermediate query.
sql: eq.sql.Clone(),
path: eq.path,
}
}
// GroupBy is used to group vertices by one or more fields/columns.
// It is often used with aggregate functions, like: count, max, mean, min, sum.
//
// Example:
//
// var v []struct {
// Title string `json:"title,omitempty"`
// Count int `json:"count,omitempty"`
// }
//
// client.Entry.Query().
// GroupBy(entry.FieldTitle).
// Aggregate(ent.Count()).
// Scan(ctx, &v)
func (eq *EntryQuery) GroupBy(field string, fields ...string) *EntryGroupBy {
eq.ctx.Fields = append([]string{field}, fields...)
grbuild := &EntryGroupBy{build: eq}
grbuild.flds = &eq.ctx.Fields
grbuild.label = entry.Label
grbuild.scan = grbuild.Scan
return grbuild
}
// Select allows the selection one or more fields/columns for the given query,
// instead of selecting all fields in the entity.
//
// Example:
//
// var v []struct {
// Title string `json:"title,omitempty"`
// }
//
// client.Entry.Query().
// Select(entry.FieldTitle).
// Scan(ctx, &v)
func (eq *EntryQuery) Select(fields ...string) *EntrySelect {
eq.ctx.Fields = append(eq.ctx.Fields, fields...)
sbuild := &EntrySelect{EntryQuery: eq}
sbuild.label = entry.Label
sbuild.flds, sbuild.scan = &eq.ctx.Fields, sbuild.Scan
return sbuild
}
// Aggregate returns a EntrySelect configured with the given aggregations.
func (eq *EntryQuery) Aggregate(fns ...AggregateFunc) *EntrySelect {
return eq.Select().Aggregate(fns...)
}
func (eq *EntryQuery) prepareQuery(ctx context.Context) error |
func (eq *EntryQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*Entry, error) {
var (
nodes = []*Entry{}
_spec = eq.querySpec()
)
_spec.ScanValues = func(columns []string) ([]any, error) {
return (*Entry).scanValues(nil, columns)
}
_spec.Assign = func(columns []string, values []any) error {
node := &Entry{config: eq.config}
nodes = append(nodes, node)
return node.assignValues(columns, values)
}
for i := range hooks {
hooks[i](ctx, _spec)
}
if err := sqlgraph.QueryNodes(ctx, eq.driver, _spec); err != nil {
return nil, err
}
if len(nodes) == 0 {
return nodes, nil
}
return nodes, nil
}
func (eq *EntryQuery) sqlCount(ctx context.Context) (int, error) {
_spec := eq.querySpec()
_spec.Node.Columns = eq.ctx.Fields
if len(eq.ctx.Fields) > 0 {
_spec.Unique = eq.ctx.Unique != nil && *eq.ctx.Unique
}
return sqlgraph.CountNodes(ctx, eq.driver, _spec)
}
func (eq *EntryQuery) querySpec() *sqlgraph.QuerySpec {
_spec := sqlgraph.NewQuerySpec(entry.Table, entry.Columns, sqlgraph.NewFieldSpec(entry.FieldID, field.TypeInt))
_spec.From = eq.sql
if unique := eq.ctx.Unique; unique != nil {
_spec.Unique = *unique
} else if eq.path != nil {
_spec.Unique = true
}
if fields := eq.ctx.Fields; len(fields) > 0 {
_spec.Node.Columns = make([]string, 0, len(fields))
_spec.Node.Columns = append(_spec.Node.Columns, entry.FieldID)
for i := range fields {
if fields[i] != entry.FieldID {
_spec.Node.Columns = append(_spec.Node.Columns, fields[i])
}
}
}
if ps := eq.predicates; len(ps) > 0 {
_spec.Predicate = func(selector *sql.Selector) {
for i := range ps {
ps[i](selector)
}
}
}
if limit := eq.ctx.Limit; limit != nil {
_spec.Limit = *limit
}
if offset := eq.ctx.Offset; offset != nil {
_spec.Offset = *offset
}
if ps := eq.order; len(ps) > 0 {
_spec.Order = func(selector *sql.Selector) {
for i := range ps {
ps[i](selector)
}
}
}
return _spec
}
func (eq *EntryQuery) sqlQuery(ctx context.Context) *sql.Selector {
builder := sql.Dialect(eq.driver.Dialect())
t1 := builder.Table(entry.Table)
columns := eq.ctx.Fields
if len(columns) == 0 {
columns = entry.Columns
}
selector := builder.Select(t1.Columns(columns...)...).From(t1)
if eq.sql != nil {
selector = eq.sql
selector.Select(selector.Columns(columns...)...)
}
if eq.ctx.Unique != nil && *eq.ctx.Unique {
selector.Distinct()
}
for _, p := range eq.predicates {
p(selector)
}
for _, p := range eq.order {
p(selector)
}
if offset := eq.ctx.Offset; offset != nil {
// limit is mandatory for offset clause. We start
// with default value, and override it below if needed.
selector.Offset(*offset).Limit(math.MaxInt32)
}
if limit := eq.ctx.Limit; limit != nil {
selector.Limit(*limit)
}
return selector
}
// EntryGroupBy is the group-by builder for Entry entities.
type EntryGroupBy struct {
selector
build *EntryQuery
}
// Aggregate adds the given aggregation functions to the group-by query.
func (egb *EntryGroupBy) Aggregate(fns ...AggregateFunc) *EntryGroupBy {
egb.fns = append(egb.fns, fns...)
return egb
}
// Scan applies the selector query and scans the result into the given value.
func (egb *EntryGroupBy) Scan(ctx context.Context, v any) error {
ctx = setContextOp(ctx, egb.build.ctx, "GroupBy")
if err := egb.build.prepareQuery(ctx); err != nil {
return err
}
return scanWithInterceptors[*EntryQuery, *EntryGroupBy](ctx, egb.build, egb, egb.build.inters, v)
}
func (egb *EntryGroupBy) sqlScan(ctx context.Context, root *EntryQuery, v any) error {
selector := root.sqlQuery(ctx).Select()
aggregation := make([]string, 0, len(egb.fns))
for _, fn := range egb.fns {
aggregation = append(aggregation, fn(selector))
}
if len(selector.SelectedColumns()) == 0 {
columns := make([]string, 0, len(*egb.flds)+len(egb.fns))
for _, f := range *egb.flds {
columns = append(columns, selector.C(f))
}
columns = append(columns, aggregation...)
selector.Select(columns...)
}
selector.GroupBy(selector.Columns(*egb.flds...)...)
if err := selector.Err(); err != nil {
return err
}
rows := &sql.Rows{}
query, args := selector.Query()
if err := egb.build.driver.Query(ctx, query, args, rows); err != nil {
return err
}
defer rows.Close()
return sql.ScanSlice(rows, v)
}
// EntrySelect is the builder for selecting fields of Entry entities.
type EntrySelect struct {
*EntryQuery
selector
}
// Aggregate adds the given aggregation functions to the selector query.
func (es *EntrySelect) Aggregate(fns ...AggregateFunc) *EntrySelect {
es.fns = append(es.fns, fns...)
return es
}
// Scan applies the selector query and scans the result into the given value.
func (es *EntrySelect) Scan(ctx context.Context, v any) error {
ctx = setContextOp(ctx, es.ctx, "Select")
if err := es.prepareQuery(ctx); err != nil {
return err
}
return scanWithInterceptors[*EntryQuery, *EntrySelect](ctx, es.EntryQuery, es, es.inters, v)
}
func (es *EntrySelect) sqlScan(ctx context.Context, root *EntryQuery, v any) error {
selector := root.sqlQuery(ctx)
aggregation := make([]string, 0, len(es.fns))
for _, fn := range es.fns {
aggregation = append(aggregation, fn(selector))
}
switch n := len(*es.selector.flds); {
case n == 0 && len(aggregation) > 0:
selector.Select(aggregation...)
case n != 0 && len(aggregation) > 0:
selector.AppendSelect(aggregation...)
}
rows := &sql.Rows{}
query, args := selector.Query()
if err := es.driver.Query(ctx, query, args, rows); err != nil {
return err
}
defer rows.Close()
return sql.ScanSlice(rows, v)
}
| {
for _, inter := range eq.inters {
if inter == nil {
return fmt.Errorf("ent: uninitialized interceptor (forgotten import ent/runtime?)")
}
if trv, ok := inter.(Traverser); ok {
if err := trv.Traverse(ctx, eq); err != nil {
return err
}
}
}
for _, f := range eq.ctx.Fields {
if !entry.ValidColumn(f) {
return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)}
}
}
if eq.path != nil {
prev, err := eq.path(ctx)
if err != nil {
return err
}
eq.sql = prev
}
return nil
} | identifier_body |
volumes.go | package scheduler
import (
"fmt"
"strings"
"github.com/moby/swarmkit/v2/api"
"github.com/moby/swarmkit/v2/manager/state/store"
)
// the scheduler package does double duty -- in addition to choosing nodes, it
// must also choose volumes. this is because volumes are fungible, and can be
// scheduled to several nodes, and used by several tasks. we should endeavor to
// spread tasks across volumes, like we spread nodes. on the positive side,
// unlike nodes, volumes are not heirarchical. that is, we don't need to
// spread across multiple levels of a tree, only a flat set.
// volumeSet is the set of all volumes currently managed
type volumeSet struct {
// volumes is a mapping of volume IDs to volumeInfo
volumes map[string]volumeInfo
// byGroup is a mapping from a volume group name to a set of volumes in
// that group
byGroup map[string]map[string]struct{}
// byName is a mapping of volume names to swarmkit volume IDs.
byName map[string]string
}
// volumeUsage contains information about the usage of a Volume by a specific
// task.
type volumeUsage struct {
nodeID string
readOnly bool
}
// volumeInfo contains scheduler information about a given volume
type volumeInfo struct {
volume *api.Volume
tasks map[string]volumeUsage
// nodes is a set of nodes a volume is in use on. it maps a node ID to a
// reference count for how many tasks are using the volume on that node.
nodes map[string]int
}
func | () *volumeSet {
return &volumeSet{
volumes: map[string]volumeInfo{},
byGroup: map[string]map[string]struct{}{},
byName: map[string]string{},
}
}
// getVolume returns the volume object for the given ID as stored in the
// volumeSet, or nil if none exists.
//
//nolint:unused // TODO(thaJeztah) this is currently unused: is it safe to remove?
func (vs *volumeSet) getVolume(id string) *api.Volume {
return vs.volumes[id].volume
}
func (vs *volumeSet) addOrUpdateVolume(v *api.Volume) {
if info, ok := vs.volumes[v.ID]; !ok {
vs.volumes[v.ID] = volumeInfo{
volume: v,
nodes: map[string]int{},
tasks: map[string]volumeUsage{},
}
} else {
// if the volume already exists in the set, then only update the volume
// object, not the tasks map.
info.volume = v
}
if set, ok := vs.byGroup[v.Spec.Group]; ok {
set[v.ID] = struct{}{}
} else {
vs.byGroup[v.Spec.Group] = map[string]struct{}{v.ID: {}}
}
vs.byName[v.Spec.Annotations.Name] = v.ID
}
//nolint:unused // only used in tests.
func (vs *volumeSet) removeVolume(volumeID string) {
if info, ok := vs.volumes[volumeID]; ok {
// if the volume exists in the set, look up its group ID and remove it
// from the byGroup mapping as well
group := info.volume.Spec.Group
delete(vs.byGroup[group], volumeID)
delete(vs.volumes, volumeID)
delete(vs.byName, info.volume.Spec.Annotations.Name)
}
}
// chooseTaskVolumes selects a set of VolumeAttachments for the task on the
// given node. it expects that the node was already validated to have the
// necessary volumes, but it will return an error if a full set of volumes is
// not available.
func (vs *volumeSet) chooseTaskVolumes(task *api.Task, nodeInfo *NodeInfo) ([]*api.VolumeAttachment, error) {
volumes := []*api.VolumeAttachment{}
// we'll reserve volumes in this loop, but release all of our reservations
// before we finish. the caller will need to call reserveTaskVolumes after
// calling this function
// TODO(dperny): this is probably not optimal
defer func() {
for _, volume := range volumes {
vs.releaseVolume(volume.ID, task.ID)
}
}()
// TODO(dperny): handle non-container tasks
c := task.Spec.GetContainer()
if c == nil {
return nil, nil
}
for _, mount := range task.Spec.GetContainer().Mounts {
if mount.Type == api.MountTypeCluster {
candidate := vs.isVolumeAvailableOnNode(&mount, nodeInfo)
if candidate == "" {
// TODO(dperny): return structured error types, instead of
// error strings
return nil, fmt.Errorf("cannot find volume to satisfy mount with source %v", mount.Source)
}
vs.reserveVolume(candidate, task.ID, nodeInfo.Node.ID, mount.ReadOnly)
volumes = append(volumes, &api.VolumeAttachment{
ID: candidate,
Source: mount.Source,
Target: mount.Target,
})
}
}
return volumes, nil
}
// reserveTaskVolumes identifies all volumes currently in use on a task and
// marks them in the volumeSet as in use.
func (vs *volumeSet) reserveTaskVolumes(task *api.Task) {
for _, va := range task.Volumes {
// we shouldn't need to handle non-container tasks because those tasks
// won't have any entries in task.Volumes.
for _, mount := range task.Spec.GetContainer().Mounts {
if mount.Source == va.Source && mount.Target == va.Target {
vs.reserveVolume(va.ID, task.ID, task.NodeID, mount.ReadOnly)
}
}
}
}
func (vs *volumeSet) reserveVolume(volumeID, taskID, nodeID string, readOnly bool) {
info, ok := vs.volumes[volumeID]
if !ok {
// TODO(dperny): don't just return nothing.
return
}
info.tasks[taskID] = volumeUsage{nodeID: nodeID, readOnly: readOnly}
// increment the reference count for this node.
info.nodes[nodeID] = info.nodes[nodeID] + 1
}
func (vs *volumeSet) releaseVolume(volumeID, taskID string) {
info, ok := vs.volumes[volumeID]
if !ok {
// if the volume isn't in the set, no action to take.
return
}
// decrement the reference count for this task's node
usage, ok := info.tasks[taskID]
if ok {
// this is probably an unnecessarily high level of caution, but make
// sure we don't go below zero on node count.
if c := info.nodes[usage.nodeID]; c > 0 {
info.nodes[usage.nodeID] = c - 1
}
delete(info.tasks, taskID)
}
}
// freeVolumes finds volumes that are no longer in use on some nodes, and
// updates them to be unpublished from those nodes.
//
// TODO(dperny): this is messy and has a lot of overhead. it should be reworked
// to something more streamlined.
func (vs *volumeSet) freeVolumes(batch *store.Batch) error {
for volumeID, info := range vs.volumes {
if err := batch.Update(func(tx store.Tx) error {
v := store.GetVolume(tx, volumeID)
if v == nil {
return nil
}
// when we are freeing a volume, we may update more than one of the
// volume's PublishStatuses. this means we can't simply put the
// Update call inside of the if statement; we need to know if we've
// changed anything once we've checked *all* of the statuses.
changed := false
for _, status := range v.PublishStatus {
if info.nodes[status.NodeID] == 0 && status.State == api.VolumePublishStatus_PUBLISHED {
status.State = api.VolumePublishStatus_PENDING_NODE_UNPUBLISH
changed = true
}
}
if changed {
if err := store.UpdateVolume(tx, v); err != nil {
return err
}
}
return nil
}); err != nil {
return err
}
}
return nil
}
// isVolumeAvailableOnNode checks if a volume satisfying the given mount is
// available on the given node.
//
// Returns the ID of the volume available, or an empty string if no such volume
// is found.
func (vs *volumeSet) isVolumeAvailableOnNode(mount *api.Mount, node *NodeInfo) string {
source := mount.Source
// first, discern whether we're looking for a group or a volume
// try trimming off the "group:" prefix. if the resulting string is
// different from the input string (meaning something has been trimmed),
// then this volume is actually a volume group.
if group := strings.TrimPrefix(source, "group:"); group != source {
ids, ok := vs.byGroup[group]
// if there are no volumes of this group specified, then no volume
// meets the moutn criteria.
if !ok {
return ""
}
// iterate through all ids in the group, checking if any one meets the
// spec.
for id := range ids {
if vs.checkVolume(id, node, mount.ReadOnly) {
return id
}
}
return ""
}
// if it's not a group, it's a name. resolve the volume name to its ID
id, ok := vs.byName[source]
if !ok || !vs.checkVolume(id, node, mount.ReadOnly) {
return ""
}
return id
}
// checkVolume checks if an individual volume with the given ID can be placed
// on the given node.
func (vs *volumeSet) checkVolume(id string, info *NodeInfo, readOnly bool) bool {
vi := vs.volumes[id]
// first, check if the volume's availability is even Active. If not. no
// reason to bother with anything further.
if vi.volume != nil && vi.volume.Spec.Availability != api.VolumeAvailabilityActive {
return false
}
// get the node topology for this volume
var top *api.Topology
// get the topology for this volume's driver on this node
for _, info := range info.Description.CSIInfo {
if info.PluginName == vi.volume.Spec.Driver.Name {
top = info.AccessibleTopology
break
}
}
// check if the volume is available on this node. a volume's
// availability on a node depends on its accessible topology, how it's
// already being used, and how this task intends to use it.
if vi.volume.Spec.AccessMode.Scope == api.VolumeScopeSingleNode {
// if the volume is not in use on this node already, then it can't
// be used here.
for _, usage := range vi.tasks {
if usage.nodeID != info.ID {
return false
}
}
}
// even if the volume is currently on this node, or it has multi-node
// access, the volume sharing needs to be compatible.
switch vi.volume.Spec.AccessMode.Sharing {
case api.VolumeSharingNone:
// if the volume sharing is none, then the volume cannot be
// used by another task
if len(vi.tasks) > 0 {
return false
}
case api.VolumeSharingOneWriter:
// if the mount is not ReadOnly, and the volume has a writer, then
// we this volume does not work.
if !readOnly && hasWriter(vi) {
return false
}
case api.VolumeSharingReadOnly:
// if the volume sharing is read-only, then the Mount must also
// be read-only
if !readOnly {
return false
}
}
// then, do the quick check of whether this volume is in the topology. if
// the volume has an AccessibleTopology, and it does not lie within the
// node's topology, then this volume won't fit.
return IsInTopology(top, vi.volume.VolumeInfo.AccessibleTopology)
}
// hasWriter is a helper function that returns true if at least one task is
// using this volume not in read-only mode.
func hasWriter(info volumeInfo) bool {
for _, usage := range info.tasks {
if !usage.readOnly {
return true
}
}
return false
}
| newVolumeSet | identifier_name |
volumes.go | package scheduler
import (
"fmt"
"strings"
"github.com/moby/swarmkit/v2/api"
"github.com/moby/swarmkit/v2/manager/state/store"
)
// the scheduler package does double duty -- in addition to choosing nodes, it
// must also choose volumes. this is because volumes are fungible, and can be
// scheduled to several nodes, and used by several tasks. we should endeavor to
// spread tasks across volumes, like we spread nodes. on the positive side,
// unlike nodes, volumes are not heirarchical. that is, we don't need to
// spread across multiple levels of a tree, only a flat set.
// volumeSet is the set of all volumes currently managed
type volumeSet struct {
// volumes is a mapping of volume IDs to volumeInfo
volumes map[string]volumeInfo
// byGroup is a mapping from a volume group name to a set of volumes in
// that group
byGroup map[string]map[string]struct{}
// byName is a mapping of volume names to swarmkit volume IDs.
byName map[string]string
}
// volumeUsage contains information about the usage of a Volume by a specific
// task.
type volumeUsage struct {
nodeID string
readOnly bool
}
// volumeInfo contains scheduler information about a given volume
type volumeInfo struct {
volume *api.Volume
tasks map[string]volumeUsage
// nodes is a set of nodes a volume is in use on. it maps a node ID to a
// reference count for how many tasks are using the volume on that node.
nodes map[string]int
}
func newVolumeSet() *volumeSet {
return &volumeSet{
volumes: map[string]volumeInfo{},
byGroup: map[string]map[string]struct{}{},
byName: map[string]string{},
}
}
// getVolume returns the volume object for the given ID as stored in the
// volumeSet, or nil if none exists.
//
//nolint:unused // TODO(thaJeztah) this is currently unused: is it safe to remove?
func (vs *volumeSet) getVolume(id string) *api.Volume {
return vs.volumes[id].volume
}
func (vs *volumeSet) addOrUpdateVolume(v *api.Volume) {
if info, ok := vs.volumes[v.ID]; !ok {
vs.volumes[v.ID] = volumeInfo{
volume: v,
nodes: map[string]int{},
tasks: map[string]volumeUsage{},
}
} else {
// if the volume already exists in the set, then only update the volume
// object, not the tasks map.
info.volume = v
}
if set, ok := vs.byGroup[v.Spec.Group]; ok {
set[v.ID] = struct{}{}
} else {
vs.byGroup[v.Spec.Group] = map[string]struct{}{v.ID: {}}
}
vs.byName[v.Spec.Annotations.Name] = v.ID
}
//nolint:unused // only used in tests.
func (vs *volumeSet) removeVolume(volumeID string) |
// chooseTaskVolumes selects a set of VolumeAttachments for the task on the
// given node. it expects that the node was already validated to have the
// necessary volumes, but it will return an error if a full set of volumes is
// not available.
func (vs *volumeSet) chooseTaskVolumes(task *api.Task, nodeInfo *NodeInfo) ([]*api.VolumeAttachment, error) {
volumes := []*api.VolumeAttachment{}
// we'll reserve volumes in this loop, but release all of our reservations
// before we finish. the caller will need to call reserveTaskVolumes after
// calling this function
// TODO(dperny): this is probably not optimal
defer func() {
for _, volume := range volumes {
vs.releaseVolume(volume.ID, task.ID)
}
}()
// TODO(dperny): handle non-container tasks
c := task.Spec.GetContainer()
if c == nil {
return nil, nil
}
for _, mount := range task.Spec.GetContainer().Mounts {
if mount.Type == api.MountTypeCluster {
candidate := vs.isVolumeAvailableOnNode(&mount, nodeInfo)
if candidate == "" {
// TODO(dperny): return structured error types, instead of
// error strings
return nil, fmt.Errorf("cannot find volume to satisfy mount with source %v", mount.Source)
}
vs.reserveVolume(candidate, task.ID, nodeInfo.Node.ID, mount.ReadOnly)
volumes = append(volumes, &api.VolumeAttachment{
ID: candidate,
Source: mount.Source,
Target: mount.Target,
})
}
}
return volumes, nil
}
// reserveTaskVolumes identifies all volumes currently in use on a task and
// marks them in the volumeSet as in use.
func (vs *volumeSet) reserveTaskVolumes(task *api.Task) {
for _, va := range task.Volumes {
// we shouldn't need to handle non-container tasks because those tasks
// won't have any entries in task.Volumes.
for _, mount := range task.Spec.GetContainer().Mounts {
if mount.Source == va.Source && mount.Target == va.Target {
vs.reserveVolume(va.ID, task.ID, task.NodeID, mount.ReadOnly)
}
}
}
}
func (vs *volumeSet) reserveVolume(volumeID, taskID, nodeID string, readOnly bool) {
info, ok := vs.volumes[volumeID]
if !ok {
// TODO(dperny): don't just return nothing.
return
}
info.tasks[taskID] = volumeUsage{nodeID: nodeID, readOnly: readOnly}
// increment the reference count for this node.
info.nodes[nodeID] = info.nodes[nodeID] + 1
}
func (vs *volumeSet) releaseVolume(volumeID, taskID string) {
info, ok := vs.volumes[volumeID]
if !ok {
// if the volume isn't in the set, no action to take.
return
}
// decrement the reference count for this task's node
usage, ok := info.tasks[taskID]
if ok {
// this is probably an unnecessarily high level of caution, but make
// sure we don't go below zero on node count.
if c := info.nodes[usage.nodeID]; c > 0 {
info.nodes[usage.nodeID] = c - 1
}
delete(info.tasks, taskID)
}
}
// freeVolumes finds volumes that are no longer in use on some nodes, and
// updates them to be unpublished from those nodes.
//
// TODO(dperny): this is messy and has a lot of overhead. it should be reworked
// to something more streamlined.
func (vs *volumeSet) freeVolumes(batch *store.Batch) error {
for volumeID, info := range vs.volumes {
if err := batch.Update(func(tx store.Tx) error {
v := store.GetVolume(tx, volumeID)
if v == nil {
return nil
}
// when we are freeing a volume, we may update more than one of the
// volume's PublishStatuses. this means we can't simply put the
// Update call inside of the if statement; we need to know if we've
// changed anything once we've checked *all* of the statuses.
changed := false
for _, status := range v.PublishStatus {
if info.nodes[status.NodeID] == 0 && status.State == api.VolumePublishStatus_PUBLISHED {
status.State = api.VolumePublishStatus_PENDING_NODE_UNPUBLISH
changed = true
}
}
if changed {
if err := store.UpdateVolume(tx, v); err != nil {
return err
}
}
return nil
}); err != nil {
return err
}
}
return nil
}
// isVolumeAvailableOnNode checks if a volume satisfying the given mount is
// available on the given node.
//
// Returns the ID of the volume available, or an empty string if no such volume
// is found.
func (vs *volumeSet) isVolumeAvailableOnNode(mount *api.Mount, node *NodeInfo) string {
source := mount.Source
// first, discern whether we're looking for a group or a volume
// try trimming off the "group:" prefix. if the resulting string is
// different from the input string (meaning something has been trimmed),
// then this volume is actually a volume group.
if group := strings.TrimPrefix(source, "group:"); group != source {
ids, ok := vs.byGroup[group]
// if there are no volumes of this group specified, then no volume
// meets the moutn criteria.
if !ok {
return ""
}
// iterate through all ids in the group, checking if any one meets the
// spec.
for id := range ids {
if vs.checkVolume(id, node, mount.ReadOnly) {
return id
}
}
return ""
}
// if it's not a group, it's a name. resolve the volume name to its ID
id, ok := vs.byName[source]
if !ok || !vs.checkVolume(id, node, mount.ReadOnly) {
return ""
}
return id
}
// checkVolume checks if an individual volume with the given ID can be placed
// on the given node.
func (vs *volumeSet) checkVolume(id string, info *NodeInfo, readOnly bool) bool {
vi := vs.volumes[id]
// first, check if the volume's availability is even Active. If not. no
// reason to bother with anything further.
if vi.volume != nil && vi.volume.Spec.Availability != api.VolumeAvailabilityActive {
return false
}
// get the node topology for this volume
var top *api.Topology
// get the topology for this volume's driver on this node
for _, info := range info.Description.CSIInfo {
if info.PluginName == vi.volume.Spec.Driver.Name {
top = info.AccessibleTopology
break
}
}
// check if the volume is available on this node. a volume's
// availability on a node depends on its accessible topology, how it's
// already being used, and how this task intends to use it.
if vi.volume.Spec.AccessMode.Scope == api.VolumeScopeSingleNode {
// if the volume is not in use on this node already, then it can't
// be used here.
for _, usage := range vi.tasks {
if usage.nodeID != info.ID {
return false
}
}
}
// even if the volume is currently on this node, or it has multi-node
// access, the volume sharing needs to be compatible.
switch vi.volume.Spec.AccessMode.Sharing {
case api.VolumeSharingNone:
// if the volume sharing is none, then the volume cannot be
// used by another task
if len(vi.tasks) > 0 {
return false
}
case api.VolumeSharingOneWriter:
// if the mount is not ReadOnly, and the volume has a writer, then
// we this volume does not work.
if !readOnly && hasWriter(vi) {
return false
}
case api.VolumeSharingReadOnly:
// if the volume sharing is read-only, then the Mount must also
// be read-only
if !readOnly {
return false
}
}
// then, do the quick check of whether this volume is in the topology. if
// the volume has an AccessibleTopology, and it does not lie within the
// node's topology, then this volume won't fit.
return IsInTopology(top, vi.volume.VolumeInfo.AccessibleTopology)
}
// hasWriter is a helper function that returns true if at least one task is
// using this volume not in read-only mode.
func hasWriter(info volumeInfo) bool {
for _, usage := range info.tasks {
if !usage.readOnly {
return true
}
}
return false
}
| {
if info, ok := vs.volumes[volumeID]; ok {
// if the volume exists in the set, look up its group ID and remove it
// from the byGroup mapping as well
group := info.volume.Spec.Group
delete(vs.byGroup[group], volumeID)
delete(vs.volumes, volumeID)
delete(vs.byName, info.volume.Spec.Annotations.Name)
}
} | identifier_body |
volumes.go | package scheduler
import (
"fmt"
"strings"
| "github.com/moby/swarmkit/v2/api"
"github.com/moby/swarmkit/v2/manager/state/store"
)
// the scheduler package does double duty -- in addition to choosing nodes, it
// must also choose volumes. this is because volumes are fungible, and can be
// scheduled to several nodes, and used by several tasks. we should endeavor to
// spread tasks across volumes, like we spread nodes. on the positive side,
// unlike nodes, volumes are not heirarchical. that is, we don't need to
// spread across multiple levels of a tree, only a flat set.
// volumeSet is the set of all volumes currently managed, together with the
// indexes and usage bookkeeping the scheduler needs to place tasks on them.
type volumeSet struct {
	// volumes is a mapping of volume IDs to volumeInfo
	volumes map[string]volumeInfo
	// byGroup is a mapping from a volume group name to a set of volumes in
	// that group
	byGroup map[string]map[string]struct{}
	// byName is a mapping of volume names to swarmkit volume IDs.
	byName map[string]string
}
// volumeUsage contains information about the usage of a Volume by a specific
// task: the node the task runs on and whether the task mounts the volume
// read-only.
type volumeUsage struct {
	nodeID   string
	readOnly bool
}
// volumeInfo contains scheduler information about a given volume
type volumeInfo struct {
	volume *api.Volume
	// tasks maps a task ID to how that task uses this volume.
	tasks map[string]volumeUsage
	// nodes is a set of nodes a volume is in use on. it maps a node ID to a
	// reference count for how many tasks are using the volume on that node.
	nodes map[string]int
}
// newVolumeSet returns an empty, ready-to-use volumeSet with all of its
// internal indexes initialized.
func newVolumeSet() *volumeSet {
	vs := &volumeSet{}
	vs.volumes = make(map[string]volumeInfo)
	vs.byGroup = make(map[string]map[string]struct{})
	vs.byName = make(map[string]string)
	return vs
}
// getVolume returns the volume object for the given ID as stored in the
// volumeSet, or nil if none exists.
//
//nolint:unused // TODO(thaJeztah) this is currently unused: is it safe to remove?
func (vs *volumeSet) getVolume(id string) *api.Volume {
	// a missing key yields a zero-value volumeInfo, whose volume field is nil.
	return vs.volumes[id].volume
}
// addOrUpdateVolume inserts the given volume into the set or, if a volume
// with the same ID is already present, replaces its stored volume object
// while keeping the existing task and node usage bookkeeping. The volume is
// additionally indexed by its group (Spec.Group) and by its name
// (Spec.Annotations.Name) so mounts can be resolved either way.
func (vs *volumeSet) addOrUpdateVolume(v *api.Volume) {
	if info, ok := vs.volumes[v.ID]; !ok {
		vs.volumes[v.ID] = volumeInfo{
			volume: v,
			nodes:  map[string]int{},
			tasks:  map[string]volumeUsage{},
		}
	} else {
		// if the volume already exists in the set, then only update the volume
		// object, not the tasks map.
		//
		// info is a value copy of the map entry, so it must be written back;
		// previously the assignment below was dropped and the stale volume
		// object remained in the set.
		info.volume = v
		vs.volumes[v.ID] = info
	}
	if set, ok := vs.byGroup[v.Spec.Group]; ok {
		set[v.ID] = struct{}{}
	} else {
		vs.byGroup[v.Spec.Group] = map[string]struct{}{v.ID: {}}
	}
	vs.byName[v.Spec.Annotations.Name] = v.ID
}
// removeVolume deletes the volume with the given ID from the set, including
// its entries in the byGroup and byName indexes. It is a no-op for unknown
// IDs.
//
//nolint:unused // only used in tests.
func (vs *volumeSet) removeVolume(volumeID string) {
	if info, ok := vs.volumes[volumeID]; ok {
		// if the volume exists in the set, look up its group ID and remove it
		// from the byGroup mapping as well
		group := info.volume.Spec.Group
		delete(vs.byGroup[group], volumeID)
		delete(vs.volumes, volumeID)
		delete(vs.byName, info.volume.Spec.Annotations.Name)
	}
}
// chooseTaskVolumes selects a set of VolumeAttachments for the task on the
// given node. it expects that the node was already validated to have the
// necessary volumes, but it will return an error if a full set of volumes is
// not available.
//
// On return the chosen volumes are NOT left reserved in the set (see the
// deferred release below); the caller must call reserveTaskVolumes to record
// them.
func (vs *volumeSet) chooseTaskVolumes(task *api.Task, nodeInfo *NodeInfo) ([]*api.VolumeAttachment, error) {
	volumes := []*api.VolumeAttachment{}
	// we'll reserve volumes in this loop, but release all of our reservations
	// before we finish. the caller will need to call reserveTaskVolumes after
	// calling this function
	// TODO(dperny): this is probably not optimal
	defer func() {
		for _, volume := range volumes {
			vs.releaseVolume(volume.ID, task.ID)
		}
	}()
	// TODO(dperny): handle non-container tasks
	c := task.Spec.GetContainer()
	if c == nil {
		return nil, nil
	}
	for _, mount := range task.Spec.GetContainer().Mounts {
		// only cluster mounts are backed by swarmkit-managed volumes.
		if mount.Type == api.MountTypeCluster {
			candidate := vs.isVolumeAvailableOnNode(&mount, nodeInfo)
			if candidate == "" {
				// TODO(dperny): return structured error types, instead of
				// error strings
				return nil, fmt.Errorf("cannot find volume to satisfy mount with source %v", mount.Source)
			}
			// reserve temporarily so later iterations of this loop see the
			// volume as in use; the defer above undoes this reservation.
			vs.reserveVolume(candidate, task.ID, nodeInfo.Node.ID, mount.ReadOnly)
			volumes = append(volumes, &api.VolumeAttachment{
				ID:     candidate,
				Source: mount.Source,
				Target: mount.Target,
			})
		}
	}
	return volumes, nil
}
// reserveTaskVolumes identifies all volumes currently in use on a task and
// marks them in the volumeSet as in use.
func (vs *volumeSet) reserveTaskVolumes(task *api.Task) {
	for _, va := range task.Volumes {
		// we shouldn't need to handle non-container tasks because those tasks
		// won't have any entries in task.Volumes.
		//
		// match each attachment back to its mount so we know whether the task
		// uses the volume read-only.
		for _, mount := range task.Spec.GetContainer().Mounts {
			if mount.Source == va.Source && mount.Target == va.Target {
				vs.reserveVolume(va.ID, task.ID, task.NodeID, mount.ReadOnly)
			}
		}
	}
}
// reserveVolume records that the given task, running on the given node, is
// using the volume. It is a silent no-op if the volume is not in the set.
func (vs *volumeSet) reserveVolume(volumeID, taskID, nodeID string, readOnly bool) {
	info, ok := vs.volumes[volumeID]
	if !ok {
		// TODO(dperny): don't just return nothing.
		return
	}
	// info is a value copy of the map entry, but tasks and nodes are maps
	// (reference types), so mutating them here updates the stored entry too.
	info.tasks[taskID] = volumeUsage{nodeID: nodeID, readOnly: readOnly}
	// increment the reference count for this node.
	info.nodes[nodeID] = info.nodes[nodeID] + 1
}
// releaseVolume drops the given task's usage record for the volume and
// decrements the volume's reference count on that task's node.
func (vs *volumeSet) releaseVolume(volumeID, taskID string) {
	info, ok := vs.volumes[volumeID]
	if !ok {
		// if the volume isn't in the set, no action to take.
		return
	}
	// decrement the reference count for this task's node
	usage, ok := info.tasks[taskID]
	if ok {
		// this is probably an unnecessarily high level of caution, but make
		// sure we don't go below zero on node count.
		if c := info.nodes[usage.nodeID]; c > 0 {
			info.nodes[usage.nodeID] = c - 1
		}
		delete(info.tasks, taskID)
	}
}
// freeVolumes finds volumes that are no longer in use on some nodes, and
// updates them to be unpublished from those nodes.
//
// TODO(dperny): this is messy and has a lot of overhead. it should be reworked
// to something more streamlined.
func (vs *volumeSet) freeVolumes(batch *store.Batch) error {
	for volumeID, info := range vs.volumes {
		if err := batch.Update(func(tx store.Tx) error {
			// re-read the volume inside the transaction; it may have been
			// removed since the set was built.
			v := store.GetVolume(tx, volumeID)
			if v == nil {
				return nil
			}
			// when we are freeing a volume, we may update more than one of the
			// volume's PublishStatuses. this means we can't simply put the
			// Update call inside of the if statement; we need to know if we've
			// changed anything once we've checked *all* of the statuses.
			changed := false
			for _, status := range v.PublishStatus {
				// a zero reference count means no scheduled task uses this
				// volume on that node anymore, so a published volume can move
				// toward unpublishing.
				if info.nodes[status.NodeID] == 0 && status.State == api.VolumePublishStatus_PUBLISHED {
					status.State = api.VolumePublishStatus_PENDING_NODE_UNPUBLISH
					changed = true
				}
			}
			if changed {
				if err := store.UpdateVolume(tx, v); err != nil {
					return err
				}
			}
			return nil
		}); err != nil {
			return err
		}
	}
	return nil
}
// isVolumeAvailableOnNode checks if a volume satisfying the given mount is
// available on the given node.
//
// Returns the ID of the volume available, or an empty string if no such volume
// is found.
func (vs *volumeSet) isVolumeAvailableOnNode(mount *api.Mount, node *NodeInfo) string {
	source := mount.Source
	// first, discern whether we're looking for a group or a volume
	// try trimming off the "group:" prefix. if the resulting string is
	// different from the input string (meaning something has been trimmed),
	// then this volume is actually a volume group.
	if group := strings.TrimPrefix(source, "group:"); group != source {
		ids, ok := vs.byGroup[group]
		// if there are no volumes of this group specified, then no volume
		// meets the mount criteria.
		if !ok {
			return ""
		}
		// iterate through all ids in the group, checking if any one meets the
		// spec. map iteration order is unspecified, so which candidate is
		// tried first varies between calls.
		for id := range ids {
			if vs.checkVolume(id, node, mount.ReadOnly) {
				return id
			}
		}
		return ""
	}
	// if it's not a group, it's a name. resolve the volume name to its ID
	id, ok := vs.byName[source]
	if !ok || !vs.checkVolume(id, node, mount.ReadOnly) {
		return ""
	}
	return id
}
// checkVolume checks if an individual volume with the given ID can be placed
// on the given node, considering the volume's availability, its access scope
// and sharing mode, its current users, and its accessible topology.
func (vs *volumeSet) checkVolume(id string, info *NodeInfo, readOnly bool) bool {
	vi := vs.volumes[id]
	// an unknown ID yields a zero-value volumeInfo; bail out before any of
	// the unconditional vi.volume dereferences below can panic. (Previously
	// vi.volume was nil-checked only in the availability condition and then
	// dereferenced unconditionally afterwards.)
	if vi.volume == nil {
		return false
	}
	// first, check if the volume's availability is even Active. If not, no
	// reason to bother with anything further.
	if vi.volume.Spec.Availability != api.VolumeAvailabilityActive {
		return false
	}
	// get the node topology for this volume
	var top *api.Topology
	// get the topology for this volume's driver on this node. (loop variable
	// renamed from "info" to avoid shadowing the NodeInfo parameter.)
	for _, csi := range info.Description.CSIInfo {
		if csi.PluginName == vi.volume.Spec.Driver.Name {
			top = csi.AccessibleTopology
			break
		}
	}
	// check if the volume is available on this node. a volume's
	// availability on a node depends on its accessible topology, how it's
	// already being used, and how this task intends to use it.
	if vi.volume.Spec.AccessMode.Scope == api.VolumeScopeSingleNode {
		// if the volume is in use on a different node already, then it can't
		// be used here.
		for _, usage := range vi.tasks {
			if usage.nodeID != info.ID {
				return false
			}
		}
	}
	// even if the volume is currently on this node, or it has multi-node
	// access, the volume sharing needs to be compatible.
	switch vi.volume.Spec.AccessMode.Sharing {
	case api.VolumeSharingNone:
		// if the volume sharing is none, then the volume cannot be
		// used by another task
		if len(vi.tasks) > 0 {
			return false
		}
	case api.VolumeSharingOneWriter:
		// if the mount is not ReadOnly, and the volume already has a writer,
		// then this volume does not work.
		if !readOnly && hasWriter(vi) {
			return false
		}
	case api.VolumeSharingReadOnly:
		// if the volume sharing is read-only, then the Mount must also
		// be read-only
		if !readOnly {
			return false
		}
	}
	// then, do the quick check of whether this volume is in the topology. if
	// the volume has an AccessibleTopology, and it does not lie within the
	// node's topology, then this volume won't fit.
	return IsInTopology(top, vi.volume.VolumeInfo.AccessibleTopology)
}
// hasWriter is a helper function that returns true if at least one task is
// using this volume not in read-only mode.
func hasWriter(info volumeInfo) bool {
for _, usage := range info.tasks {
if !usage.readOnly {
return true
}
}
return false
} | random_line_split | |
volumes.go | package scheduler
import (
"fmt"
"strings"
"github.com/moby/swarmkit/v2/api"
"github.com/moby/swarmkit/v2/manager/state/store"
)
// the scheduler package does double duty -- in addition to choosing nodes, it
// must also choose volumes. this is because volumes are fungible, and can be
// scheduled to several nodes, and used by several tasks. we should endeavor to
// spread tasks across volumes, like we spread nodes. on the positive side,
// unlike nodes, volumes are not heirarchical. that is, we don't need to
// spread across multiple levels of a tree, only a flat set.
// volumeSet is the set of all volumes currently managed
type volumeSet struct {
// volumes is a mapping of volume IDs to volumeInfo
volumes map[string]volumeInfo
// byGroup is a mapping from a volume group name to a set of volumes in
// that group
byGroup map[string]map[string]struct{}
// byName is a mapping of volume names to swarmkit volume IDs.
byName map[string]string
}
// volumeUsage contains information about the usage of a Volume by a specific
// task.
type volumeUsage struct {
nodeID string
readOnly bool
}
// volumeInfo contains scheduler information about a given volume
type volumeInfo struct {
volume *api.Volume
tasks map[string]volumeUsage
// nodes is a set of nodes a volume is in use on. it maps a node ID to a
// reference count for how many tasks are using the volume on that node.
nodes map[string]int
}
func newVolumeSet() *volumeSet {
return &volumeSet{
volumes: map[string]volumeInfo{},
byGroup: map[string]map[string]struct{}{},
byName: map[string]string{},
}
}
// getVolume returns the volume object for the given ID as stored in the
// volumeSet, or nil if none exists.
//
//nolint:unused // TODO(thaJeztah) this is currently unused: is it safe to remove?
func (vs *volumeSet) getVolume(id string) *api.Volume {
return vs.volumes[id].volume
}
func (vs *volumeSet) addOrUpdateVolume(v *api.Volume) {
if info, ok := vs.volumes[v.ID]; !ok {
vs.volumes[v.ID] = volumeInfo{
volume: v,
nodes: map[string]int{},
tasks: map[string]volumeUsage{},
}
} else {
// if the volume already exists in the set, then only update the volume
// object, not the tasks map.
info.volume = v
}
if set, ok := vs.byGroup[v.Spec.Group]; ok {
set[v.ID] = struct{}{}
} else {
vs.byGroup[v.Spec.Group] = map[string]struct{}{v.ID: {}}
}
vs.byName[v.Spec.Annotations.Name] = v.ID
}
//nolint:unused // only used in tests.
func (vs *volumeSet) removeVolume(volumeID string) {
if info, ok := vs.volumes[volumeID]; ok {
// if the volume exists in the set, look up its group ID and remove it
// from the byGroup mapping as well
group := info.volume.Spec.Group
delete(vs.byGroup[group], volumeID)
delete(vs.volumes, volumeID)
delete(vs.byName, info.volume.Spec.Annotations.Name)
}
}
// chooseTaskVolumes selects a set of VolumeAttachments for the task on the
// given node. it expects that the node was already validated to have the
// necessary volumes, but it will return an error if a full set of volumes is
// not available.
func (vs *volumeSet) chooseTaskVolumes(task *api.Task, nodeInfo *NodeInfo) ([]*api.VolumeAttachment, error) {
volumes := []*api.VolumeAttachment{}
// we'll reserve volumes in this loop, but release all of our reservations
// before we finish. the caller will need to call reserveTaskVolumes after
// calling this function
// TODO(dperny): this is probably not optimal
defer func() {
for _, volume := range volumes {
vs.releaseVolume(volume.ID, task.ID)
}
}()
// TODO(dperny): handle non-container tasks
c := task.Spec.GetContainer()
if c == nil {
return nil, nil
}
for _, mount := range task.Spec.GetContainer().Mounts {
if mount.Type == api.MountTypeCluster {
candidate := vs.isVolumeAvailableOnNode(&mount, nodeInfo)
if candidate == "" {
// TODO(dperny): return structured error types, instead of
// error strings
return nil, fmt.Errorf("cannot find volume to satisfy mount with source %v", mount.Source)
}
vs.reserveVolume(candidate, task.ID, nodeInfo.Node.ID, mount.ReadOnly)
volumes = append(volumes, &api.VolumeAttachment{
ID: candidate,
Source: mount.Source,
Target: mount.Target,
})
}
}
return volumes, nil
}
// reserveTaskVolumes identifies all volumes currently in use on a task and
// marks them in the volumeSet as in use.
func (vs *volumeSet) reserveTaskVolumes(task *api.Task) {
for _, va := range task.Volumes {
// we shouldn't need to handle non-container tasks because those tasks
// won't have any entries in task.Volumes.
for _, mount := range task.Spec.GetContainer().Mounts |
}
}
func (vs *volumeSet) reserveVolume(volumeID, taskID, nodeID string, readOnly bool) {
info, ok := vs.volumes[volumeID]
if !ok {
// TODO(dperny): don't just return nothing.
return
}
info.tasks[taskID] = volumeUsage{nodeID: nodeID, readOnly: readOnly}
// increment the reference count for this node.
info.nodes[nodeID] = info.nodes[nodeID] + 1
}
func (vs *volumeSet) releaseVolume(volumeID, taskID string) {
info, ok := vs.volumes[volumeID]
if !ok {
// if the volume isn't in the set, no action to take.
return
}
// decrement the reference count for this task's node
usage, ok := info.tasks[taskID]
if ok {
// this is probably an unnecessarily high level of caution, but make
// sure we don't go below zero on node count.
if c := info.nodes[usage.nodeID]; c > 0 {
info.nodes[usage.nodeID] = c - 1
}
delete(info.tasks, taskID)
}
}
// freeVolumes finds volumes that are no longer in use on some nodes, and
// updates them to be unpublished from those nodes.
//
// TODO(dperny): this is messy and has a lot of overhead. it should be reworked
// to something more streamlined.
func (vs *volumeSet) freeVolumes(batch *store.Batch) error {
for volumeID, info := range vs.volumes {
if err := batch.Update(func(tx store.Tx) error {
v := store.GetVolume(tx, volumeID)
if v == nil {
return nil
}
// when we are freeing a volume, we may update more than one of the
// volume's PublishStatuses. this means we can't simply put the
// Update call inside of the if statement; we need to know if we've
// changed anything once we've checked *all* of the statuses.
changed := false
for _, status := range v.PublishStatus {
if info.nodes[status.NodeID] == 0 && status.State == api.VolumePublishStatus_PUBLISHED {
status.State = api.VolumePublishStatus_PENDING_NODE_UNPUBLISH
changed = true
}
}
if changed {
if err := store.UpdateVolume(tx, v); err != nil {
return err
}
}
return nil
}); err != nil {
return err
}
}
return nil
}
// isVolumeAvailableOnNode checks if a volume satisfying the given mount is
// available on the given node.
//
// Returns the ID of the volume available, or an empty string if no such volume
// is found.
func (vs *volumeSet) isVolumeAvailableOnNode(mount *api.Mount, node *NodeInfo) string {
source := mount.Source
// first, discern whether we're looking for a group or a volume
// try trimming off the "group:" prefix. if the resulting string is
// different from the input string (meaning something has been trimmed),
// then this volume is actually a volume group.
if group := strings.TrimPrefix(source, "group:"); group != source {
ids, ok := vs.byGroup[group]
// if there are no volumes of this group specified, then no volume
// meets the moutn criteria.
if !ok {
return ""
}
// iterate through all ids in the group, checking if any one meets the
// spec.
for id := range ids {
if vs.checkVolume(id, node, mount.ReadOnly) {
return id
}
}
return ""
}
// if it's not a group, it's a name. resolve the volume name to its ID
id, ok := vs.byName[source]
if !ok || !vs.checkVolume(id, node, mount.ReadOnly) {
return ""
}
return id
}
// checkVolume checks if an individual volume with the given ID can be placed
// on the given node.
func (vs *volumeSet) checkVolume(id string, info *NodeInfo, readOnly bool) bool {
vi := vs.volumes[id]
// first, check if the volume's availability is even Active. If not. no
// reason to bother with anything further.
if vi.volume != nil && vi.volume.Spec.Availability != api.VolumeAvailabilityActive {
return false
}
// get the node topology for this volume
var top *api.Topology
// get the topology for this volume's driver on this node
for _, info := range info.Description.CSIInfo {
if info.PluginName == vi.volume.Spec.Driver.Name {
top = info.AccessibleTopology
break
}
}
// check if the volume is available on this node. a volume's
// availability on a node depends on its accessible topology, how it's
// already being used, and how this task intends to use it.
if vi.volume.Spec.AccessMode.Scope == api.VolumeScopeSingleNode {
// if the volume is not in use on this node already, then it can't
// be used here.
for _, usage := range vi.tasks {
if usage.nodeID != info.ID {
return false
}
}
}
// even if the volume is currently on this node, or it has multi-node
// access, the volume sharing needs to be compatible.
switch vi.volume.Spec.AccessMode.Sharing {
case api.VolumeSharingNone:
// if the volume sharing is none, then the volume cannot be
// used by another task
if len(vi.tasks) > 0 {
return false
}
case api.VolumeSharingOneWriter:
// if the mount is not ReadOnly, and the volume has a writer, then
// we this volume does not work.
if !readOnly && hasWriter(vi) {
return false
}
case api.VolumeSharingReadOnly:
// if the volume sharing is read-only, then the Mount must also
// be read-only
if !readOnly {
return false
}
}
// then, do the quick check of whether this volume is in the topology. if
// the volume has an AccessibleTopology, and it does not lie within the
// node's topology, then this volume won't fit.
return IsInTopology(top, vi.volume.VolumeInfo.AccessibleTopology)
}
// hasWriter reports whether any task currently uses this volume in
// read-write (non-read-only) mode.
func hasWriter(info volumeInfo) bool {
	for _, usage := range info.tasks {
		if usage.readOnly {
			continue
		}
		return true
	}
	return false
}
| {
if mount.Source == va.Source && mount.Target == va.Target {
vs.reserveVolume(va.ID, task.ID, task.NodeID, mount.ReadOnly)
}
} | conditional_block |
handlers.go | /*
Copyright 2014 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package apiserver
import (
"bufio"
"encoding/json"
"fmt"
"net"
"net/http"
"regexp"
"runtime/debug"
"strings"
"sync"
"time"
"github.com/golang/glog"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/errors"
"k8s.io/kubernetes/pkg/auth/authorizer"
"k8s.io/kubernetes/pkg/httplog"
"k8s.io/kubernetes/pkg/util/sets"
)
// specialVerbs contains just strings which are used in REST paths for special actions that don't fall under the normal
// CRUDdy GET/POST/PUT/DELETE actions on REST objects.
// TODO: find a way to keep this up to date automatically. Maybe dynamically populate list as handlers added to
// master's Mux.
var specialVerbs = sets.NewString("proxy", "redirect", "watch")
// specialVerbsNoSubresources contains root verbs which do not allow subresources
var specialVerbsNoSubresources = sets.NewString("proxy", "redirect")
// Constant for the retry-after interval on rate limiting.
// TODO: maybe make this dynamic? or user-adjustable?
const RetryAfter = "1"
// IsReadOnlyReq() is true for any (or at least many) request which has no observable
// side effects on state of apiserver (though there may be internal side effects like
// caching and logging).
func IsReadOnlyReq(req http.Request) bool {
if req.Method == "GET" {
// TODO: add OPTIONS and HEAD if we ever support those.
return true
}
return false
}
// ReadOnly passes all GET requests on to handler, and rejects every other
// method with a 403 Forbidden response.
func ReadOnly(handler http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
		if IsReadOnlyReq(*req) {
			handler.ServeHTTP(w, req)
			return
		}
		w.WriteHeader(http.StatusForbidden)
		fmt.Fprintf(w, "This is a read-only endpoint.")
	})
}
type LongRunningRequestCheck func(r *http.Request) bool
// BasicLongRunningRequestCheck pathRegex operates against the url path, the queryParams match is case insensitive.
// Any one match flags the request.
// TODO tighten this check to eliminate the abuse potential by malicious clients that start setting queryParameters
// to bypass the rate limitter. This could be done using a full parse and special casing the bits we need.
func BasicLongRunningRequestCheck(pathRegex *regexp.Regexp, queryParams map[string]string) LongRunningRequestCheck {
	return func(r *http.Request) bool {
		// path match alone is sufficient.
		if pathRegex.MatchString(r.URL.Path) {
			return true
		}
		for key, expectedValue := range queryParams {
			// strings.EqualFold performs the case-insensitive comparison
			// without allocating two lowercased copies per check, unlike the
			// previous ToLower(a) == ToLower(b) form.
			if strings.EqualFold(expectedValue, r.URL.Query().Get(key)) {
				return true
			}
		}
		return false
	}
}
// MaxInFlightLimit limits the number of in-flight requests to the buffer size
// of the passed in channel. Requests recognized by longRunningRequestCheck
// are exempt from the limit; all other requests receive a 429 response when
// the channel is full. A nil channel disables limiting entirely.
func MaxInFlightLimit(c chan bool, longRunningRequestCheck LongRunningRequestCheck, handler http.Handler) http.Handler {
	if c == nil {
		return handler
	}
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		if longRunningRequestCheck(r) {
			// Skip tracking long running events.
			handler.ServeHTTP(w, r)
			return
		}
		select {
		case c <- true:
			// slot acquired; release it once the handler returns.
			defer func() { <-c }()
			handler.ServeHTTP(w, r)
		default:
			// channel buffer full: too many requests already in flight.
			tooManyRequests(w)
		}
	})
}
// tooManyRequests replies with HTTP 429 ("Too Many Requests") and a
// Retry-After hint telling the client when to retry.
func tooManyRequests(w http.ResponseWriter) {
	// Return a 429 status indicating "Too Many Requests"
	w.Header().Set("Retry-After", RetryAfter)
	http.Error(w, "Too many requests, please try again later.", errors.StatusTooManyRequests)
}
// RecoverPanics wraps an http Handler to recover and log panics.
// A panicking handler results in an attempted 500 response plus a stack trace
// in the log, instead of the panic propagating. Each request is also logged
// via httplog, with a stack trace captured for unexpected status codes.
func RecoverPanics(handler http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
		defer func() {
			if x := recover(); x != nil {
				http.Error(w, "apis panic. Look in log for details.", http.StatusInternalServerError)
				glog.Errorf("APIServer panic'd on %v %v: %v\n%s\n", req.Method, req.RequestURI, x, debug.Stack())
			}
		}()
		// log the request; capture a stack trace only when the status code is
		// not one of these expected values.
		defer httplog.NewLogged(req, &w).StacktraceWhen(
			httplog.StatusIsNot(
				http.StatusOK,
				http.StatusCreated,
				http.StatusAccepted,
				http.StatusBadRequest,
				http.StatusMovedPermanently,
				http.StatusTemporaryRedirect,
				http.StatusConflict,
				http.StatusNotFound,
				http.StatusUnauthorized,
				http.StatusForbidden,
				errors.StatusUnprocessableEntity,
				http.StatusSwitchingProtocols,
			),
		).Log()
		// Dispatch to the internal handler
		handler.ServeHTTP(w, req)
	})
}
// TimeoutHandler returns an http.Handler that runs h with a timeout
// determined by timeoutFunc. The new http.Handler calls h.ServeHTTP to handle
// each request, but if a call runs for longer than its time limit, the
// handler responds with a timeout error and the message provided. (If msg is
// empty, a suitable default message will be sent.) After the handler times
// out, writes by h to its http.ResponseWriter will return
// http.ErrHandlerTimeout. If timeoutFunc returns a nil timeout channel, no
// timeout will be enforced.
//
// NOTE(review): the original comment claimed a 503 Service Unavailable
// response, but the timeout writer sends http.StatusGatewayTimeout (504).
func TimeoutHandler(h http.Handler, timeoutFunc func(*http.Request) (timeout <-chan time.Time, msg string)) http.Handler {
	return &timeoutHandler{
		handler: h,
		timeout: timeoutFunc,
	}
}
// timeoutHandler is the http.Handler returned by TimeoutHandler; see
// TimeoutHandler for the full contract.
type timeoutHandler struct {
	// handler is the wrapped handler to run under a time limit.
	handler http.Handler
	// timeout returns the timeout channel and error message for a request;
	// a nil channel means no timeout is applied to that request.
	timeout func(*http.Request) (<-chan time.Time, string)
}
// ServeHTTP runs the wrapped handler in a goroutine and races it against the
// request's timeout channel. If the timeout fires first, a timeout response
// is written and subsequent writes by the handler fail with
// http.ErrHandlerTimeout (see baseTimeoutWriter).
func (t *timeoutHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	after, msg := t.timeout(r)
	if after == nil {
		// nil channel: no timeout for this request, serve directly.
		t.handler.ServeHTTP(w, r)
		return
	}
	// buffered so the handler goroutine can complete its send (and exit)
	// even when we return via the timeout branch and never receive.
	done := make(chan struct{}, 1)
	tw := newTimeoutWriter(w)
	go func() {
		t.handler.ServeHTTP(tw, r)
		done <- struct{}{}
	}()
	select {
	case <-done:
		return
	case <-after:
		tw.timeout(msg)
	}
}
// timeoutWriter is an http.ResponseWriter that can additionally be told that
// the request it is serving has timed out.
type timeoutWriter interface {
	http.ResponseWriter
	// timeout marks the request as timed out, writing the given message (or
	// a default) if no response has been sent yet.
	timeout(string)
}
// newTimeoutWriter wraps w in a timeoutWriter, choosing a wrapper type that
// preserves whichever of the optional http.CloseNotifier and http.Hijacker
// interfaces the underlying writer implements.
func newTimeoutWriter(w http.ResponseWriter) timeoutWriter {
	base := &baseTimeoutWriter{w: w}
	_, notifiable := w.(http.CloseNotifier)
	_, hijackable := w.(http.Hijacker)
	switch {
	case notifiable && hijackable:
		return &closeHijackTimeoutWriter{base}
	case notifiable:
		return &closeTimeoutWriter{base}
	case hijackable:
		return &hijackTimeoutWriter{base}
	default:
		return base
	}
}
// baseTimeoutWriter is the core timeoutWriter implementation. mu guards all
// state below it, since the serving goroutine and the timeout path may use
// the writer concurrently.
type baseTimeoutWriter struct {
	w http.ResponseWriter
	mu sync.Mutex
	// timedOut is set once timeout() has fired; later writes fail with
	// http.ErrHandlerTimeout.
	timedOut bool
	// wroteHeader records that a header (or body data) was already sent.
	wroteHeader bool
	// hijacked is set after a successful Hijack; later writes fail with
	// http.ErrHijacked.
	hijacked bool
}
// Header returns the underlying writer's header map.
func (tw *baseTimeoutWriter) Header() http.Header {
	return tw.w.Header()
}
// Write forwards body bytes to the underlying writer unless the request has
// timed out or the connection was hijacked, in which case it returns the
// corresponding sentinel error.
func (tw *baseTimeoutWriter) Write(p []byte) (int, error) {
	tw.mu.Lock()
	defer tw.mu.Unlock()
	// writing a body implies the header has (implicitly) been sent.
	tw.wroteHeader = true
	if tw.hijacked {
		return 0, http.ErrHijacked
	}
	if tw.timedOut {
		return 0, http.ErrHandlerTimeout
	}
	return tw.w.Write(p)
}
func (tw *baseTimeoutWriter) Flush() {
tw.mu.Lock()
defer tw.mu.Unlock()
if flusher, ok := tw.w.(http.Flusher); ok {
flusher.Flush()
}
}
func (tw *baseTimeoutWriter) WriteHeader(code int) {
tw.mu.Lock()
defer tw.mu.Unlock()
if tw.timedOut || tw.wroteHeader || tw.hijacked {
return
}
tw.wroteHeader = true
tw.w.WriteHeader(code)
}
func (tw *baseTimeoutWriter) timeout(msg string) {
tw.mu.Lock()
defer tw.mu.Unlock()
if !tw.wroteHeader && !tw.hijacked {
tw.w.WriteHeader(http.StatusGatewayTimeout)
if msg != "" {
tw.w.Write([]byte(msg))
} else {
enc := json.NewEncoder(tw.w)
enc.Encode(errors.NewServerTimeout(api.Resource(""), "", 0))
}
}
tw.timedOut = true
}
func (tw *baseTimeoutWriter) closeNotify() <-chan bool {
return tw.w.(http.CloseNotifier).CloseNotify()
}
func (tw *baseTimeoutWriter) hijack() (net.Conn, *bufio.ReadWriter, error) {
tw.mu.Lock()
defer tw.mu.Unlock()
if tw.timedOut {
return nil, nil, http.ErrHandlerTimeout
}
conn, rw, err := tw.w.(http.Hijacker).Hijack()
if err == nil {
tw.hijacked = true
}
return conn, rw, err
}
type closeTimeoutWriter struct {
*baseTimeoutWriter
}
func (tw *closeTimeoutWriter) CloseNotify() <-chan bool {
return tw.closeNotify()
}
type hijackTimeoutWriter struct {
*baseTimeoutWriter
}
func (tw *hijackTimeoutWriter) Hijack() (net.Conn, *bufio.ReadWriter, error) {
return tw.hijack()
}
type closeHijackTimeoutWriter struct {
*baseTimeoutWriter
}
func (tw *closeHijackTimeoutWriter) CloseNotify() <-chan bool {
return tw.closeNotify()
}
func (tw *closeHijackTimeoutWriter) Hijack() (net.Conn, *bufio.ReadWriter, error) {
return tw.hijack()
}
// TODO: use restful.CrossOriginResourceSharing
// Simple CORS implementation that wraps an http Handler
// For a more detailed implementation use https://github.com/martini-contrib/cors
// or implement CORS at your proxy layer
// Pass nil for allowedMethods and allowedHeaders to use the defaults
func CORS(handler http.Handler, allowedOriginPatterns []*regexp.Regexp, allowedMethods []string, allowedHeaders []string, allowCredentials string) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
origin := req.Header.Get("Origin")
if origin != "" {
allowed := false
for _, pattern := range allowedOriginPatterns {
if allowed = pattern.MatchString(origin); allowed {
break
}
}
if allowed {
w.Header().Set("Access-Control-Allow-Origin", origin)
// Set defaults for methods and headers if nothing was passed
if allowedMethods == nil {
allowedMethods = []string{"POST", "GET", "OPTIONS", "PUT", "DELETE"}
}
if allowedHeaders == nil {
allowedHeaders = []string{"Content-Type", "Content-Length", "Accept-Encoding", "X-CSRF-Token", "Authorization", "X-Requested-With", "If-Modified-Since"}
}
w.Header().Set("Access-Control-Allow-Methods", strings.Join(allowedMethods, ", "))
w.Header().Set("Access-Control-Allow-Headers", strings.Join(allowedHeaders, ", "))
w.Header().Set("Access-Control-Allow-Credentials", allowCredentials)
// Stop here if its a preflight OPTIONS request
if req.Method == "OPTIONS" {
w.WriteHeader(http.StatusNoContent)
return
}
}
}
// Dispatch to the next handler
handler.ServeHTTP(w, req)
})
}
// RequestAttributeGetter is a function that extracts authorizer.Attributes from an http.Request
type RequestAttributeGetter interface {
GetAttribs(req *http.Request) (attribs authorizer.Attributes)
}
type requestAttributeGetter struct {
requestContextMapper api.RequestContextMapper
requestInfoResolver *RequestInfoResolver
}
// NewAttributeGetter returns an object which implements the RequestAttributeGetter interface.
func NewRequestAttributeGetter(requestContextMapper api.RequestContextMapper, requestInfoResolver *RequestInfoResolver) RequestAttributeGetter {
return &requestAttributeGetter{requestContextMapper, requestInfoResolver}
}
func (r *requestAttributeGetter) | (req *http.Request) authorizer.Attributes {
attribs := authorizer.AttributesRecord{}
ctx, ok := r.requestContextMapper.Get(req)
if ok {
user, ok := api.UserFrom(ctx)
if ok {
attribs.User = user
}
}
requestInfo, _ := r.requestInfoResolver.GetRequestInfo(req)
// Start with common attributes that apply to resource and non-resource requests
attribs.ResourceRequest = requestInfo.IsResourceRequest
attribs.Path = requestInfo.Path
attribs.Verb = requestInfo.Verb
// If the request was for a resource in an API group, include that info
attribs.APIGroup = requestInfo.APIGroup
// If a path follows the conventions of the REST object store, then
// we can extract the resource. Otherwise, not.
attribs.Resource = requestInfo.Resource
// If the request specifies a namespace, then the namespace is filled in.
// Assumes there is no empty string namespace. Unspecified results
// in empty (does not understand defaulting rules.)
attribs.Namespace = requestInfo.Namespace
return &attribs
}
// WithAuthorizationCheck passes all authorized requests on to handler, and returns a forbidden error otherwise.
func WithAuthorizationCheck(handler http.Handler, getAttribs RequestAttributeGetter, a authorizer.Authorizer) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
err := a.Authorize(getAttribs.GetAttribs(req))
if err == nil {
handler.ServeHTTP(w, req)
return
}
forbidden(w, req)
})
}
// RequestInfo holds information parsed from the http.Request
type RequestInfo struct {
// IsResourceRequest indicates whether or not the request is for an API resource or subresource
IsResourceRequest bool
// Path is the URL path of the request
Path string
// Verb is the kube verb associated with the request for API requests, not the http verb. This includes things like list and watch.
// for non-resource requests, this is the lowercase http verb
Verb string
APIPrefix string
APIGroup string
APIVersion string
Namespace string
// Resource is the name of the resource being requested. This is not the kind. For example: pods
Resource string
// Subresource is the name of the subresource being requested. This is a different resource, scoped to the parent resource, but it may have a different kind.
// For instance, /pods has the resource "pods" and the kind "Pod", while /pods/foo/status has the resource "pods", the sub resource "status", and the kind "Pod"
// (because status operates on pods). The binding resource for a pod though may be /pods/foo/binding, which has resource "pods", subresource "binding", and kind "Binding".
Subresource string
// Name is empty for some verbs, but if the request directly indicates a name (not in body content) then this field is filled in.
Name string
// Parts are the path parts for the request, always starting with /{resource}/{name}
Parts []string
}
type RequestInfoResolver struct {
APIPrefixes sets.String
GrouplessAPIPrefixes sets.String
}
// TODO write an integration test against the swagger doc to test the RequestInfo and match up behavior to responses
// GetRequestInfo returns the information from the http request. If error is not nil, RequestInfo holds the information as best it is known before the failure
// It handles both resource and non-resource requests and fills in all the pertinent information for each.
// Valid Inputs:
// Resource paths
// /apis/{api-group}/{version}/namespaces
// /api/{version}/namespaces
// /api/{version}/namespaces/{namespace}
// /api/{version}/namespaces/{namespace}/{resource}
// /api/{version}/namespaces/{namespace}/{resource}/{resourceName}
// /api/{version}/{resource}
// /api/{version}/{resource}/{resourceName}
//
// Special verbs without subresources:
// /api/{version}/proxy/{resource}/{resourceName}
// /api/{version}/proxy/namespaces/{namespace}/{resource}/{resourceName}
// /api/{version}/redirect/namespaces/{namespace}/{resource}/{resourceName}
// /api/{version}/redirect/{resource}/{resourceName}
//
// Special verbs with subresources:
// /api/{version}/watch/{resource}
// /api/{version}/watch/namespaces/{namespace}/{resource}
//
// NonResource paths
// /apis/{api-group}/{version}
// /apis/{api-group}
// /apis
// /api/{version}
// /api
// /healthz
// /
func (r *RequestInfoResolver) GetRequestInfo(req *http.Request) (RequestInfo, error) {
// start with a non-resource request until proven otherwise
requestInfo := RequestInfo{
IsResourceRequest: false,
Path: req.URL.Path,
Verb: strings.ToLower(req.Method),
}
currentParts := splitPath(req.URL.Path)
if len(currentParts) < 3 {
// return a non-resource request
return requestInfo, nil
}
if !r.APIPrefixes.Has(currentParts[0]) {
// return a non-resource request
return requestInfo, nil
}
requestInfo.APIPrefix = currentParts[0]
currentParts = currentParts[1:]
if !r.GrouplessAPIPrefixes.Has(requestInfo.APIPrefix) {
// one part (APIPrefix) has already been consumed, so this is actually "do we have four parts?"
if len(currentParts) < 3 {
// return a non-resource request
return requestInfo, nil
}
requestInfo.APIGroup = currentParts[0]
currentParts = currentParts[1:]
}
requestInfo.IsResourceRequest = true
requestInfo.APIVersion = currentParts[0]
currentParts = currentParts[1:]
// handle input of form /{specialVerb}/*
if specialVerbs.Has(currentParts[0]) {
if len(currentParts) < 2 {
return requestInfo, fmt.Errorf("unable to determine kind and namespace from url, %v", req.URL)
}
requestInfo.Verb = currentParts[0]
currentParts = currentParts[1:]
} else {
switch req.Method {
case "POST":
requestInfo.Verb = "create"
case "GET", "HEAD":
requestInfo.Verb = "get"
case "PUT":
requestInfo.Verb = "update"
case "PATCH":
requestInfo.Verb = "patch"
case "DELETE":
requestInfo.Verb = "delete"
default:
requestInfo.Verb = ""
}
}
// URL forms: /namespaces/{namespace}/{kind}/*, where parts are adjusted to be relative to kind
if currentParts[0] == "namespaces" {
if len(currentParts) > 1 {
requestInfo.Namespace = currentParts[1]
// if there is another step after the namespace name and it is not a known namespace subresource
// move currentParts to include it as a resource in its own right
if len(currentParts) > 2 {
currentParts = currentParts[2:]
}
}
} else {
requestInfo.Namespace = api.NamespaceNone
}
// parsing successful, so we now know the proper value for .Parts
requestInfo.Parts = currentParts
// parts look like: resource/resourceName/subresource/other/stuff/we/don't/interpret
switch {
case len(requestInfo.Parts) >= 3 && !specialVerbsNoSubresources.Has(requestInfo.Verb):
requestInfo.Subresource = requestInfo.Parts[2]
fallthrough
case len(requestInfo.Parts) >= 2:
requestInfo.Name = requestInfo.Parts[1]
fallthrough
case len(requestInfo.Parts) >= 1:
requestInfo.Resource = requestInfo.Parts[0]
}
// if there's no name on the request and we thought it was a get before, then the actual verb is a list
if len(requestInfo.Name) == 0 && requestInfo.Verb == "get" {
requestInfo.Verb = "list"
}
// if there's no name on the request and we thought it was a delete before, then the actual verb is deletecollection
if len(requestInfo.Name) == 0 && requestInfo.Verb == "delete" {
requestInfo.Verb = "deletecollection"
}
return requestInfo, nil
}
| GetAttribs | identifier_name |
handlers.go | /*
Copyright 2014 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package apiserver
import (
"bufio"
"encoding/json"
"fmt"
"net"
"net/http"
"regexp"
"runtime/debug"
"strings"
"sync"
"time"
"github.com/golang/glog"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/errors"
"k8s.io/kubernetes/pkg/auth/authorizer"
"k8s.io/kubernetes/pkg/httplog"
"k8s.io/kubernetes/pkg/util/sets"
)
// specialVerbs contains just strings which are used in REST paths for special actions that don't fall under the normal
// CRUDdy GET/POST/PUT/DELETE actions on REST objects.
// TODO: find a way to keep this up to date automatically. Maybe dynamically populate list as handlers added to
// master's Mux.
var specialVerbs = sets.NewString("proxy", "redirect", "watch")
// specialVerbsNoSubresources contains root verbs which do not allow subresources
var specialVerbsNoSubresources = sets.NewString("proxy", "redirect")
// Constant for the retry-after interval on rate limiting.
// TODO: maybe make this dynamic? or user-adjustable?
const RetryAfter = "1"
// IsReadOnlyReq() is true for any (or at least many) request which has no observable
// side effects on state of apiserver (though there may be internal side effects like
// caching and logging).
func IsReadOnlyReq(req http.Request) bool {
if req.Method == "GET" {
// TODO: add OPTIONS and HEAD if we ever support those.
return true
}
return false
}
// ReadOnly passes all GET requests on to handler, and returns an error on all other requests.
func ReadOnly(handler http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
if IsReadOnlyReq(*req) {
handler.ServeHTTP(w, req)
return
}
w.WriteHeader(http.StatusForbidden)
fmt.Fprintf(w, "This is a read-only endpoint.")
})
}
type LongRunningRequestCheck func(r *http.Request) bool
// BasicLongRunningRequestCheck pathRegex operates against the url path, the queryParams match is case insensitive.
// Any one match flags the request.
// TODO tighten this check to eliminate the abuse potential by malicious clients that start setting queryParameters
// to bypass the rate limitter. This could be done using a full parse and special casing the bits we need.
func BasicLongRunningRequestCheck(pathRegex *regexp.Regexp, queryParams map[string]string) LongRunningRequestCheck {
return func(r *http.Request) bool {
if pathRegex.MatchString(r.URL.Path) {
return true
}
for key, expectedValue := range queryParams {
if strings.ToLower(expectedValue) == strings.ToLower(r.URL.Query().Get(key)) {
return true
}
}
return false
}
}
// MaxInFlight limits the number of in-flight requests to buffer size of the passed in channel.
func MaxInFlightLimit(c chan bool, longRunningRequestCheck LongRunningRequestCheck, handler http.Handler) http.Handler {
if c == nil {
return handler
}
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
if longRunningRequestCheck(r) {
// Skip tracking long running events.
handler.ServeHTTP(w, r)
return
}
select {
case c <- true:
defer func() { <-c }()
handler.ServeHTTP(w, r)
default:
tooManyRequests(w)
}
})
}
func tooManyRequests(w http.ResponseWriter) {
// Return a 429 status indicating "Too Many Requests"
w.Header().Set("Retry-After", RetryAfter)
http.Error(w, "Too many requests, please try again later.", errors.StatusTooManyRequests)
}
// RecoverPanics wraps an http Handler to recover and log panics.
func RecoverPanics(handler http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
defer func() {
if x := recover(); x != nil {
http.Error(w, "apis panic. Look in log for details.", http.StatusInternalServerError)
glog.Errorf("APIServer panic'd on %v %v: %v\n%s\n", req.Method, req.RequestURI, x, debug.Stack())
}
}()
defer httplog.NewLogged(req, &w).StacktraceWhen(
httplog.StatusIsNot(
http.StatusOK,
http.StatusCreated,
http.StatusAccepted,
http.StatusBadRequest,
http.StatusMovedPermanently,
http.StatusTemporaryRedirect,
http.StatusConflict,
http.StatusNotFound,
http.StatusUnauthorized,
http.StatusForbidden,
errors.StatusUnprocessableEntity,
http.StatusSwitchingProtocols,
),
).Log()
// Dispatch to the internal handler
handler.ServeHTTP(w, req)
})
}
// TimeoutHandler returns an http.Handler that runs h with a timeout
// determined by timeoutFunc. The new http.Handler calls h.ServeHTTP to handle
// each request, but if a call runs for longer than its time limit, the
// handler responds with a 503 Service Unavailable error and the message
// provided. (If msg is empty, a suitable default message with be sent.) After
// the handler times out, writes by h to its http.ResponseWriter will return
// http.ErrHandlerTimeout. If timeoutFunc returns a nil timeout channel, no
// timeout will be enforced.
func TimeoutHandler(h http.Handler, timeoutFunc func(*http.Request) (timeout <-chan time.Time, msg string)) http.Handler {
return &timeoutHandler{h, timeoutFunc}
}
type timeoutHandler struct {
handler http.Handler
timeout func(*http.Request) (<-chan time.Time, string)
}
func (t *timeoutHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
after, msg := t.timeout(r)
if after == nil {
t.handler.ServeHTTP(w, r)
return
}
done := make(chan struct{}, 1)
tw := newTimeoutWriter(w)
go func() {
t.handler.ServeHTTP(tw, r)
done <- struct{}{}
}()
select {
case <-done:
return
case <-after:
tw.timeout(msg)
}
}
type timeoutWriter interface {
http.ResponseWriter
timeout(string)
}
func newTimeoutWriter(w http.ResponseWriter) timeoutWriter {
base := &baseTimeoutWriter{w: w}
_, notifiable := w.(http.CloseNotifier)
_, hijackable := w.(http.Hijacker)
switch {
case notifiable && hijackable:
return &closeHijackTimeoutWriter{base}
case notifiable:
return &closeTimeoutWriter{base}
case hijackable:
return &hijackTimeoutWriter{base}
default:
return base
}
}
type baseTimeoutWriter struct {
w http.ResponseWriter
mu sync.Mutex
timedOut bool
wroteHeader bool
hijacked bool
}
func (tw *baseTimeoutWriter) Header() http.Header {
return tw.w.Header()
}
func (tw *baseTimeoutWriter) Write(p []byte) (int, error) {
tw.mu.Lock()
defer tw.mu.Unlock()
tw.wroteHeader = true
if tw.hijacked {
return 0, http.ErrHijacked
}
if tw.timedOut {
return 0, http.ErrHandlerTimeout
}
return tw.w.Write(p)
}
func (tw *baseTimeoutWriter) Flush() {
tw.mu.Lock()
defer tw.mu.Unlock()
if flusher, ok := tw.w.(http.Flusher); ok {
flusher.Flush()
}
}
func (tw *baseTimeoutWriter) WriteHeader(code int) {
tw.mu.Lock()
defer tw.mu.Unlock()
if tw.timedOut || tw.wroteHeader || tw.hijacked {
return
}
tw.wroteHeader = true
tw.w.WriteHeader(code)
}
func (tw *baseTimeoutWriter) timeout(msg string) {
tw.mu.Lock()
defer tw.mu.Unlock()
if !tw.wroteHeader && !tw.hijacked |
tw.timedOut = true
}
func (tw *baseTimeoutWriter) closeNotify() <-chan bool {
return tw.w.(http.CloseNotifier).CloseNotify()
}
func (tw *baseTimeoutWriter) hijack() (net.Conn, *bufio.ReadWriter, error) {
tw.mu.Lock()
defer tw.mu.Unlock()
if tw.timedOut {
return nil, nil, http.ErrHandlerTimeout
}
conn, rw, err := tw.w.(http.Hijacker).Hijack()
if err == nil {
tw.hijacked = true
}
return conn, rw, err
}
type closeTimeoutWriter struct {
*baseTimeoutWriter
}
func (tw *closeTimeoutWriter) CloseNotify() <-chan bool {
return tw.closeNotify()
}
type hijackTimeoutWriter struct {
*baseTimeoutWriter
}
func (tw *hijackTimeoutWriter) Hijack() (net.Conn, *bufio.ReadWriter, error) {
return tw.hijack()
}
type closeHijackTimeoutWriter struct {
*baseTimeoutWriter
}
func (tw *closeHijackTimeoutWriter) CloseNotify() <-chan bool {
return tw.closeNotify()
}
func (tw *closeHijackTimeoutWriter) Hijack() (net.Conn, *bufio.ReadWriter, error) {
return tw.hijack()
}
// TODO: use restful.CrossOriginResourceSharing
// Simple CORS implementation that wraps an http Handler
// For a more detailed implementation use https://github.com/martini-contrib/cors
// or implement CORS at your proxy layer
// Pass nil for allowedMethods and allowedHeaders to use the defaults
func CORS(handler http.Handler, allowedOriginPatterns []*regexp.Regexp, allowedMethods []string, allowedHeaders []string, allowCredentials string) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
origin := req.Header.Get("Origin")
if origin != "" {
allowed := false
for _, pattern := range allowedOriginPatterns {
if allowed = pattern.MatchString(origin); allowed {
break
}
}
if allowed {
w.Header().Set("Access-Control-Allow-Origin", origin)
// Set defaults for methods and headers if nothing was passed
if allowedMethods == nil {
allowedMethods = []string{"POST", "GET", "OPTIONS", "PUT", "DELETE"}
}
if allowedHeaders == nil {
allowedHeaders = []string{"Content-Type", "Content-Length", "Accept-Encoding", "X-CSRF-Token", "Authorization", "X-Requested-With", "If-Modified-Since"}
}
w.Header().Set("Access-Control-Allow-Methods", strings.Join(allowedMethods, ", "))
w.Header().Set("Access-Control-Allow-Headers", strings.Join(allowedHeaders, ", "))
w.Header().Set("Access-Control-Allow-Credentials", allowCredentials)
// Stop here if its a preflight OPTIONS request
if req.Method == "OPTIONS" {
w.WriteHeader(http.StatusNoContent)
return
}
}
}
// Dispatch to the next handler
handler.ServeHTTP(w, req)
})
}
// RequestAttributeGetter is a function that extracts authorizer.Attributes from an http.Request
type RequestAttributeGetter interface {
GetAttribs(req *http.Request) (attribs authorizer.Attributes)
}
type requestAttributeGetter struct {
requestContextMapper api.RequestContextMapper
requestInfoResolver *RequestInfoResolver
}
// NewAttributeGetter returns an object which implements the RequestAttributeGetter interface.
func NewRequestAttributeGetter(requestContextMapper api.RequestContextMapper, requestInfoResolver *RequestInfoResolver) RequestAttributeGetter {
return &requestAttributeGetter{requestContextMapper, requestInfoResolver}
}
func (r *requestAttributeGetter) GetAttribs(req *http.Request) authorizer.Attributes {
attribs := authorizer.AttributesRecord{}
ctx, ok := r.requestContextMapper.Get(req)
if ok {
user, ok := api.UserFrom(ctx)
if ok {
attribs.User = user
}
}
requestInfo, _ := r.requestInfoResolver.GetRequestInfo(req)
// Start with common attributes that apply to resource and non-resource requests
attribs.ResourceRequest = requestInfo.IsResourceRequest
attribs.Path = requestInfo.Path
attribs.Verb = requestInfo.Verb
// If the request was for a resource in an API group, include that info
attribs.APIGroup = requestInfo.APIGroup
// If a path follows the conventions of the REST object store, then
// we can extract the resource. Otherwise, not.
attribs.Resource = requestInfo.Resource
// If the request specifies a namespace, then the namespace is filled in.
// Assumes there is no empty string namespace. Unspecified results
// in empty (does not understand defaulting rules.)
attribs.Namespace = requestInfo.Namespace
return &attribs
}
// WithAuthorizationCheck passes all authorized requests on to handler, and returns a forbidden error otherwise.
func WithAuthorizationCheck(handler http.Handler, getAttribs RequestAttributeGetter, a authorizer.Authorizer) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
err := a.Authorize(getAttribs.GetAttribs(req))
if err == nil {
handler.ServeHTTP(w, req)
return
}
forbidden(w, req)
})
}
// RequestInfo holds information parsed from the http.Request
type RequestInfo struct {
// IsResourceRequest indicates whether or not the request is for an API resource or subresource
IsResourceRequest bool
// Path is the URL path of the request
Path string
// Verb is the kube verb associated with the request for API requests, not the http verb. This includes things like list and watch.
// for non-resource requests, this is the lowercase http verb
Verb string
APIPrefix string
APIGroup string
APIVersion string
Namespace string
// Resource is the name of the resource being requested. This is not the kind. For example: pods
Resource string
// Subresource is the name of the subresource being requested. This is a different resource, scoped to the parent resource, but it may have a different kind.
// For instance, /pods has the resource "pods" and the kind "Pod", while /pods/foo/status has the resource "pods", the sub resource "status", and the kind "Pod"
// (because status operates on pods). The binding resource for a pod though may be /pods/foo/binding, which has resource "pods", subresource "binding", and kind "Binding".
Subresource string
// Name is empty for some verbs, but if the request directly indicates a name (not in body content) then this field is filled in.
Name string
// Parts are the path parts for the request, always starting with /{resource}/{name}
Parts []string
}
type RequestInfoResolver struct {
APIPrefixes sets.String
GrouplessAPIPrefixes sets.String
}
// TODO write an integration test against the swagger doc to test the RequestInfo and match up behavior to responses
// GetRequestInfo returns the information from the http request. If error is not nil, RequestInfo holds the information as best it is known before the failure
// It handles both resource and non-resource requests and fills in all the pertinent information for each.
// Valid Inputs:
// Resource paths
// /apis/{api-group}/{version}/namespaces
// /api/{version}/namespaces
// /api/{version}/namespaces/{namespace}
// /api/{version}/namespaces/{namespace}/{resource}
// /api/{version}/namespaces/{namespace}/{resource}/{resourceName}
// /api/{version}/{resource}
// /api/{version}/{resource}/{resourceName}
//
// Special verbs without subresources:
// /api/{version}/proxy/{resource}/{resourceName}
// /api/{version}/proxy/namespaces/{namespace}/{resource}/{resourceName}
// /api/{version}/redirect/namespaces/{namespace}/{resource}/{resourceName}
// /api/{version}/redirect/{resource}/{resourceName}
//
// Special verbs with subresources:
// /api/{version}/watch/{resource}
// /api/{version}/watch/namespaces/{namespace}/{resource}
//
// NonResource paths
// /apis/{api-group}/{version}
// /apis/{api-group}
// /apis
// /api/{version}
// /api
// /healthz
// /
func (r *RequestInfoResolver) GetRequestInfo(req *http.Request) (RequestInfo, error) {
// start with a non-resource request until proven otherwise
requestInfo := RequestInfo{
IsResourceRequest: false,
Path: req.URL.Path,
Verb: strings.ToLower(req.Method),
}
currentParts := splitPath(req.URL.Path)
if len(currentParts) < 3 {
// return a non-resource request
return requestInfo, nil
}
if !r.APIPrefixes.Has(currentParts[0]) {
// return a non-resource request
return requestInfo, nil
}
requestInfo.APIPrefix = currentParts[0]
currentParts = currentParts[1:]
if !r.GrouplessAPIPrefixes.Has(requestInfo.APIPrefix) {
// one part (APIPrefix) has already been consumed, so this is actually "do we have four parts?"
if len(currentParts) < 3 {
// return a non-resource request
return requestInfo, nil
}
requestInfo.APIGroup = currentParts[0]
currentParts = currentParts[1:]
}
requestInfo.IsResourceRequest = true
requestInfo.APIVersion = currentParts[0]
currentParts = currentParts[1:]
// handle input of form /{specialVerb}/*
if specialVerbs.Has(currentParts[0]) {
if len(currentParts) < 2 {
return requestInfo, fmt.Errorf("unable to determine kind and namespace from url, %v", req.URL)
}
requestInfo.Verb = currentParts[0]
currentParts = currentParts[1:]
} else {
switch req.Method {
case "POST":
requestInfo.Verb = "create"
case "GET", "HEAD":
requestInfo.Verb = "get"
case "PUT":
requestInfo.Verb = "update"
case "PATCH":
requestInfo.Verb = "patch"
case "DELETE":
requestInfo.Verb = "delete"
default:
requestInfo.Verb = ""
}
}
// URL forms: /namespaces/{namespace}/{kind}/*, where parts are adjusted to be relative to kind
if currentParts[0] == "namespaces" {
if len(currentParts) > 1 {
requestInfo.Namespace = currentParts[1]
// if there is another step after the namespace name and it is not a known namespace subresource
// move currentParts to include it as a resource in its own right
if len(currentParts) > 2 {
currentParts = currentParts[2:]
}
}
} else {
requestInfo.Namespace = api.NamespaceNone
}
// parsing successful, so we now know the proper value for .Parts
requestInfo.Parts = currentParts
// parts look like: resource/resourceName/subresource/other/stuff/we/don't/interpret
switch {
case len(requestInfo.Parts) >= 3 && !specialVerbsNoSubresources.Has(requestInfo.Verb):
requestInfo.Subresource = requestInfo.Parts[2]
fallthrough
case len(requestInfo.Parts) >= 2:
requestInfo.Name = requestInfo.Parts[1]
fallthrough
case len(requestInfo.Parts) >= 1:
requestInfo.Resource = requestInfo.Parts[0]
}
// if there's no name on the request and we thought it was a get before, then the actual verb is a list
if len(requestInfo.Name) == 0 && requestInfo.Verb == "get" {
requestInfo.Verb = "list"
}
// if there's no name on the request and we thought it was a delete before, then the actual verb is deletecollection
if len(requestInfo.Name) == 0 && requestInfo.Verb == "delete" {
requestInfo.Verb = "deletecollection"
}
return requestInfo, nil
}
| {
tw.w.WriteHeader(http.StatusGatewayTimeout)
if msg != "" {
tw.w.Write([]byte(msg))
} else {
enc := json.NewEncoder(tw.w)
enc.Encode(errors.NewServerTimeout(api.Resource(""), "", 0))
}
} | conditional_block |
handlers.go | /*
Copyright 2014 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package apiserver
import (
"bufio"
"encoding/json"
"fmt"
"net"
"net/http"
"regexp"
"runtime/debug"
"strings"
"sync"
"time"
"github.com/golang/glog"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/errors"
"k8s.io/kubernetes/pkg/auth/authorizer"
"k8s.io/kubernetes/pkg/httplog"
"k8s.io/kubernetes/pkg/util/sets"
)
// specialVerbs contains just strings which are used in REST paths for special actions that don't fall under the normal
// CRUDdy GET/POST/PUT/DELETE actions on REST objects.
// TODO: find a way to keep this up to date automatically. Maybe dynamically populate list as handlers added to
// master's Mux.
var specialVerbs = sets.NewString("proxy", "redirect", "watch")
// specialVerbsNoSubresources contains root verbs which do not allow subresources
var specialVerbsNoSubresources = sets.NewString("proxy", "redirect")
// Constant for the retry-after interval on rate limiting.
// TODO: maybe make this dynamic? or user-adjustable?
const RetryAfter = "1"
// IsReadOnlyReq() is true for any (or at least many) request which has no observable
// side effects on state of apiserver (though there may be internal side effects like
// caching and logging).
func IsReadOnlyReq(req http.Request) bool {
if req.Method == "GET" {
// TODO: add OPTIONS and HEAD if we ever support those.
return true
}
return false
}
// ReadOnly passes all GET requests on to handler, and returns an error on all other requests.
func ReadOnly(handler http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
if IsReadOnlyReq(*req) {
handler.ServeHTTP(w, req)
return
}
w.WriteHeader(http.StatusForbidden)
fmt.Fprintf(w, "This is a read-only endpoint.")
})
}
type LongRunningRequestCheck func(r *http.Request) bool
// BasicLongRunningRequestCheck pathRegex operates against the url path, the queryParams match is case insensitive.
// Any one match flags the request.
// TODO tighten this check to eliminate the abuse potential by malicious clients that start setting queryParameters
// to bypass the rate limitter. This could be done using a full parse and special casing the bits we need.
func BasicLongRunningRequestCheck(pathRegex *regexp.Regexp, queryParams map[string]string) LongRunningRequestCheck {
return func(r *http.Request) bool {
if pathRegex.MatchString(r.URL.Path) {
return true
}
for key, expectedValue := range queryParams {
if strings.ToLower(expectedValue) == strings.ToLower(r.URL.Query().Get(key)) {
return true
}
}
return false
}
}
// MaxInFlight limits the number of in-flight requests to buffer size of the passed in channel.
func MaxInFlightLimit(c chan bool, longRunningRequestCheck LongRunningRequestCheck, handler http.Handler) http.Handler {
if c == nil {
return handler
}
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
if longRunningRequestCheck(r) {
// Skip tracking long running events.
handler.ServeHTTP(w, r)
return
}
select {
case c <- true:
defer func() { <-c }()
handler.ServeHTTP(w, r)
default:
tooManyRequests(w)
}
})
}
func tooManyRequests(w http.ResponseWriter) {
// Return a 429 status indicating "Too Many Requests"
w.Header().Set("Retry-After", RetryAfter)
http.Error(w, "Too many requests, please try again later.", errors.StatusTooManyRequests)
}
// RecoverPanics wraps an http Handler to recover and log panics.
func RecoverPanics(handler http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
defer func() {
if x := recover(); x != nil {
http.Error(w, "apis panic. Look in log for details.", http.StatusInternalServerError)
glog.Errorf("APIServer panic'd on %v %v: %v\n%s\n", req.Method, req.RequestURI, x, debug.Stack())
}
}()
defer httplog.NewLogged(req, &w).StacktraceWhen(
httplog.StatusIsNot(
http.StatusOK,
http.StatusCreated,
http.StatusAccepted,
http.StatusBadRequest,
http.StatusMovedPermanently,
http.StatusTemporaryRedirect,
http.StatusConflict,
http.StatusNotFound,
http.StatusUnauthorized,
http.StatusForbidden,
errors.StatusUnprocessableEntity,
http.StatusSwitchingProtocols,
),
).Log()
// Dispatch to the internal handler
handler.ServeHTTP(w, req)
})
}
// TimeoutHandler returns an http.Handler that runs h with a timeout
// determined by timeoutFunc. The new http.Handler calls h.ServeHTTP to handle
// each request, but if a call runs for longer than its time limit, the
// handler responds with a 503 Service Unavailable error and the message
// provided. (If msg is empty, a suitable default message with be sent.) After
// the handler times out, writes by h to its http.ResponseWriter will return
// http.ErrHandlerTimeout. If timeoutFunc returns a nil timeout channel, no
// timeout will be enforced.
func TimeoutHandler(h http.Handler, timeoutFunc func(*http.Request) (timeout <-chan time.Time, msg string)) http.Handler |
type timeoutHandler struct {
handler http.Handler
timeout func(*http.Request) (<-chan time.Time, string)
}
func (t *timeoutHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
after, msg := t.timeout(r)
if after == nil {
t.handler.ServeHTTP(w, r)
return
}
done := make(chan struct{}, 1)
tw := newTimeoutWriter(w)
go func() {
t.handler.ServeHTTP(tw, r)
done <- struct{}{}
}()
select {
case <-done:
return
case <-after:
tw.timeout(msg)
}
}
type timeoutWriter interface {
http.ResponseWriter
timeout(string)
}
func newTimeoutWriter(w http.ResponseWriter) timeoutWriter {
base := &baseTimeoutWriter{w: w}
_, notifiable := w.(http.CloseNotifier)
_, hijackable := w.(http.Hijacker)
switch {
case notifiable && hijackable:
return &closeHijackTimeoutWriter{base}
case notifiable:
return &closeTimeoutWriter{base}
case hijackable:
return &hijackTimeoutWriter{base}
default:
return base
}
}
type baseTimeoutWriter struct {
w http.ResponseWriter
mu sync.Mutex
timedOut bool
wroteHeader bool
hijacked bool
}
func (tw *baseTimeoutWriter) Header() http.Header {
return tw.w.Header()
}
func (tw *baseTimeoutWriter) Write(p []byte) (int, error) {
tw.mu.Lock()
defer tw.mu.Unlock()
tw.wroteHeader = true
if tw.hijacked {
return 0, http.ErrHijacked
}
if tw.timedOut {
return 0, http.ErrHandlerTimeout
}
return tw.w.Write(p)
}
func (tw *baseTimeoutWriter) Flush() {
tw.mu.Lock()
defer tw.mu.Unlock()
if flusher, ok := tw.w.(http.Flusher); ok {
flusher.Flush()
}
}
func (tw *baseTimeoutWriter) WriteHeader(code int) {
tw.mu.Lock()
defer tw.mu.Unlock()
if tw.timedOut || tw.wroteHeader || tw.hijacked {
return
}
tw.wroteHeader = true
tw.w.WriteHeader(code)
}
func (tw *baseTimeoutWriter) timeout(msg string) {
tw.mu.Lock()
defer tw.mu.Unlock()
if !tw.wroteHeader && !tw.hijacked {
tw.w.WriteHeader(http.StatusGatewayTimeout)
if msg != "" {
tw.w.Write([]byte(msg))
} else {
enc := json.NewEncoder(tw.w)
enc.Encode(errors.NewServerTimeout(api.Resource(""), "", 0))
}
}
tw.timedOut = true
}
func (tw *baseTimeoutWriter) closeNotify() <-chan bool {
return tw.w.(http.CloseNotifier).CloseNotify()
}
func (tw *baseTimeoutWriter) hijack() (net.Conn, *bufio.ReadWriter, error) {
tw.mu.Lock()
defer tw.mu.Unlock()
if tw.timedOut {
return nil, nil, http.ErrHandlerTimeout
}
conn, rw, err := tw.w.(http.Hijacker).Hijack()
if err == nil {
tw.hijacked = true
}
return conn, rw, err
}
type closeTimeoutWriter struct {
*baseTimeoutWriter
}
func (tw *closeTimeoutWriter) CloseNotify() <-chan bool {
return tw.closeNotify()
}
type hijackTimeoutWriter struct {
*baseTimeoutWriter
}
func (tw *hijackTimeoutWriter) Hijack() (net.Conn, *bufio.ReadWriter, error) {
return tw.hijack()
}
type closeHijackTimeoutWriter struct {
*baseTimeoutWriter
}
func (tw *closeHijackTimeoutWriter) CloseNotify() <-chan bool {
return tw.closeNotify()
}
func (tw *closeHijackTimeoutWriter) Hijack() (net.Conn, *bufio.ReadWriter, error) {
return tw.hijack()
}
// TODO: use restful.CrossOriginResourceSharing
// Simple CORS implementation that wraps an http Handler
// For a more detailed implementation use https://github.com/martini-contrib/cors
// or implement CORS at your proxy layer
// Pass nil for allowedMethods and allowedHeaders to use the defaults
func CORS(handler http.Handler, allowedOriginPatterns []*regexp.Regexp, allowedMethods []string, allowedHeaders []string, allowCredentials string) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
origin := req.Header.Get("Origin")
if origin != "" {
allowed := false
for _, pattern := range allowedOriginPatterns {
if allowed = pattern.MatchString(origin); allowed {
break
}
}
if allowed {
w.Header().Set("Access-Control-Allow-Origin", origin)
// Set defaults for methods and headers if nothing was passed
if allowedMethods == nil {
allowedMethods = []string{"POST", "GET", "OPTIONS", "PUT", "DELETE"}
}
if allowedHeaders == nil {
allowedHeaders = []string{"Content-Type", "Content-Length", "Accept-Encoding", "X-CSRF-Token", "Authorization", "X-Requested-With", "If-Modified-Since"}
}
w.Header().Set("Access-Control-Allow-Methods", strings.Join(allowedMethods, ", "))
w.Header().Set("Access-Control-Allow-Headers", strings.Join(allowedHeaders, ", "))
w.Header().Set("Access-Control-Allow-Credentials", allowCredentials)
// Stop here if its a preflight OPTIONS request
if req.Method == "OPTIONS" {
w.WriteHeader(http.StatusNoContent)
return
}
}
}
// Dispatch to the next handler
handler.ServeHTTP(w, req)
})
}
// RequestAttributeGetter is a function that extracts authorizer.Attributes from an http.Request
type RequestAttributeGetter interface {
GetAttribs(req *http.Request) (attribs authorizer.Attributes)
}
type requestAttributeGetter struct {
requestContextMapper api.RequestContextMapper
requestInfoResolver *RequestInfoResolver
}
// NewAttributeGetter returns an object which implements the RequestAttributeGetter interface.
func NewRequestAttributeGetter(requestContextMapper api.RequestContextMapper, requestInfoResolver *RequestInfoResolver) RequestAttributeGetter {
return &requestAttributeGetter{requestContextMapper, requestInfoResolver}
}
func (r *requestAttributeGetter) GetAttribs(req *http.Request) authorizer.Attributes {
attribs := authorizer.AttributesRecord{}
ctx, ok := r.requestContextMapper.Get(req)
if ok {
user, ok := api.UserFrom(ctx)
if ok {
attribs.User = user
}
}
requestInfo, _ := r.requestInfoResolver.GetRequestInfo(req)
// Start with common attributes that apply to resource and non-resource requests
attribs.ResourceRequest = requestInfo.IsResourceRequest
attribs.Path = requestInfo.Path
attribs.Verb = requestInfo.Verb
// If the request was for a resource in an API group, include that info
attribs.APIGroup = requestInfo.APIGroup
// If a path follows the conventions of the REST object store, then
// we can extract the resource. Otherwise, not.
attribs.Resource = requestInfo.Resource
// If the request specifies a namespace, then the namespace is filled in.
// Assumes there is no empty string namespace. Unspecified results
// in empty (does not understand defaulting rules.)
attribs.Namespace = requestInfo.Namespace
return &attribs
}
// WithAuthorizationCheck passes all authorized requests on to handler, and returns a forbidden error otherwise.
func WithAuthorizationCheck(handler http.Handler, getAttribs RequestAttributeGetter, a authorizer.Authorizer) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
err := a.Authorize(getAttribs.GetAttribs(req))
if err == nil {
handler.ServeHTTP(w, req)
return
}
forbidden(w, req)
})
}
// RequestInfo holds information parsed from the http.Request
type RequestInfo struct {
// IsResourceRequest indicates whether or not the request is for an API resource or subresource
IsResourceRequest bool
// Path is the URL path of the request
Path string
// Verb is the kube verb associated with the request for API requests, not the http verb. This includes things like list and watch.
// for non-resource requests, this is the lowercase http verb
Verb string
APIPrefix string
APIGroup string
APIVersion string
Namespace string
// Resource is the name of the resource being requested. This is not the kind. For example: pods
Resource string
// Subresource is the name of the subresource being requested. This is a different resource, scoped to the parent resource, but it may have a different kind.
// For instance, /pods has the resource "pods" and the kind "Pod", while /pods/foo/status has the resource "pods", the sub resource "status", and the kind "Pod"
// (because status operates on pods). The binding resource for a pod though may be /pods/foo/binding, which has resource "pods", subresource "binding", and kind "Binding".
Subresource string
// Name is empty for some verbs, but if the request directly indicates a name (not in body content) then this field is filled in.
Name string
// Parts are the path parts for the request, always starting with /{resource}/{name}
Parts []string
}
type RequestInfoResolver struct {
APIPrefixes sets.String
GrouplessAPIPrefixes sets.String
}
// TODO write an integration test against the swagger doc to test the RequestInfo and match up behavior to responses
// GetRequestInfo returns the information from the http request. If error is not nil, RequestInfo holds the information as best it is known before the failure
// It handles both resource and non-resource requests and fills in all the pertinent information for each.
// Valid Inputs:
// Resource paths
// /apis/{api-group}/{version}/namespaces
// /api/{version}/namespaces
// /api/{version}/namespaces/{namespace}
// /api/{version}/namespaces/{namespace}/{resource}
// /api/{version}/namespaces/{namespace}/{resource}/{resourceName}
// /api/{version}/{resource}
// /api/{version}/{resource}/{resourceName}
//
// Special verbs without subresources:
// /api/{version}/proxy/{resource}/{resourceName}
// /api/{version}/proxy/namespaces/{namespace}/{resource}/{resourceName}
// /api/{version}/redirect/namespaces/{namespace}/{resource}/{resourceName}
// /api/{version}/redirect/{resource}/{resourceName}
//
// Special verbs with subresources:
// /api/{version}/watch/{resource}
// /api/{version}/watch/namespaces/{namespace}/{resource}
//
// NonResource paths
// /apis/{api-group}/{version}
// /apis/{api-group}
// /apis
// /api/{version}
// /api
// /healthz
// /
func (r *RequestInfoResolver) GetRequestInfo(req *http.Request) (RequestInfo, error) {
// start with a non-resource request until proven otherwise
requestInfo := RequestInfo{
IsResourceRequest: false,
Path: req.URL.Path,
Verb: strings.ToLower(req.Method),
}
currentParts := splitPath(req.URL.Path)
if len(currentParts) < 3 {
// return a non-resource request
return requestInfo, nil
}
if !r.APIPrefixes.Has(currentParts[0]) {
// return a non-resource request
return requestInfo, nil
}
requestInfo.APIPrefix = currentParts[0]
currentParts = currentParts[1:]
if !r.GrouplessAPIPrefixes.Has(requestInfo.APIPrefix) {
// one part (APIPrefix) has already been consumed, so this is actually "do we have four parts?"
if len(currentParts) < 3 {
// return a non-resource request
return requestInfo, nil
}
requestInfo.APIGroup = currentParts[0]
currentParts = currentParts[1:]
}
requestInfo.IsResourceRequest = true
requestInfo.APIVersion = currentParts[0]
currentParts = currentParts[1:]
// handle input of form /{specialVerb}/*
if specialVerbs.Has(currentParts[0]) {
if len(currentParts) < 2 {
return requestInfo, fmt.Errorf("unable to determine kind and namespace from url, %v", req.URL)
}
requestInfo.Verb = currentParts[0]
currentParts = currentParts[1:]
} else {
switch req.Method {
case "POST":
requestInfo.Verb = "create"
case "GET", "HEAD":
requestInfo.Verb = "get"
case "PUT":
requestInfo.Verb = "update"
case "PATCH":
requestInfo.Verb = "patch"
case "DELETE":
requestInfo.Verb = "delete"
default:
requestInfo.Verb = ""
}
}
// URL forms: /namespaces/{namespace}/{kind}/*, where parts are adjusted to be relative to kind
if currentParts[0] == "namespaces" {
if len(currentParts) > 1 {
requestInfo.Namespace = currentParts[1]
// if there is another step after the namespace name and it is not a known namespace subresource
// move currentParts to include it as a resource in its own right
if len(currentParts) > 2 {
currentParts = currentParts[2:]
}
}
} else {
requestInfo.Namespace = api.NamespaceNone
}
// parsing successful, so we now know the proper value for .Parts
requestInfo.Parts = currentParts
// parts look like: resource/resourceName/subresource/other/stuff/we/don't/interpret
switch {
case len(requestInfo.Parts) >= 3 && !specialVerbsNoSubresources.Has(requestInfo.Verb):
requestInfo.Subresource = requestInfo.Parts[2]
fallthrough
case len(requestInfo.Parts) >= 2:
requestInfo.Name = requestInfo.Parts[1]
fallthrough
case len(requestInfo.Parts) >= 1:
requestInfo.Resource = requestInfo.Parts[0]
}
// if there's no name on the request and we thought it was a get before, then the actual verb is a list
if len(requestInfo.Name) == 0 && requestInfo.Verb == "get" {
requestInfo.Verb = "list"
}
// if there's no name on the request and we thought it was a delete before, then the actual verb is deletecollection
if len(requestInfo.Name) == 0 && requestInfo.Verb == "delete" {
requestInfo.Verb = "deletecollection"
}
return requestInfo, nil
}
| {
return &timeoutHandler{h, timeoutFunc}
} | identifier_body |
handlers.go | /*
Copyright 2014 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package apiserver
import (
"bufio"
"encoding/json"
"fmt"
"net"
"net/http"
"regexp"
"runtime/debug"
"strings"
"sync"
"time"
"github.com/golang/glog"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/errors"
"k8s.io/kubernetes/pkg/auth/authorizer"
"k8s.io/kubernetes/pkg/httplog"
"k8s.io/kubernetes/pkg/util/sets"
)
// specialVerbs contains just strings which are used in REST paths for special actions that don't fall under the normal
// CRUDdy GET/POST/PUT/DELETE actions on REST objects.
// TODO: find a way to keep this up to date automatically. Maybe dynamically populate list as handlers added to
// master's Mux.
var specialVerbs = sets.NewString("proxy", "redirect", "watch")
// specialVerbsNoSubresources contains root verbs which do not allow subresources
var specialVerbsNoSubresources = sets.NewString("proxy", "redirect")
// Constant for the retry-after interval on rate limiting.
// TODO: maybe make this dynamic? or user-adjustable?
const RetryAfter = "1"
// IsReadOnlyReq() is true for any (or at least many) request which has no observable
// side effects on state of apiserver (though there may be internal side effects like
// caching and logging).
func IsReadOnlyReq(req http.Request) bool {
if req.Method == "GET" {
// TODO: add OPTIONS and HEAD if we ever support those.
return true
}
return false
}
// ReadOnly passes all GET requests on to handler, and returns an error on all other requests.
func ReadOnly(handler http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
if IsReadOnlyReq(*req) {
handler.ServeHTTP(w, req)
return
}
w.WriteHeader(http.StatusForbidden)
fmt.Fprintf(w, "This is a read-only endpoint.")
})
}
type LongRunningRequestCheck func(r *http.Request) bool
// BasicLongRunningRequestCheck pathRegex operates against the url path, the queryParams match is case insensitive.
// Any one match flags the request.
// TODO tighten this check to eliminate the abuse potential by malicious clients that start setting queryParameters
// to bypass the rate limitter. This could be done using a full parse and special casing the bits we need.
func BasicLongRunningRequestCheck(pathRegex *regexp.Regexp, queryParams map[string]string) LongRunningRequestCheck {
return func(r *http.Request) bool {
if pathRegex.MatchString(r.URL.Path) {
return true
}
for key, expectedValue := range queryParams {
if strings.ToLower(expectedValue) == strings.ToLower(r.URL.Query().Get(key)) {
return true
}
}
return false
}
}
// MaxInFlight limits the number of in-flight requests to buffer size of the passed in channel.
func MaxInFlightLimit(c chan bool, longRunningRequestCheck LongRunningRequestCheck, handler http.Handler) http.Handler {
if c == nil {
return handler
}
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
if longRunningRequestCheck(r) {
// Skip tracking long running events.
handler.ServeHTTP(w, r)
return
}
select {
case c <- true:
defer func() { <-c }()
handler.ServeHTTP(w, r)
default:
tooManyRequests(w)
}
})
}
func tooManyRequests(w http.ResponseWriter) {
// Return a 429 status indicating "Too Many Requests"
w.Header().Set("Retry-After", RetryAfter)
http.Error(w, "Too many requests, please try again later.", errors.StatusTooManyRequests)
}
// RecoverPanics wraps an http Handler to recover and log panics.
func RecoverPanics(handler http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
defer func() {
if x := recover(); x != nil {
http.Error(w, "apis panic. Look in log for details.", http.StatusInternalServerError)
glog.Errorf("APIServer panic'd on %v %v: %v\n%s\n", req.Method, req.RequestURI, x, debug.Stack())
}
}()
defer httplog.NewLogged(req, &w).StacktraceWhen(
httplog.StatusIsNot(
http.StatusOK,
http.StatusCreated,
http.StatusAccepted,
http.StatusBadRequest,
http.StatusMovedPermanently,
http.StatusTemporaryRedirect,
http.StatusConflict,
http.StatusNotFound,
http.StatusUnauthorized,
http.StatusForbidden,
errors.StatusUnprocessableEntity,
http.StatusSwitchingProtocols,
),
).Log()
// Dispatch to the internal handler
handler.ServeHTTP(w, req)
})
}
// TimeoutHandler returns an http.Handler that runs h with a timeout
// determined by timeoutFunc. The new http.Handler calls h.ServeHTTP to handle
// each request, but if a call runs for longer than its time limit, the
// handler responds with a 503 Service Unavailable error and the message
// provided. (If msg is empty, a suitable default message with be sent.) After
// the handler times out, writes by h to its http.ResponseWriter will return
// http.ErrHandlerTimeout. If timeoutFunc returns a nil timeout channel, no
// timeout will be enforced.
func TimeoutHandler(h http.Handler, timeoutFunc func(*http.Request) (timeout <-chan time.Time, msg string)) http.Handler {
return &timeoutHandler{h, timeoutFunc}
}
type timeoutHandler struct {
handler http.Handler
timeout func(*http.Request) (<-chan time.Time, string)
}
func (t *timeoutHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
after, msg := t.timeout(r)
if after == nil {
t.handler.ServeHTTP(w, r)
return
}
done := make(chan struct{}, 1)
tw := newTimeoutWriter(w)
go func() {
t.handler.ServeHTTP(tw, r)
done <- struct{}{}
}()
select {
case <-done:
return
case <-after:
tw.timeout(msg)
}
}
type timeoutWriter interface {
http.ResponseWriter
timeout(string)
}
func newTimeoutWriter(w http.ResponseWriter) timeoutWriter {
base := &baseTimeoutWriter{w: w}
_, notifiable := w.(http.CloseNotifier)
_, hijackable := w.(http.Hijacker)
switch {
case notifiable && hijackable:
return &closeHijackTimeoutWriter{base}
case notifiable:
return &closeTimeoutWriter{base}
case hijackable:
return &hijackTimeoutWriter{base}
default:
return base
}
}
type baseTimeoutWriter struct {
w http.ResponseWriter
mu sync.Mutex
timedOut bool
wroteHeader bool
hijacked bool
}
func (tw *baseTimeoutWriter) Header() http.Header {
return tw.w.Header()
}
func (tw *baseTimeoutWriter) Write(p []byte) (int, error) {
tw.mu.Lock()
defer tw.mu.Unlock()
tw.wroteHeader = true
if tw.hijacked {
return 0, http.ErrHijacked
}
if tw.timedOut {
return 0, http.ErrHandlerTimeout
}
return tw.w.Write(p)
}
func (tw *baseTimeoutWriter) Flush() {
tw.mu.Lock()
defer tw.mu.Unlock()
if flusher, ok := tw.w.(http.Flusher); ok {
flusher.Flush()
}
}
func (tw *baseTimeoutWriter) WriteHeader(code int) {
tw.mu.Lock()
defer tw.mu.Unlock()
if tw.timedOut || tw.wroteHeader || tw.hijacked {
return
}
tw.wroteHeader = true
tw.w.WriteHeader(code)
}
func (tw *baseTimeoutWriter) timeout(msg string) {
tw.mu.Lock()
defer tw.mu.Unlock()
if !tw.wroteHeader && !tw.hijacked {
tw.w.WriteHeader(http.StatusGatewayTimeout)
if msg != "" {
tw.w.Write([]byte(msg))
} else {
enc := json.NewEncoder(tw.w)
enc.Encode(errors.NewServerTimeout(api.Resource(""), "", 0))
}
}
tw.timedOut = true
}
func (tw *baseTimeoutWriter) closeNotify() <-chan bool {
return tw.w.(http.CloseNotifier).CloseNotify()
}
func (tw *baseTimeoutWriter) hijack() (net.Conn, *bufio.ReadWriter, error) {
tw.mu.Lock()
defer tw.mu.Unlock()
if tw.timedOut {
return nil, nil, http.ErrHandlerTimeout
}
conn, rw, err := tw.w.(http.Hijacker).Hijack()
if err == nil {
tw.hijacked = true
}
return conn, rw, err
}
type closeTimeoutWriter struct {
*baseTimeoutWriter
} |
type hijackTimeoutWriter struct {
*baseTimeoutWriter
}
func (tw *hijackTimeoutWriter) Hijack() (net.Conn, *bufio.ReadWriter, error) {
return tw.hijack()
}
type closeHijackTimeoutWriter struct {
*baseTimeoutWriter
}
func (tw *closeHijackTimeoutWriter) CloseNotify() <-chan bool {
return tw.closeNotify()
}
func (tw *closeHijackTimeoutWriter) Hijack() (net.Conn, *bufio.ReadWriter, error) {
return tw.hijack()
}
// TODO: use restful.CrossOriginResourceSharing
// Simple CORS implementation that wraps an http Handler
// For a more detailed implementation use https://github.com/martini-contrib/cors
// or implement CORS at your proxy layer
// Pass nil for allowedMethods and allowedHeaders to use the defaults
func CORS(handler http.Handler, allowedOriginPatterns []*regexp.Regexp, allowedMethods []string, allowedHeaders []string, allowCredentials string) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
origin := req.Header.Get("Origin")
if origin != "" {
allowed := false
for _, pattern := range allowedOriginPatterns {
if allowed = pattern.MatchString(origin); allowed {
break
}
}
if allowed {
w.Header().Set("Access-Control-Allow-Origin", origin)
// Set defaults for methods and headers if nothing was passed
if allowedMethods == nil {
allowedMethods = []string{"POST", "GET", "OPTIONS", "PUT", "DELETE"}
}
if allowedHeaders == nil {
allowedHeaders = []string{"Content-Type", "Content-Length", "Accept-Encoding", "X-CSRF-Token", "Authorization", "X-Requested-With", "If-Modified-Since"}
}
w.Header().Set("Access-Control-Allow-Methods", strings.Join(allowedMethods, ", "))
w.Header().Set("Access-Control-Allow-Headers", strings.Join(allowedHeaders, ", "))
w.Header().Set("Access-Control-Allow-Credentials", allowCredentials)
// Stop here if its a preflight OPTIONS request
if req.Method == "OPTIONS" {
w.WriteHeader(http.StatusNoContent)
return
}
}
}
// Dispatch to the next handler
handler.ServeHTTP(w, req)
})
}
// RequestAttributeGetter is a function that extracts authorizer.Attributes from an http.Request
type RequestAttributeGetter interface {
GetAttribs(req *http.Request) (attribs authorizer.Attributes)
}
type requestAttributeGetter struct {
requestContextMapper api.RequestContextMapper
requestInfoResolver *RequestInfoResolver
}
// NewAttributeGetter returns an object which implements the RequestAttributeGetter interface.
func NewRequestAttributeGetter(requestContextMapper api.RequestContextMapper, requestInfoResolver *RequestInfoResolver) RequestAttributeGetter {
return &requestAttributeGetter{requestContextMapper, requestInfoResolver}
}
func (r *requestAttributeGetter) GetAttribs(req *http.Request) authorizer.Attributes {
attribs := authorizer.AttributesRecord{}
ctx, ok := r.requestContextMapper.Get(req)
if ok {
user, ok := api.UserFrom(ctx)
if ok {
attribs.User = user
}
}
requestInfo, _ := r.requestInfoResolver.GetRequestInfo(req)
// Start with common attributes that apply to resource and non-resource requests
attribs.ResourceRequest = requestInfo.IsResourceRequest
attribs.Path = requestInfo.Path
attribs.Verb = requestInfo.Verb
// If the request was for a resource in an API group, include that info
attribs.APIGroup = requestInfo.APIGroup
// If a path follows the conventions of the REST object store, then
// we can extract the resource. Otherwise, not.
attribs.Resource = requestInfo.Resource
// If the request specifies a namespace, then the namespace is filled in.
// Assumes there is no empty string namespace. Unspecified results
// in empty (does not understand defaulting rules.)
attribs.Namespace = requestInfo.Namespace
return &attribs
}
// WithAuthorizationCheck passes all authorized requests on to handler, and returns a forbidden error otherwise.
func WithAuthorizationCheck(handler http.Handler, getAttribs RequestAttributeGetter, a authorizer.Authorizer) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
err := a.Authorize(getAttribs.GetAttribs(req))
if err == nil {
handler.ServeHTTP(w, req)
return
}
forbidden(w, req)
})
}
// RequestInfo holds information parsed from the http.Request
type RequestInfo struct {
// IsResourceRequest indicates whether or not the request is for an API resource or subresource
IsResourceRequest bool
// Path is the URL path of the request
Path string
// Verb is the kube verb associated with the request for API requests, not the http verb. This includes things like list and watch.
// for non-resource requests, this is the lowercase http verb
Verb string
APIPrefix string
APIGroup string
APIVersion string
Namespace string
// Resource is the name of the resource being requested. This is not the kind. For example: pods
Resource string
// Subresource is the name of the subresource being requested. This is a different resource, scoped to the parent resource, but it may have a different kind.
// For instance, /pods has the resource "pods" and the kind "Pod", while /pods/foo/status has the resource "pods", the sub resource "status", and the kind "Pod"
// (because status operates on pods). The binding resource for a pod though may be /pods/foo/binding, which has resource "pods", subresource "binding", and kind "Binding".
Subresource string
// Name is empty for some verbs, but if the request directly indicates a name (not in body content) then this field is filled in.
Name string
// Parts are the path parts for the request, always starting with /{resource}/{name}
Parts []string
}
type RequestInfoResolver struct {
APIPrefixes sets.String
GrouplessAPIPrefixes sets.String
}
// TODO write an integration test against the swagger doc to test the RequestInfo and match up behavior to responses
// GetRequestInfo returns the information from the http request. If error is not nil, RequestInfo holds the information as best it is known before the failure
// It handles both resource and non-resource requests and fills in all the pertinent information for each.
// Valid Inputs:
// Resource paths
// /apis/{api-group}/{version}/namespaces
// /api/{version}/namespaces
// /api/{version}/namespaces/{namespace}
// /api/{version}/namespaces/{namespace}/{resource}
// /api/{version}/namespaces/{namespace}/{resource}/{resourceName}
// /api/{version}/{resource}
// /api/{version}/{resource}/{resourceName}
//
// Special verbs without subresources:
// /api/{version}/proxy/{resource}/{resourceName}
// /api/{version}/proxy/namespaces/{namespace}/{resource}/{resourceName}
// /api/{version}/redirect/namespaces/{namespace}/{resource}/{resourceName}
// /api/{version}/redirect/{resource}/{resourceName}
//
// Special verbs with subresources:
// /api/{version}/watch/{resource}
// /api/{version}/watch/namespaces/{namespace}/{resource}
//
// NonResource paths
// /apis/{api-group}/{version}
// /apis/{api-group}
// /apis
// /api/{version}
// /api
// /healthz
// /
func (r *RequestInfoResolver) GetRequestInfo(req *http.Request) (RequestInfo, error) {
// start with a non-resource request until proven otherwise
requestInfo := RequestInfo{
IsResourceRequest: false,
Path: req.URL.Path,
Verb: strings.ToLower(req.Method),
}
currentParts := splitPath(req.URL.Path)
if len(currentParts) < 3 {
// return a non-resource request
return requestInfo, nil
}
if !r.APIPrefixes.Has(currentParts[0]) {
// return a non-resource request
return requestInfo, nil
}
requestInfo.APIPrefix = currentParts[0]
currentParts = currentParts[1:]
if !r.GrouplessAPIPrefixes.Has(requestInfo.APIPrefix) {
// one part (APIPrefix) has already been consumed, so this is actually "do we have four parts?"
if len(currentParts) < 3 {
// return a non-resource request
return requestInfo, nil
}
requestInfo.APIGroup = currentParts[0]
currentParts = currentParts[1:]
}
requestInfo.IsResourceRequest = true
requestInfo.APIVersion = currentParts[0]
currentParts = currentParts[1:]
// handle input of form /{specialVerb}/*
if specialVerbs.Has(currentParts[0]) {
if len(currentParts) < 2 {
return requestInfo, fmt.Errorf("unable to determine kind and namespace from url, %v", req.URL)
}
requestInfo.Verb = currentParts[0]
currentParts = currentParts[1:]
} else {
switch req.Method {
case "POST":
requestInfo.Verb = "create"
case "GET", "HEAD":
requestInfo.Verb = "get"
case "PUT":
requestInfo.Verb = "update"
case "PATCH":
requestInfo.Verb = "patch"
case "DELETE":
requestInfo.Verb = "delete"
default:
requestInfo.Verb = ""
}
}
// URL forms: /namespaces/{namespace}/{kind}/*, where parts are adjusted to be relative to kind
if currentParts[0] == "namespaces" {
if len(currentParts) > 1 {
requestInfo.Namespace = currentParts[1]
// if there is another step after the namespace name and it is not a known namespace subresource
// move currentParts to include it as a resource in its own right
if len(currentParts) > 2 {
currentParts = currentParts[2:]
}
}
} else {
requestInfo.Namespace = api.NamespaceNone
}
// parsing successful, so we now know the proper value for .Parts
requestInfo.Parts = currentParts
// parts look like: resource/resourceName/subresource/other/stuff/we/don't/interpret
switch {
case len(requestInfo.Parts) >= 3 && !specialVerbsNoSubresources.Has(requestInfo.Verb):
requestInfo.Subresource = requestInfo.Parts[2]
fallthrough
case len(requestInfo.Parts) >= 2:
requestInfo.Name = requestInfo.Parts[1]
fallthrough
case len(requestInfo.Parts) >= 1:
requestInfo.Resource = requestInfo.Parts[0]
}
// if there's no name on the request and we thought it was a get before, then the actual verb is a list
if len(requestInfo.Name) == 0 && requestInfo.Verb == "get" {
requestInfo.Verb = "list"
}
// if there's no name on the request and we thought it was a delete before, then the actual verb is deletecollection
if len(requestInfo.Name) == 0 && requestInfo.Verb == "delete" {
requestInfo.Verb = "deletecollection"
}
return requestInfo, nil
} |
func (tw *closeTimeoutWriter) CloseNotify() <-chan bool {
return tw.closeNotify()
} | random_line_split |
test.py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import matplotlib.pyplot as plt
import sys
import tables
import numpy as np
from tensorflow.python.ops import control_flow_ops
from datasets import dataset_factory
from deployment import model_deploy
from nets import nets_factory
from auxiliary import losses
from preprocessing import preprocessing_factory
from roc_curve import calculate_roc
from roc_curve import PlotROC
from roc_curve import PlotPR
from roc_curve import PlotHIST
import pickle
slim = tf.contrib.slim
######################
# Train Directory #
######################
tf.app.flags.DEFINE_string(
'master', '', 'The address of the TensorFlow master to use.')
tf.app.flags.DEFINE_string(
'train_dir', 'TRAIN_LIPREAD/train_logs-1093',
'Directory where checkpoints and event logs are written to.')
tf.app.flags.DEFINE_integer('num_clones', 2,
'Number of model clones to deploy.')
tf.app.flags.DEFINE_boolean('clone_on_cpu', False,
'Use CPUs to deploy clones.')
tf.app.flags.DEFINE_integer('worker_replicas', 1, 'Number of worker replicas.')
tf.app.flags.DEFINE_integer(
'num_ps_tasks', 0,
'The number of parameter servers. If the value is 0, then the parameters '
'are handled locally by the worker.')
tf.app.flags.DEFINE_integer(
'num_readers', 8,
'The number of parallel readers that read data from the dataset.')
tf.app.flags.DEFINE_integer(
'num_preprocessing_threads', 8,
'The number of threads used to create the batches.')
tf.app.flags.DEFINE_integer(
'log_every_n_steps', 20,
'The frequency with which logs are print.')
tf.app.flags.DEFINE_integer(
'save_summaries_secs', 10,
'The frequency with which summaries are saved, in seconds.')
tf.app.flags.DEFINE_integer(
'save_interval_secs', 500,
'The frequency with which the model is saved, in seconds.')
tf.app.flags.DEFINE_integer(
'task', 0, 'Task id of the replica running the training.')
######################
# Optimization Flags #
######################
tf.app.flags.DEFINE_float(
'weight_decay', 0.00004, 'The weight decay on the model weights.')
tf.app.flags.DEFINE_string(
'optimizer', 'sgd',
'The name of the optimizer, one of "adadelta", "adagrad", "adam",'
'"ftrl", "momentum", "sgd" or "rmsprop".')
tf.app.flags.DEFINE_float(
'adadelta_rho', 0.95,
'The decay rate for adadelta.')
tf.app.flags.DEFINE_float(
'adagrad_initial_accumulator_value', 0.1,
'Starting value for the AdaGrad accumulators.')
tf.app.flags.DEFINE_float(
'adam_beta1', 0.9,
'The exponential decay rate for the 1st moment estimates.')
tf.app.flags.DEFINE_float(
'adam_beta2', 0.999,
'The exponential decay rate for the 2nd moment estimates.')
tf.app.flags.DEFINE_float('opt_epsilon', 1.0, 'Epsilon term for the optimizer.')
tf.app.flags.DEFINE_float('ftrl_learning_rate_power', -0.5,
'The learning rate power.')
tf.app.flags.DEFINE_float(
'ftrl_initial_accumulator_value', 0.1,
'Starting value for the FTRL accumulators.')
tf.app.flags.DEFINE_float(
'ftrl_l1', 0.0, 'The FTRL l1 regularization strength.')
tf.app.flags.DEFINE_float(
'ftrl_l2', 0.0, 'The FTRL l2 regularization strength.')
tf.app.flags.DEFINE_float(
'momentum', 0.9,
'The momentum for the MomentumOptimizer and RMSPropOptimizer.')
tf.app.flags.DEFINE_float('rmsprop_momentum', 0.9, 'Momentum.')
tf.app.flags.DEFINE_float('rmsprop_decay', 0.9, 'Decay term for RMSProp.')
#######################
# Learning Rate Flags #
#######################
tf.app.flags.DEFINE_string(
'learning_rate_decay_type',
'exponential',
'Specifies how the learning rate is decayed. One of "fixed", "exponential",'
' or "polynomial"')
tf.app.flags.DEFINE_float('learning_rate', 0.01, 'Initial learning rate.')
tf.app.flags.DEFINE_float(
'end_learning_rate', 0.0001,
'The minimal end learning rate used by a polynomial decay learning rate.')
tf.app.flags.DEFINE_float(
'label_smoothing', 0.0, 'The amount of label smoothing.')
tf.app.flags.DEFINE_float(
'learning_rate_decay_factor', 0.94, 'Learning rate decay factor.')
tf.app.flags.DEFINE_float(
'num_epochs_per_decay', 2.0,
'Number of epochs after which learning rate decays.')
tf.app.flags.DEFINE_bool(
'sync_replicas', False,
'Whether or not to synchronize the replicas during training.')
tf.app.flags.DEFINE_integer(
'replicas_to_aggregate', 1,
'The Number of gradients to collect before updating params.')
tf.app.flags.DEFINE_float(
'moving_average_decay', None,
'The decay to use for the moving average.'
'If left as None, then moving averages are not used.')
#######################
# Dataset Flags #
#######################
tf.app.flags.DEFINE_string(
'dataset_name', 'lipread', 'The name of the dataset to load.')
tf.app.flags.DEFINE_string(
'dataset_split_name', 'train', 'The name of the train/test split.')
tf.app.flags.DEFINE_string(
'dataset_dir', '/PATH/to/DATASET', 'The directory where the dataset files are stored.')
tf.app.flags.DEFINE_integer(
'labels_offset', 0,
'An offset for the labels in the dataset. This flag is primarily used to '
'evaluate the VGG and ResNet architectures which do not use a background '
'class for the ImageNet dataset.')
tf.app.flags.DEFINE_string(
'model_speech_name', 'lipread_speech_1D', 'The name of the architecture to train.')
tf.app.flags.DEFINE_string(
'model_mouth_name', 'lipread_mouth_big', 'The name of the architecture to train.')
tf.app.flags.DEFINE_string(
'preprocessing_name', None, 'The name of the preprocessing to use. If left '
'as `None`, then the model_name flag is used.')
tf.app.flags.DEFINE_integer(
'batch_size', 1024 , 'The number of samples in each batch.')
tf.app.flags.DEFINE_integer(
'train_image_size', None, 'Train image size')
tf.app.flags.DEFINE_integer('max_number_of_steps', 125000,
'The maximum number of training steps.')
#####################
# Fine-Tuning Flags #
#####################
tf.app.flags.DEFINE_string(
'checkpoint_path', None,
'The path to a checkpoint from which to fine-tune. ex:/Path/to/chackpoint')
tf.app.flags.DEFINE_string(
'checkpoint_exclude_scopes', None,
'Comma-separated list of scopes of variables to exclude when restoring'
'from a checkpoint. ex: vgg_19/fc8/biases,vgg_19/fc8/weights')
tf.app.flags.DEFINE_string(
'trainable_scopes', None,
'Comma-separated list of scopes to filter the set of variables to train.'
'By default, None would train all the variables.')
tf.app.flags.DEFINE_boolean(
'ignore_missing_vars', False,
'When restoring a checkpoint would ignore missing variables.')
# Store all elemnts in FLAG structure!
FLAGS = tf.app.flags.FLAGS
def _configure_learning_rate(num_samples_per_epoch, global_step):
"""Configures the learning rate.
Args:
num_samples_per_epoch: The number of samples in each epoch of training.
global_step: The global_step tensor.
Returns:
A `Tensor` representing the learning rate.
Raises:
ValueError: if
"""
decay_steps = int(num_samples_per_epoch / FLAGS.batch_size *
FLAGS.num_epochs_per_decay)
if FLAGS.sync_replicas:
decay_steps /= FLAGS.replicas_to_aggregate
if FLAGS.learning_rate_decay_type == 'exponential':
return tf.train.exponential_decay(FLAGS.learning_rate,
global_step,
decay_steps,
FLAGS.learning_rate_decay_factor,
staircase=True,
name='exponential_decay_learning_rate')
elif FLAGS.learning_rate_decay_type == 'fixed':
return tf.constant(FLAGS.learning_rate, name='fixed_learning_rate')
elif FLAGS.learning_rate_decay_type == 'polynomial':
return tf.train.polynomial_decay(FLAGS.learning_rate,
global_step,
decay_steps,
FLAGS.end_learning_rate,
power=1.0,
cycle=False,
name='polynomial_decay_learning_rate')
else:
raise ValueError('learning_rate_decay_type [%s] was not recognized',
FLAGS.learning_rate_decay_type)
def _configure_optimizer(learning_rate):
"""Configures the optimizer used for training.
Args:
learning_rate: A scalar or `Tensor` learning rate.
Returns:
An instance of an optimizer.
Raises:
ValueError: if FLAGS.optimizer is not recognized.
"""
if FLAGS.optimizer == 'adadelta':
optimizer = tf.train.AdadeltaOptimizer(
learning_rate,
rho=FLAGS.adadelta_rho,
epsilon=FLAGS.opt_epsilon)
elif FLAGS.optimizer == 'adagrad':
optimizer = tf.train.AdagradOptimizer(
learning_rate,
initial_accumulator_value=FLAGS.adagrad_initial_accumulator_value)
elif FLAGS.optimizer == 'adam':
optimizer = tf.train.AdamOptimizer(
learning_rate,
beta1=FLAGS.adam_beta1,
beta2=FLAGS.adam_beta2,
epsilon=FLAGS.opt_epsilon)
elif FLAGS.optimizer == 'ftrl':
optimizer = tf.train.FtrlOptimizer(
learning_rate,
learning_rate_power=FLAGS.ftrl_learning_rate_power,
initial_accumulator_value=FLAGS.ftrl_initial_accumulator_value,
l1_regularization_strength=FLAGS.ftrl_l1,
l2_regularization_strength=FLAGS.ftrl_l2)
elif FLAGS.optimizer == 'momentum':
optimizer = tf.train.MomentumOptimizer(
learning_rate,
momentum=FLAGS.momentum,
name='Momentum')
elif FLAGS.optimizer == 'rmsprop':
optimizer = tf.train.RMSPropOptimizer(
learning_rate,
decay=FLAGS.rmsprop_decay,
momentum=FLAGS.rmsprop_momentum,
epsilon=FLAGS.opt_epsilon)
elif FLAGS.optimizer == 'sgd':
optimizer = tf.train.GradientDescentOptimizer(learning_rate)
else:
raise ValueError('Optimizer [%s] was not recognized', FLAGS.optimizer)
return optimizer
def _add_variables_summaries(learning_rate):
summaries = []
for variable in slim.get_model_variables():
summaries.append(tf.summary.histogram(variable.op.name, variable))
summaries.append(tf.summary.scalar('training/Learning Rate', learning_rate))
return summaries
def _get_init_fn():
"""Returns a function run by the chief worker to warm-start the training.
Note that the init_fn is only run when initializing the model during the very
first global step.
Returns:
An init function run by the supervisor.
"""
if FLAGS.checkpoint_path is None:
return None
# Warn the user if a checkpoint exists in the train_dir. Then we'll be
# ignoring the checkpoint anyway.
if tf.train.latest_checkpoint(FLAGS.train_dir):
tf.logging.info(
'Ignoring --checkpoint_path because a checkpoint already exists in %s'
% FLAGS.train_dir)
return None
exclusions = []
if FLAGS.checkpoint_exclude_scopes:
exclusions = [scope.strip()
for scope in FLAGS.checkpoint_exclude_scopes.split(',')]
# TODO(sguada) variables.filter_variables()
variables_to_restore = []
for var in slim.get_model_variables():
excluded = False
for exclusion in exclusions:
if var.op.name.startswith(exclusion):
excluded = True
break
if not excluded:
variables_to_restore.append(var)
if tf.gfile.IsDirectory(FLAGS.checkpoint_path):
checkpoint_path = tf.train.latest_checkpoint(FLAGS.checkpoint_path)
else:
checkpoint_path = FLAGS.checkpoint_path
tf.logging.info('Fine-tuning from %s' % checkpoint_path)
return slim.assign_from_checkpoint_fn(
checkpoint_path,
variables_to_restore,
ignore_missing_vars=FLAGS.ignore_missing_vars)
def _get_variables_to_train():
|
def main(_):
if not FLAGS.dataset_dir:
raise ValueError('You must supply the dataset directory with --dataset_dir')
tf.logging.set_verbosity(tf.logging.INFO)
graph = tf.Graph()
with graph.as_default():
######################
# Config model_deploy#
######################
deploy_config = model_deploy.DeploymentConfig(
num_clones=FLAGS.num_clones,
clone_on_cpu=FLAGS.clone_on_cpu,
replica_id=FLAGS.task,
num_replicas=FLAGS.worker_replicas,
num_ps_tasks=FLAGS.num_ps_tasks)
# # Create global_step
# with tf.device(deploy_config.variables_device()):
# global_step = slim.create_global_step()
###############################
# Select and load the dataset #
###############################
dataset = dataset_factory.get_dataset(
FLAGS.dataset_name, FLAGS.dataset_split_name, FLAGS.dataset_dir)
# Load the dataset
fileh = tables.open_file(
'/path/to/dataset/train.hdf5', mode='r')
fileh_test = tables.open_file(
'/path/to/dataset/test.hdf5',
mode='r')
num_samples_per_epoch = fileh_test.root.label.shape[0]
num_batches_per_epoch = int(num_samples_per_epoch / FLAGS.batch_size)
######################
# Select the network #
######################
network_speech_fn = nets_factory.get_network_fn(
FLAGS.model_speech_name,
num_classes=(dataset.num_classes - FLAGS.labels_offset),
weight_decay=FLAGS.weight_decay,
is_training=False)
network_mouth_fn = nets_factory.get_network_fn(
FLAGS.model_mouth_name,
num_classes=(dataset.num_classes - FLAGS.labels_offset),
weight_decay=FLAGS.weight_decay,
is_training=False)
#####################################
# Select the preprocessing function #
#####################################
#TODO: Do some preprocessing if necessary.
##############################################################
# Create a dataset provider that loads data from the dataset #
##############################################################
with tf.device(deploy_config.inputs_device()):
"""
Define the place holders and creating the batch tensor.
"""
# Place holders
mouth = tf.placeholder(tf.float32, (60, 100, 9))
speech = tf.placeholder(tf.float32, (40, 15, 3))
label = tf.placeholder(tf.uint8, (1))
# Create the batch tensors
batch_speech, batch_mouth, batch_labels = tf.train.batch(
[speech, mouth, label],
batch_size=FLAGS.batch_size,
num_threads=FLAGS.num_preprocessing_threads,
capacity=5 * FLAGS.batch_size)
####################
# Run the model #
####################
# Outputs of two networks
logits_speech, end_points_speech = network_speech_fn(batch_speech)
logits_mouth, end_points_mouth = network_mouth_fn(batch_mouth)
#############################
# Specify the loss function #
#############################
# Two distance metric are defined:
# 1 - distance_weighted: which is a weighted average of the distance between two structures.
# 2 - distance_l2: which is the regular l2-norm of the two networks outputs.
#### Weighted distance ######
distance_vector = tf.subtract(logits_speech, logits_mouth, name=None)
# distance_weighted = slim.fully_connected(distance_vector, 1, activation_fn=None, normalizer_fn=None,
# scope='fc_weighted')
#### Euclidean distance ####
distance_l2 = tf.sqrt(tf.reduce_sum(tf.pow(tf.subtract(logits_speech, logits_mouth), 2), 1, keep_dims=True))
#### Contrastive loss #####
loss = losses.contrastive_loss(batch_labels, distance_l2, 1)
# Adding the accuracy metric
with tf.name_scope('accuracy'):
predictions = tf.to_int64(tf.sign(tf.sign(distance_l2 - 0.5) + 1))
labels = tf.argmax(distance_l2, 1)
accuracy = tf.reduce_mean(tf.to_float(tf.equal(predictions, labels)))
tf.add_to_collection('accuracy', accuracy)
# Gather initial summaries.
summaries = set(tf.get_collection(tf.GraphKeys.SUMMARIES))
# Add summaries for all end_points.
for end_point in end_points_speech:
x = end_points_speech[end_point]
summaries.add(tf.summary.histogram('activations_speech/' + end_point, x))
summaries.add(tf.summary.scalar('sparsity_speech/' + end_point,
tf.nn.zero_fraction(x)))
for end_point in end_points_mouth:
x = end_points_mouth[end_point]
summaries.add(tf.summary.histogram('activations_mouth/' + end_point, x))
summaries.add(tf.summary.scalar('sparsity_mouth/' + end_point,
tf.nn.zero_fraction(x)))
# # Add summaries for losses.
# for loss in tf.get_collection(tf.GraphKeys.LOSSES, first_clone_scope):
# summaries.add(tf.summary.scalar('losses/%s' % loss.op.name, loss))
# Add summaries for variables.
for variable in slim.get_model_variables():
summaries.add(tf.summary.histogram(variable.op.name, variable))
#################################
# Configure the moving averages #
#################################
if FLAGS.moving_average_decay:
moving_average_variables = slim.get_model_variables()
variable_averages = tf.train.ExponentialMovingAverage(
FLAGS.moving_average_decay, global_step)
else:
moving_average_variables, variable_averages = None, None
#########################################
# Configure the optimization procedure. #
#########################################
# deploy_config.optimizer_device()
# with tf.device(deploy_config.optimizer_device()):
# learning_rate = _configure_learning_rate(num_samples_per_epoch, global_step)
# optimizer = _configure_optimizer(learning_rate)
# optimizer = optimizer.minimize(loss)
# summaries.add(tf.summary.scalar('learning_rate', learning_rate))
# if FLAGS.sync_replicas:
# # If sync_replicas is enabled, the averaging will be done in the chief
# # queue runner.
# optimizer = tf.train.SyncReplicasOptimizer(
# opt=optimizer,
# replicas_to_aggregate=FLAGS.replicas_to_aggregate,
# variable_averages=variable_averages,
# variables_to_average=moving_average_variables,
# replica_id=tf.constant(FLAGS.task, tf.int32, shape=()),
# total_num_replicas=FLAGS.worker_replicas)
# elif FLAGS.moving_average_decay:
# # Update ops executed locally by trainer.
# update_ops.append(variable_averages.apply(moving_average_variables))
#
# summaries.add(tf.summary.scalar('eval/Loss', loss))
#
# summaries |= set(tf.get_collection(tf.GraphKeys.SUMMARIES))
#
# # Merge all summaries together.
# summary_op = tf.summary.merge(list(summaries), name='summary_op')
###########################
# Kicks off the training. #
###########################
with tf.Session(graph=graph) as sess:
# Initialization of the network.
variables_to_restore = slim.get_variables_to_restore()
saver = tf.train.Saver(slim.get_variables_to_restore(),max_to_keep=15)
coord = tf.train.Coordinator()
sess.run(tf.global_variables_initializer())
sess.run(tf.local_variables_initializer())
num_epoch = 1
# Save the model
list_checkpoints = ['1093','2176','3279','5465']
list_checkpoints = ['3279','5465','6558','7651','8744','9837','10930','12023','13116','15302','16395','17488','18581','19674','20767','21860']
FPR = []
TPR = []
for checkpoint_num, checkpoint_index in enumerate(list_checkpoints):
checkpoint_dir = 'TRAIN_LIPREAD/train_logs-' + checkpoint_index
saver.restore(sess, checkpoint_dir)
# op to write logs to Tensorboard
summary_writer = tf.summary.FileWriter(FLAGS.train_dir, graph=graph)
# score_dissimilarity_vector = np.zeros((FLAGS.batch_size * num_batches_per_epoch , 1))
# label_vector = np.zeros((FLAGS.batch_size * num_batches_per_epoch,))
# num_batches_per_epoch = 20
score_dissimilarity_vector = np.zeros((FLAGS.batch_size * num_batches_per_epoch, 1))
label_vector = np.zeros((FLAGS.batch_size * num_batches_per_epoch,))
for epoch in range(num_epoch):
# Loop over all batches
# num_batches_per_epoch
for i in range(num_batches_per_epoch):
start_idx = i * FLAGS.batch_size
end_idx = (i + 1) * FLAGS.batch_size
speech, mouth, label = fileh.root.speech[start_idx:end_idx], fileh.root.mouth[
start_idx:end_idx], fileh.root.label[
start_idx:end_idx]
# mean subtraction
speech = (speech - mean_speech) / std_speech
mouth = (mouth - mean_mouth) / std_mouth
score_dissimilarity, test_accuracy = sess.run([distance_l2, accuracy],
feed_dict={batch_speech: speech, batch_mouth: mouth,
batch_labels: label.reshape([FLAGS.batch_size, 1])})
print("Epoch " + str(epoch + 1) + ", Minibatch " + str(i+1) + " of %d " % num_batches_per_epoch)
score_dissimilarity_vector[start_idx:end_idx] = score_dissimilarity
label_vector[start_idx:end_idx] = label
# ROC
##############################
##### K-split validation #####
##############################
K = 10
EER = np.zeros((len(list_checkpoints), K, 1))
AUC = np.zeros((len(list_checkpoints), K, 1))
AP = np.zeros((len(list_checkpoints), K, 1))
batch_k_validation = int(label_vector.shape[0] / float(K))
# PlotROC.Plot_ROC_Fn(label_vector, score_dissimilarity_vector, phase='test')
# PlotPR.Plot_PR_Fn(label_vector, score_dissimilarity_vector, phase='test')
# PlotHIST.Plot_HIST_Fn(label_vector, score_dissimilarity_vector, phase='test')
for i in range(K):
EER[checkpoint_num,i,:], AUC[checkpoint_num,i,:], AP[checkpoint_num,i,:],fpr, tpr = calculate_roc.calculate_eer_auc_ap(label_vector[i * batch_k_validation:(i+1) * batch_k_validation], score_dissimilarity_vector[i * batch_k_validation:(i+1) * batch_k_validation])
FPR.append(fpr.tolist())
TPR.append(tpr.tolist())
print('EER=',np.mean(EER[checkpoint_num],axis=0),np.std(EER[checkpoint_num],axis=0))
print('STD=',np.mean(AUC[checkpoint_num],axis=0),np.std(AUC[checkpoint_num],axis=0))
print('AP=', np.mean(AP[checkpoint_num], axis=0), np.std(AP[checkpoint_num], axis=0))
pickle.dump(FPR, open("fpr.p", "wb"))
pickle.dump(TPR, open("tpr.p", "wb"))
# color = ['red','blue']
# fig = plt.figure()
# ax = fig.gca()
# for i in range(len(FPR)):
# # Plot the ROC
# plt.plot(FPR[i], TPR[i], color = color[i], linewidth=2, label='ROC Curve' + '_' + str(i))
#
# plt.plot([0, 1], [0, 1], 'k--', lw=1)
# plt.xlim([0.0, 1.0])
# plt.ylim([0.0, 1.05])
# # ax.set_xticks(np.arange(0, 1.1, 0.1))
# # ax.set_yticks(np.arange(0, 1.1, 0.1))
# plt.title('ROC.jpg')
# plt.xlabel('False Positive Rate')
# plt.ylabel('True Positive Rate')
#
# # # Cutting the floating number
# # AUC = '%.2f' % AUC
# # EER = '%.2f' % EER
# # # AP = '%.2f' % AP
# #
# # # Setting text to plot
# # # plt.text(0.5, 0.6, 'AP = ' + str(AP), fontdict=None)
# # plt.text(0.5, 0.5, 'AUC = ' + str(AUC), fontdict=None)
# # plt.text(0.5, 0.4, 'EER = ' + str(EER), fontdict=None)
# plt.grid(True)
# plt.legend(loc="lower right")
# plt.show()
# fig.savefig('ROC.jpg')
if __name__ == '__main__':
tf.app.run()
| """Returns a list of variables to train.
Returns:
A list of variables to train by the optimizer.
"""
if FLAGS.trainable_scopes is None:
return tf.trainable_variables()
else:
scopes = [scope.strip() for scope in FLAGS.trainable_scopes.split(',')]
variables_to_train = []
for scope in scopes:
variables = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope)
variables_to_train.extend(variables)
return variables_to_train | identifier_body |
test.py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import matplotlib.pyplot as plt
import sys
import tables
import numpy as np
from tensorflow.python.ops import control_flow_ops
from datasets import dataset_factory
from deployment import model_deploy
from nets import nets_factory
from auxiliary import losses
from preprocessing import preprocessing_factory
from roc_curve import calculate_roc
from roc_curve import PlotROC
from roc_curve import PlotPR
from roc_curve import PlotHIST
import pickle
slim = tf.contrib.slim
######################
# Train Directory #
######################
tf.app.flags.DEFINE_string(
'master', '', 'The address of the TensorFlow master to use.')
tf.app.flags.DEFINE_string(
'train_dir', 'TRAIN_LIPREAD/train_logs-1093',
'Directory where checkpoints and event logs are written to.')
tf.app.flags.DEFINE_integer('num_clones', 2,
'Number of model clones to deploy.')
tf.app.flags.DEFINE_boolean('clone_on_cpu', False,
'Use CPUs to deploy clones.')
tf.app.flags.DEFINE_integer('worker_replicas', 1, 'Number of worker replicas.')
tf.app.flags.DEFINE_integer(
'num_ps_tasks', 0,
'The number of parameter servers. If the value is 0, then the parameters '
'are handled locally by the worker.')
tf.app.flags.DEFINE_integer(
'num_readers', 8,
'The number of parallel readers that read data from the dataset.')
tf.app.flags.DEFINE_integer(
'num_preprocessing_threads', 8,
'The number of threads used to create the batches.')
tf.app.flags.DEFINE_integer(
'log_every_n_steps', 20,
'The frequency with which logs are print.')
tf.app.flags.DEFINE_integer(
'save_summaries_secs', 10,
'The frequency with which summaries are saved, in seconds.')
tf.app.flags.DEFINE_integer(
'save_interval_secs', 500,
'The frequency with which the model is saved, in seconds.')
tf.app.flags.DEFINE_integer(
'task', 0, 'Task id of the replica running the training.')
######################
# Optimization Flags #
######################
tf.app.flags.DEFINE_float(
'weight_decay', 0.00004, 'The weight decay on the model weights.')
tf.app.flags.DEFINE_string(
'optimizer', 'sgd',
'The name of the optimizer, one of "adadelta", "adagrad", "adam",'
'"ftrl", "momentum", "sgd" or "rmsprop".')
tf.app.flags.DEFINE_float(
'adadelta_rho', 0.95,
'The decay rate for adadelta.')
tf.app.flags.DEFINE_float(
'adagrad_initial_accumulator_value', 0.1,
'Starting value for the AdaGrad accumulators.')
tf.app.flags.DEFINE_float(
'adam_beta1', 0.9,
'The exponential decay rate for the 1st moment estimates.')
tf.app.flags.DEFINE_float(
'adam_beta2', 0.999,
'The exponential decay rate for the 2nd moment estimates.')
tf.app.flags.DEFINE_float('opt_epsilon', 1.0, 'Epsilon term for the optimizer.')
tf.app.flags.DEFINE_float('ftrl_learning_rate_power', -0.5,
'The learning rate power.')
tf.app.flags.DEFINE_float(
'ftrl_initial_accumulator_value', 0.1,
'Starting value for the FTRL accumulators.')
tf.app.flags.DEFINE_float(
'ftrl_l1', 0.0, 'The FTRL l1 regularization strength.')
tf.app.flags.DEFINE_float(
'ftrl_l2', 0.0, 'The FTRL l2 regularization strength.')
tf.app.flags.DEFINE_float(
'momentum', 0.9,
'The momentum for the MomentumOptimizer and RMSPropOptimizer.')
tf.app.flags.DEFINE_float('rmsprop_momentum', 0.9, 'Momentum.')
tf.app.flags.DEFINE_float('rmsprop_decay', 0.9, 'Decay term for RMSProp.')
#######################
# Learning Rate Flags #
#######################
tf.app.flags.DEFINE_string(
'learning_rate_decay_type',
'exponential',
'Specifies how the learning rate is decayed. One of "fixed", "exponential",'
' or "polynomial"')
tf.app.flags.DEFINE_float('learning_rate', 0.01, 'Initial learning rate.')
tf.app.flags.DEFINE_float(
'end_learning_rate', 0.0001,
'The minimal end learning rate used by a polynomial decay learning rate.')
tf.app.flags.DEFINE_float(
'label_smoothing', 0.0, 'The amount of label smoothing.')
tf.app.flags.DEFINE_float(
'learning_rate_decay_factor', 0.94, 'Learning rate decay factor.')
tf.app.flags.DEFINE_float(
'num_epochs_per_decay', 2.0,
'Number of epochs after which learning rate decays.')
tf.app.flags.DEFINE_bool(
'sync_replicas', False,
'Whether or not to synchronize the replicas during training.')
tf.app.flags.DEFINE_integer(
'replicas_to_aggregate', 1,
'The Number of gradients to collect before updating params.')
tf.app.flags.DEFINE_float(
'moving_average_decay', None,
'The decay to use for the moving average.'
'If left as None, then moving averages are not used.')
#######################
# Dataset Flags #
#######################
tf.app.flags.DEFINE_string(
'dataset_name', 'lipread', 'The name of the dataset to load.')
tf.app.flags.DEFINE_string(
'dataset_split_name', 'train', 'The name of the train/test split.')
tf.app.flags.DEFINE_string(
'dataset_dir', '/PATH/to/DATASET', 'The directory where the dataset files are stored.')
tf.app.flags.DEFINE_integer(
'labels_offset', 0,
'An offset for the labels in the dataset. This flag is primarily used to '
'evaluate the VGG and ResNet architectures which do not use a background '
'class for the ImageNet dataset.')
tf.app.flags.DEFINE_string(
'model_speech_name', 'lipread_speech_1D', 'The name of the architecture to train.')
tf.app.flags.DEFINE_string(
'model_mouth_name', 'lipread_mouth_big', 'The name of the architecture to train.')
tf.app.flags.DEFINE_string(
'preprocessing_name', None, 'The name of the preprocessing to use. If left '
'as `None`, then the model_name flag is used.')
tf.app.flags.DEFINE_integer(
'batch_size', 1024 , 'The number of samples in each batch.')
tf.app.flags.DEFINE_integer(
'train_image_size', None, 'Train image size')
tf.app.flags.DEFINE_integer('max_number_of_steps', 125000,
'The maximum number of training steps.')
#####################
# Fine-Tuning Flags #
#####################
tf.app.flags.DEFINE_string(
'checkpoint_path', None,
'The path to a checkpoint from which to fine-tune. ex:/Path/to/chackpoint')
tf.app.flags.DEFINE_string(
'checkpoint_exclude_scopes', None,
'Comma-separated list of scopes of variables to exclude when restoring'
'from a checkpoint. ex: vgg_19/fc8/biases,vgg_19/fc8/weights')
tf.app.flags.DEFINE_string(
'trainable_scopes', None,
'Comma-separated list of scopes to filter the set of variables to train.'
'By default, None would train all the variables.')
tf.app.flags.DEFINE_boolean(
'ignore_missing_vars', False,
'When restoring a checkpoint would ignore missing variables.')
# Store all elemnts in FLAG structure!
FLAGS = tf.app.flags.FLAGS
def _configure_learning_rate(num_samples_per_epoch, global_step):
"""Configures the learning rate.
Args:
num_samples_per_epoch: The number of samples in each epoch of training.
global_step: The global_step tensor.
Returns:
A `Tensor` representing the learning rate.
Raises:
ValueError: if
"""
decay_steps = int(num_samples_per_epoch / FLAGS.batch_size *
FLAGS.num_epochs_per_decay)
if FLAGS.sync_replicas:
decay_steps /= FLAGS.replicas_to_aggregate
if FLAGS.learning_rate_decay_type == 'exponential':
return tf.train.exponential_decay(FLAGS.learning_rate,
global_step,
decay_steps,
FLAGS.learning_rate_decay_factor,
staircase=True,
name='exponential_decay_learning_rate')
elif FLAGS.learning_rate_decay_type == 'fixed':
return tf.constant(FLAGS.learning_rate, name='fixed_learning_rate')
elif FLAGS.learning_rate_decay_type == 'polynomial':
return tf.train.polynomial_decay(FLAGS.learning_rate,
global_step,
decay_steps,
FLAGS.end_learning_rate,
power=1.0,
cycle=False,
name='polynomial_decay_learning_rate')
else:
raise ValueError('learning_rate_decay_type [%s] was not recognized',
FLAGS.learning_rate_decay_type)
def _configure_optimizer(learning_rate):
"""Configures the optimizer used for training.
Args:
learning_rate: A scalar or `Tensor` learning rate.
Returns:
An instance of an optimizer.
Raises:
ValueError: if FLAGS.optimizer is not recognized.
"""
if FLAGS.optimizer == 'adadelta':
optimizer = tf.train.AdadeltaOptimizer(
learning_rate,
rho=FLAGS.adadelta_rho,
epsilon=FLAGS.opt_epsilon)
elif FLAGS.optimizer == 'adagrad':
optimizer = tf.train.AdagradOptimizer(
learning_rate,
initial_accumulator_value=FLAGS.adagrad_initial_accumulator_value)
elif FLAGS.optimizer == 'adam':
optimizer = tf.train.AdamOptimizer(
learning_rate,
beta1=FLAGS.adam_beta1,
beta2=FLAGS.adam_beta2,
epsilon=FLAGS.opt_epsilon)
elif FLAGS.optimizer == 'ftrl':
optimizer = tf.train.FtrlOptimizer(
learning_rate,
learning_rate_power=FLAGS.ftrl_learning_rate_power,
initial_accumulator_value=FLAGS.ftrl_initial_accumulator_value,
l1_regularization_strength=FLAGS.ftrl_l1,
l2_regularization_strength=FLAGS.ftrl_l2)
elif FLAGS.optimizer == 'momentum':
optimizer = tf.train.MomentumOptimizer(
learning_rate,
momentum=FLAGS.momentum,
name='Momentum')
elif FLAGS.optimizer == 'rmsprop':
optimizer = tf.train.RMSPropOptimizer(
learning_rate,
decay=FLAGS.rmsprop_decay,
momentum=FLAGS.rmsprop_momentum,
epsilon=FLAGS.opt_epsilon)
elif FLAGS.optimizer == 'sgd':
optimizer = tf.train.GradientDescentOptimizer(learning_rate)
else:
raise ValueError('Optimizer [%s] was not recognized', FLAGS.optimizer)
return optimizer
def _add_variables_summaries(learning_rate):
summaries = []
for variable in slim.get_model_variables():
summaries.append(tf.summary.histogram(variable.op.name, variable))
summaries.append(tf.summary.scalar('training/Learning Rate', learning_rate))
return summaries
def _get_init_fn():
"""Returns a function run by the chief worker to warm-start the training.
Note that the init_fn is only run when initializing the model during the very
first global step.
Returns:
An init function run by the supervisor.
"""
if FLAGS.checkpoint_path is None:
return None
# Warn the user if a checkpoint exists in the train_dir. Then we'll be
# ignoring the checkpoint anyway.
if tf.train.latest_checkpoint(FLAGS.train_dir):
tf.logging.info(
'Ignoring --checkpoint_path because a checkpoint already exists in %s'
% FLAGS.train_dir)
return None
exclusions = []
if FLAGS.checkpoint_exclude_scopes:
exclusions = [scope.strip()
for scope in FLAGS.checkpoint_exclude_scopes.split(',')]
# TODO(sguada) variables.filter_variables()
variables_to_restore = []
for var in slim.get_model_variables():
excluded = False
for exclusion in exclusions:
if var.op.name.startswith(exclusion):
excluded = True
break
if not excluded:
variables_to_restore.append(var)
if tf.gfile.IsDirectory(FLAGS.checkpoint_path):
checkpoint_path = tf.train.latest_checkpoint(FLAGS.checkpoint_path)
else:
checkpoint_path = FLAGS.checkpoint_path
tf.logging.info('Fine-tuning from %s' % checkpoint_path)
return slim.assign_from_checkpoint_fn(
checkpoint_path,
variables_to_restore,
ignore_missing_vars=FLAGS.ignore_missing_vars)
def _get_variables_to_train():
"""Returns a list of variables to train.
Returns:
A list of variables to train by the optimizer.
"""
if FLAGS.trainable_scopes is None:
return tf.trainable_variables()
else:
scopes = [scope.strip() for scope in FLAGS.trainable_scopes.split(',')]
variables_to_train = []
for scope in scopes:
variables = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope)
variables_to_train.extend(variables)
return variables_to_train
def main(_):
if not FLAGS.dataset_dir:
raise ValueError('You must supply the dataset directory with --dataset_dir')
tf.logging.set_verbosity(tf.logging.INFO)
graph = tf.Graph()
with graph.as_default():
######################
# Config model_deploy#
######################
deploy_config = model_deploy.DeploymentConfig(
num_clones=FLAGS.num_clones,
clone_on_cpu=FLAGS.clone_on_cpu,
replica_id=FLAGS.task,
num_replicas=FLAGS.worker_replicas,
num_ps_tasks=FLAGS.num_ps_tasks)
# # Create global_step
# with tf.device(deploy_config.variables_device()):
# global_step = slim.create_global_step()
###############################
# Select and load the dataset #
###############################
dataset = dataset_factory.get_dataset(
FLAGS.dataset_name, FLAGS.dataset_split_name, FLAGS.dataset_dir)
# Load the dataset
fileh = tables.open_file(
'/path/to/dataset/train.hdf5', mode='r')
fileh_test = tables.open_file(
'/path/to/dataset/test.hdf5',
mode='r')
num_samples_per_epoch = fileh_test.root.label.shape[0]
num_batches_per_epoch = int(num_samples_per_epoch / FLAGS.batch_size)
######################
# Select the network #
######################
network_speech_fn = nets_factory.get_network_fn(
FLAGS.model_speech_name,
num_classes=(dataset.num_classes - FLAGS.labels_offset),
weight_decay=FLAGS.weight_decay,
is_training=False)
network_mouth_fn = nets_factory.get_network_fn(
FLAGS.model_mouth_name,
num_classes=(dataset.num_classes - FLAGS.labels_offset),
weight_decay=FLAGS.weight_decay,
is_training=False)
#####################################
# Select the preprocessing function #
#####################################
#TODO: Do some preprocessing if necessary.
##############################################################
# Create a dataset provider that loads data from the dataset #
##############################################################
with tf.device(deploy_config.inputs_device()):
"""
Define the place holders and creating the batch tensor.
"""
# Place holders
mouth = tf.placeholder(tf.float32, (60, 100, 9))
speech = tf.placeholder(tf.float32, (40, 15, 3))
label = tf.placeholder(tf.uint8, (1))
# Create the batch tensors
batch_speech, batch_mouth, batch_labels = tf.train.batch(
[speech, mouth, label],
batch_size=FLAGS.batch_size,
num_threads=FLAGS.num_preprocessing_threads,
capacity=5 * FLAGS.batch_size)
####################
# Run the model #
####################
# Outputs of two networks
logits_speech, end_points_speech = network_speech_fn(batch_speech)
logits_mouth, end_points_mouth = network_mouth_fn(batch_mouth)
#############################
# Specify the loss function #
#############################
# Two distance metric are defined:
# 1 - distance_weighted: which is a weighted average of the distance between two structures.
# 2 - distance_l2: which is the regular l2-norm of the two networks outputs.
#### Weighted distance ######
distance_vector = tf.subtract(logits_speech, logits_mouth, name=None)
# distance_weighted = slim.fully_connected(distance_vector, 1, activation_fn=None, normalizer_fn=None,
# scope='fc_weighted')
#### Euclidean distance ####
distance_l2 = tf.sqrt(tf.reduce_sum(tf.pow(tf.subtract(logits_speech, logits_mouth), 2), 1, keep_dims=True))
#### Contrastive loss #####
loss = losses.contrastive_loss(batch_labels, distance_l2, 1)
# Adding the accuracy metric
with tf.name_scope('accuracy'):
predictions = tf.to_int64(tf.sign(tf.sign(distance_l2 - 0.5) + 1))
labels = tf.argmax(distance_l2, 1)
accuracy = tf.reduce_mean(tf.to_float(tf.equal(predictions, labels)))
tf.add_to_collection('accuracy', accuracy)
# Gather initial summaries.
summaries = set(tf.get_collection(tf.GraphKeys.SUMMARIES))
# Add summaries for all end_points.
for end_point in end_points_speech:
x = end_points_speech[end_point]
summaries.add(tf.summary.histogram('activations_speech/' + end_point, x))
summaries.add(tf.summary.scalar('sparsity_speech/' + end_point,
tf.nn.zero_fraction(x)))
for end_point in end_points_mouth:
x = end_points_mouth[end_point]
summaries.add(tf.summary.histogram('activations_mouth/' + end_point, x))
summaries.add(tf.summary.scalar('sparsity_mouth/' + end_point,
tf.nn.zero_fraction(x)))
# # Add summaries for losses.
# for loss in tf.get_collection(tf.GraphKeys.LOSSES, first_clone_scope):
# summaries.add(tf.summary.scalar('losses/%s' % loss.op.name, loss))
# Add summaries for variables.
for variable in slim.get_model_variables():
summaries.add(tf.summary.histogram(variable.op.name, variable))
#################################
# Configure the moving averages #
#################################
if FLAGS.moving_average_decay:
moving_average_variables = slim.get_model_variables()
variable_averages = tf.train.ExponentialMovingAverage(
FLAGS.moving_average_decay, global_step)
else:
moving_average_variables, variable_averages = None, None
#########################################
# Configure the optimization procedure. #
#########################################
# deploy_config.optimizer_device()
# with tf.device(deploy_config.optimizer_device()):
# learning_rate = _configure_learning_rate(num_samples_per_epoch, global_step)
# optimizer = _configure_optimizer(learning_rate)
# optimizer = optimizer.minimize(loss)
# summaries.add(tf.summary.scalar('learning_rate', learning_rate))
# if FLAGS.sync_replicas:
# # If sync_replicas is enabled, the averaging will be done in the chief
# # queue runner.
# optimizer = tf.train.SyncReplicasOptimizer( | # replica_id=tf.constant(FLAGS.task, tf.int32, shape=()),
# total_num_replicas=FLAGS.worker_replicas)
# elif FLAGS.moving_average_decay:
# # Update ops executed locally by trainer.
# update_ops.append(variable_averages.apply(moving_average_variables))
#
# summaries.add(tf.summary.scalar('eval/Loss', loss))
#
# summaries |= set(tf.get_collection(tf.GraphKeys.SUMMARIES))
#
# # Merge all summaries together.
# summary_op = tf.summary.merge(list(summaries), name='summary_op')
###########################
# Kicks off the training. #
###########################
with tf.Session(graph=graph) as sess:
# Initialization of the network.
variables_to_restore = slim.get_variables_to_restore()
saver = tf.train.Saver(slim.get_variables_to_restore(),max_to_keep=15)
coord = tf.train.Coordinator()
sess.run(tf.global_variables_initializer())
sess.run(tf.local_variables_initializer())
num_epoch = 1
# Save the model
list_checkpoints = ['1093','2176','3279','5465']
list_checkpoints = ['3279','5465','6558','7651','8744','9837','10930','12023','13116','15302','16395','17488','18581','19674','20767','21860']
FPR = []
TPR = []
for checkpoint_num, checkpoint_index in enumerate(list_checkpoints):
checkpoint_dir = 'TRAIN_LIPREAD/train_logs-' + checkpoint_index
saver.restore(sess, checkpoint_dir)
# op to write logs to Tensorboard
summary_writer = tf.summary.FileWriter(FLAGS.train_dir, graph=graph)
# score_dissimilarity_vector = np.zeros((FLAGS.batch_size * num_batches_per_epoch , 1))
# label_vector = np.zeros((FLAGS.batch_size * num_batches_per_epoch,))
# num_batches_per_epoch = 20
score_dissimilarity_vector = np.zeros((FLAGS.batch_size * num_batches_per_epoch, 1))
label_vector = np.zeros((FLAGS.batch_size * num_batches_per_epoch,))
for epoch in range(num_epoch):
# Loop over all batches
# num_batches_per_epoch
for i in range(num_batches_per_epoch):
start_idx = i * FLAGS.batch_size
end_idx = (i + 1) * FLAGS.batch_size
speech, mouth, label = fileh.root.speech[start_idx:end_idx], fileh.root.mouth[
start_idx:end_idx], fileh.root.label[
start_idx:end_idx]
# mean subtraction
speech = (speech - mean_speech) / std_speech
mouth = (mouth - mean_mouth) / std_mouth
score_dissimilarity, test_accuracy = sess.run([distance_l2, accuracy],
feed_dict={batch_speech: speech, batch_mouth: mouth,
batch_labels: label.reshape([FLAGS.batch_size, 1])})
print("Epoch " + str(epoch + 1) + ", Minibatch " + str(i+1) + " of %d " % num_batches_per_epoch)
score_dissimilarity_vector[start_idx:end_idx] = score_dissimilarity
label_vector[start_idx:end_idx] = label
# ROC
##############################
##### K-split validation #####
##############################
K = 10
EER = np.zeros((len(list_checkpoints), K, 1))
AUC = np.zeros((len(list_checkpoints), K, 1))
AP = np.zeros((len(list_checkpoints), K, 1))
batch_k_validation = int(label_vector.shape[0] / float(K))
# PlotROC.Plot_ROC_Fn(label_vector, score_dissimilarity_vector, phase='test')
# PlotPR.Plot_PR_Fn(label_vector, score_dissimilarity_vector, phase='test')
# PlotHIST.Plot_HIST_Fn(label_vector, score_dissimilarity_vector, phase='test')
for i in range(K):
EER[checkpoint_num,i,:], AUC[checkpoint_num,i,:], AP[checkpoint_num,i,:],fpr, tpr = calculate_roc.calculate_eer_auc_ap(label_vector[i * batch_k_validation:(i+1) * batch_k_validation], score_dissimilarity_vector[i * batch_k_validation:(i+1) * batch_k_validation])
FPR.append(fpr.tolist())
TPR.append(tpr.tolist())
print('EER=',np.mean(EER[checkpoint_num],axis=0),np.std(EER[checkpoint_num],axis=0))
print('STD=',np.mean(AUC[checkpoint_num],axis=0),np.std(AUC[checkpoint_num],axis=0))
print('AP=', np.mean(AP[checkpoint_num], axis=0), np.std(AP[checkpoint_num], axis=0))
pickle.dump(FPR, open("fpr.p", "wb"))
pickle.dump(TPR, open("tpr.p", "wb"))
# color = ['red','blue']
# fig = plt.figure()
# ax = fig.gca()
# for i in range(len(FPR)):
# # Plot the ROC
# plt.plot(FPR[i], TPR[i], color = color[i], linewidth=2, label='ROC Curve' + '_' + str(i))
#
# plt.plot([0, 1], [0, 1], 'k--', lw=1)
# plt.xlim([0.0, 1.0])
# plt.ylim([0.0, 1.05])
# # ax.set_xticks(np.arange(0, 1.1, 0.1))
# # ax.set_yticks(np.arange(0, 1.1, 0.1))
# plt.title('ROC.jpg')
# plt.xlabel('False Positive Rate')
# plt.ylabel('True Positive Rate')
#
# # # Cutting the floating number
# # AUC = '%.2f' % AUC
# # EER = '%.2f' % EER
# # # AP = '%.2f' % AP
# #
# # # Setting text to plot
# # # plt.text(0.5, 0.6, 'AP = ' + str(AP), fontdict=None)
# # plt.text(0.5, 0.5, 'AUC = ' + str(AUC), fontdict=None)
# # plt.text(0.5, 0.4, 'EER = ' + str(EER), fontdict=None)
# plt.grid(True)
# plt.legend(loc="lower right")
# plt.show()
# fig.savefig('ROC.jpg')
if __name__ == '__main__':
tf.app.run() | # opt=optimizer,
# replicas_to_aggregate=FLAGS.replicas_to_aggregate,
# variable_averages=variable_averages,
# variables_to_average=moving_average_variables, | random_line_split |
test.py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import matplotlib.pyplot as plt
import sys
import tables
import numpy as np
from tensorflow.python.ops import control_flow_ops
from datasets import dataset_factory
from deployment import model_deploy
from nets import nets_factory
from auxiliary import losses
from preprocessing import preprocessing_factory
from roc_curve import calculate_roc
from roc_curve import PlotROC
from roc_curve import PlotPR
from roc_curve import PlotHIST
import pickle
slim = tf.contrib.slim
######################
# Train Directory #
######################
tf.app.flags.DEFINE_string(
'master', '', 'The address of the TensorFlow master to use.')
tf.app.flags.DEFINE_string(
'train_dir', 'TRAIN_LIPREAD/train_logs-1093',
'Directory where checkpoints and event logs are written to.')
tf.app.flags.DEFINE_integer('num_clones', 2,
'Number of model clones to deploy.')
tf.app.flags.DEFINE_boolean('clone_on_cpu', False,
'Use CPUs to deploy clones.')
tf.app.flags.DEFINE_integer('worker_replicas', 1, 'Number of worker replicas.')
tf.app.flags.DEFINE_integer(
'num_ps_tasks', 0,
'The number of parameter servers. If the value is 0, then the parameters '
'are handled locally by the worker.')
tf.app.flags.DEFINE_integer(
'num_readers', 8,
'The number of parallel readers that read data from the dataset.')
tf.app.flags.DEFINE_integer(
'num_preprocessing_threads', 8,
'The number of threads used to create the batches.')
tf.app.flags.DEFINE_integer(
'log_every_n_steps', 20,
'The frequency with which logs are print.')
tf.app.flags.DEFINE_integer(
'save_summaries_secs', 10,
'The frequency with which summaries are saved, in seconds.')
tf.app.flags.DEFINE_integer(
'save_interval_secs', 500,
'The frequency with which the model is saved, in seconds.')
tf.app.flags.DEFINE_integer(
'task', 0, 'Task id of the replica running the training.')
######################
# Optimization Flags #
######################
tf.app.flags.DEFINE_float(
'weight_decay', 0.00004, 'The weight decay on the model weights.')
tf.app.flags.DEFINE_string(
'optimizer', 'sgd',
'The name of the optimizer, one of "adadelta", "adagrad", "adam",'
'"ftrl", "momentum", "sgd" or "rmsprop".')
tf.app.flags.DEFINE_float(
'adadelta_rho', 0.95,
'The decay rate for adadelta.')
tf.app.flags.DEFINE_float(
'adagrad_initial_accumulator_value', 0.1,
'Starting value for the AdaGrad accumulators.')
tf.app.flags.DEFINE_float(
'adam_beta1', 0.9,
'The exponential decay rate for the 1st moment estimates.')
tf.app.flags.DEFINE_float(
'adam_beta2', 0.999,
'The exponential decay rate for the 2nd moment estimates.')
tf.app.flags.DEFINE_float('opt_epsilon', 1.0, 'Epsilon term for the optimizer.')
tf.app.flags.DEFINE_float('ftrl_learning_rate_power', -0.5,
'The learning rate power.')
tf.app.flags.DEFINE_float(
'ftrl_initial_accumulator_value', 0.1,
'Starting value for the FTRL accumulators.')
tf.app.flags.DEFINE_float(
'ftrl_l1', 0.0, 'The FTRL l1 regularization strength.')
tf.app.flags.DEFINE_float(
'ftrl_l2', 0.0, 'The FTRL l2 regularization strength.')
tf.app.flags.DEFINE_float(
'momentum', 0.9,
'The momentum for the MomentumOptimizer and RMSPropOptimizer.')
tf.app.flags.DEFINE_float('rmsprop_momentum', 0.9, 'Momentum.')
tf.app.flags.DEFINE_float('rmsprop_decay', 0.9, 'Decay term for RMSProp.')
#######################
# Learning Rate Flags #
#######################
tf.app.flags.DEFINE_string(
'learning_rate_decay_type',
'exponential',
'Specifies how the learning rate is decayed. One of "fixed", "exponential",'
' or "polynomial"')
tf.app.flags.DEFINE_float('learning_rate', 0.01, 'Initial learning rate.')
tf.app.flags.DEFINE_float(
'end_learning_rate', 0.0001,
'The minimal end learning rate used by a polynomial decay learning rate.')
tf.app.flags.DEFINE_float(
'label_smoothing', 0.0, 'The amount of label smoothing.')
tf.app.flags.DEFINE_float(
'learning_rate_decay_factor', 0.94, 'Learning rate decay factor.')
tf.app.flags.DEFINE_float(
'num_epochs_per_decay', 2.0,
'Number of epochs after which learning rate decays.')
tf.app.flags.DEFINE_bool(
'sync_replicas', False,
'Whether or not to synchronize the replicas during training.')
tf.app.flags.DEFINE_integer(
'replicas_to_aggregate', 1,
'The Number of gradients to collect before updating params.')
tf.app.flags.DEFINE_float(
'moving_average_decay', None,
'The decay to use for the moving average.'
'If left as None, then moving averages are not used.')
#######################
# Dataset Flags #
#######################
tf.app.flags.DEFINE_string(
'dataset_name', 'lipread', 'The name of the dataset to load.')
tf.app.flags.DEFINE_string(
'dataset_split_name', 'train', 'The name of the train/test split.')
tf.app.flags.DEFINE_string(
'dataset_dir', '/PATH/to/DATASET', 'The directory where the dataset files are stored.')
tf.app.flags.DEFINE_integer(
'labels_offset', 0,
'An offset for the labels in the dataset. This flag is primarily used to '
'evaluate the VGG and ResNet architectures which do not use a background '
'class for the ImageNet dataset.')
tf.app.flags.DEFINE_string(
'model_speech_name', 'lipread_speech_1D', 'The name of the architecture to train.')
tf.app.flags.DEFINE_string(
'model_mouth_name', 'lipread_mouth_big', 'The name of the architecture to train.')
tf.app.flags.DEFINE_string(
'preprocessing_name', None, 'The name of the preprocessing to use. If left '
'as `None`, then the model_name flag is used.')
tf.app.flags.DEFINE_integer(
'batch_size', 1024 , 'The number of samples in each batch.')
tf.app.flags.DEFINE_integer(
'train_image_size', None, 'Train image size')
tf.app.flags.DEFINE_integer('max_number_of_steps', 125000,
'The maximum number of training steps.')
#####################
# Fine-Tuning Flags #
#####################
tf.app.flags.DEFINE_string(
'checkpoint_path', None,
'The path to a checkpoint from which to fine-tune. ex:/Path/to/chackpoint')
tf.app.flags.DEFINE_string(
'checkpoint_exclude_scopes', None,
'Comma-separated list of scopes of variables to exclude when restoring'
'from a checkpoint. ex: vgg_19/fc8/biases,vgg_19/fc8/weights')
tf.app.flags.DEFINE_string(
'trainable_scopes', None,
'Comma-separated list of scopes to filter the set of variables to train.'
'By default, None would train all the variables.')
tf.app.flags.DEFINE_boolean(
'ignore_missing_vars', False,
'When restoring a checkpoint would ignore missing variables.')
# Store all elemnts in FLAG structure!
FLAGS = tf.app.flags.FLAGS
def _configure_learning_rate(num_samples_per_epoch, global_step):
"""Configures the learning rate.
Args:
num_samples_per_epoch: The number of samples in each epoch of training.
global_step: The global_step tensor.
Returns:
A `Tensor` representing the learning rate.
Raises:
ValueError: if
"""
decay_steps = int(num_samples_per_epoch / FLAGS.batch_size *
FLAGS.num_epochs_per_decay)
if FLAGS.sync_replicas:
decay_steps /= FLAGS.replicas_to_aggregate
if FLAGS.learning_rate_decay_type == 'exponential':
return tf.train.exponential_decay(FLAGS.learning_rate,
global_step,
decay_steps,
FLAGS.learning_rate_decay_factor,
staircase=True,
name='exponential_decay_learning_rate')
elif FLAGS.learning_rate_decay_type == 'fixed':
return tf.constant(FLAGS.learning_rate, name='fixed_learning_rate')
elif FLAGS.learning_rate_decay_type == 'polynomial':
return tf.train.polynomial_decay(FLAGS.learning_rate,
global_step,
decay_steps,
FLAGS.end_learning_rate,
power=1.0,
cycle=False,
name='polynomial_decay_learning_rate')
else:
raise ValueError('learning_rate_decay_type [%s] was not recognized',
FLAGS.learning_rate_decay_type)
def _configure_optimizer(learning_rate):
"""Configures the optimizer used for training.
Args:
learning_rate: A scalar or `Tensor` learning rate.
Returns:
An instance of an optimizer.
Raises:
ValueError: if FLAGS.optimizer is not recognized.
"""
if FLAGS.optimizer == 'adadelta':
optimizer = tf.train.AdadeltaOptimizer(
learning_rate,
rho=FLAGS.adadelta_rho,
epsilon=FLAGS.opt_epsilon)
elif FLAGS.optimizer == 'adagrad':
optimizer = tf.train.AdagradOptimizer(
learning_rate,
initial_accumulator_value=FLAGS.adagrad_initial_accumulator_value)
elif FLAGS.optimizer == 'adam':
optimizer = tf.train.AdamOptimizer(
learning_rate,
beta1=FLAGS.adam_beta1,
beta2=FLAGS.adam_beta2,
epsilon=FLAGS.opt_epsilon)
elif FLAGS.optimizer == 'ftrl':
optimizer = tf.train.FtrlOptimizer(
learning_rate,
learning_rate_power=FLAGS.ftrl_learning_rate_power,
initial_accumulator_value=FLAGS.ftrl_initial_accumulator_value,
l1_regularization_strength=FLAGS.ftrl_l1,
l2_regularization_strength=FLAGS.ftrl_l2)
elif FLAGS.optimizer == 'momentum':
optimizer = tf.train.MomentumOptimizer(
learning_rate,
momentum=FLAGS.momentum,
name='Momentum')
elif FLAGS.optimizer == 'rmsprop':
optimizer = tf.train.RMSPropOptimizer(
learning_rate,
decay=FLAGS.rmsprop_decay,
momentum=FLAGS.rmsprop_momentum,
epsilon=FLAGS.opt_epsilon)
elif FLAGS.optimizer == 'sgd':
optimizer = tf.train.GradientDescentOptimizer(learning_rate)
else:
raise ValueError('Optimizer [%s] was not recognized', FLAGS.optimizer)
return optimizer
def _add_variables_summaries(learning_rate):
summaries = []
for variable in slim.get_model_variables():
summaries.append(tf.summary.histogram(variable.op.name, variable))
summaries.append(tf.summary.scalar('training/Learning Rate', learning_rate))
return summaries
def _get_init_fn():
"""Returns a function run by the chief worker to warm-start the training.
Note that the init_fn is only run when initializing the model during the very
first global step.
Returns:
An init function run by the supervisor.
"""
if FLAGS.checkpoint_path is None:
return None
# Warn the user if a checkpoint exists in the train_dir. Then we'll be
# ignoring the checkpoint anyway.
if tf.train.latest_checkpoint(FLAGS.train_dir):
tf.logging.info(
'Ignoring --checkpoint_path because a checkpoint already exists in %s'
% FLAGS.train_dir)
return None
exclusions = []
if FLAGS.checkpoint_exclude_scopes:
exclusions = [scope.strip()
for scope in FLAGS.checkpoint_exclude_scopes.split(',')]
# TODO(sguada) variables.filter_variables()
variables_to_restore = []
for var in slim.get_model_variables():
excluded = False
for exclusion in exclusions:
if var.op.name.startswith(exclusion):
excluded = True
break
if not excluded:
variables_to_restore.append(var)
if tf.gfile.IsDirectory(FLAGS.checkpoint_path):
checkpoint_path = tf.train.latest_checkpoint(FLAGS.checkpoint_path)
else:
checkpoint_path = FLAGS.checkpoint_path
tf.logging.info('Fine-tuning from %s' % checkpoint_path)
return slim.assign_from_checkpoint_fn(
checkpoint_path,
variables_to_restore,
ignore_missing_vars=FLAGS.ignore_missing_vars)
def _get_variables_to_train():
"""Returns a list of variables to train.
Returns:
A list of variables to train by the optimizer.
"""
if FLAGS.trainable_scopes is None:
return tf.trainable_variables()
else:
scopes = [scope.strip() for scope in FLAGS.trainable_scopes.split(',')]
variables_to_train = []
for scope in scopes:
variables = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope)
variables_to_train.extend(variables)
return variables_to_train
def | (_):
if not FLAGS.dataset_dir:
raise ValueError('You must supply the dataset directory with --dataset_dir')
tf.logging.set_verbosity(tf.logging.INFO)
graph = tf.Graph()
with graph.as_default():
######################
# Config model_deploy#
######################
deploy_config = model_deploy.DeploymentConfig(
num_clones=FLAGS.num_clones,
clone_on_cpu=FLAGS.clone_on_cpu,
replica_id=FLAGS.task,
num_replicas=FLAGS.worker_replicas,
num_ps_tasks=FLAGS.num_ps_tasks)
# # Create global_step
# with tf.device(deploy_config.variables_device()):
# global_step = slim.create_global_step()
###############################
# Select and load the dataset #
###############################
dataset = dataset_factory.get_dataset(
FLAGS.dataset_name, FLAGS.dataset_split_name, FLAGS.dataset_dir)
# Load the dataset
fileh = tables.open_file(
'/path/to/dataset/train.hdf5', mode='r')
fileh_test = tables.open_file(
'/path/to/dataset/test.hdf5',
mode='r')
num_samples_per_epoch = fileh_test.root.label.shape[0]
num_batches_per_epoch = int(num_samples_per_epoch / FLAGS.batch_size)
######################
# Select the network #
######################
network_speech_fn = nets_factory.get_network_fn(
FLAGS.model_speech_name,
num_classes=(dataset.num_classes - FLAGS.labels_offset),
weight_decay=FLAGS.weight_decay,
is_training=False)
network_mouth_fn = nets_factory.get_network_fn(
FLAGS.model_mouth_name,
num_classes=(dataset.num_classes - FLAGS.labels_offset),
weight_decay=FLAGS.weight_decay,
is_training=False)
#####################################
# Select the preprocessing function #
#####################################
#TODO: Do some preprocessing if necessary.
##############################################################
# Create a dataset provider that loads data from the dataset #
##############################################################
with tf.device(deploy_config.inputs_device()):
"""
Define the place holders and creating the batch tensor.
"""
# Place holders
mouth = tf.placeholder(tf.float32, (60, 100, 9))
speech = tf.placeholder(tf.float32, (40, 15, 3))
label = tf.placeholder(tf.uint8, (1))
# Create the batch tensors
batch_speech, batch_mouth, batch_labels = tf.train.batch(
[speech, mouth, label],
batch_size=FLAGS.batch_size,
num_threads=FLAGS.num_preprocessing_threads,
capacity=5 * FLAGS.batch_size)
####################
# Run the model #
####################
# Outputs of two networks
logits_speech, end_points_speech = network_speech_fn(batch_speech)
logits_mouth, end_points_mouth = network_mouth_fn(batch_mouth)
#############################
# Specify the loss function #
#############################
# Two distance metric are defined:
# 1 - distance_weighted: which is a weighted average of the distance between two structures.
# 2 - distance_l2: which is the regular l2-norm of the two networks outputs.
#### Weighted distance ######
distance_vector = tf.subtract(logits_speech, logits_mouth, name=None)
# distance_weighted = slim.fully_connected(distance_vector, 1, activation_fn=None, normalizer_fn=None,
# scope='fc_weighted')
#### Euclidean distance ####
distance_l2 = tf.sqrt(tf.reduce_sum(tf.pow(tf.subtract(logits_speech, logits_mouth), 2), 1, keep_dims=True))
#### Contrastive loss #####
loss = losses.contrastive_loss(batch_labels, distance_l2, 1)
# Adding the accuracy metric
with tf.name_scope('accuracy'):
predictions = tf.to_int64(tf.sign(tf.sign(distance_l2 - 0.5) + 1))
labels = tf.argmax(distance_l2, 1)
accuracy = tf.reduce_mean(tf.to_float(tf.equal(predictions, labels)))
tf.add_to_collection('accuracy', accuracy)
# Gather initial summaries.
summaries = set(tf.get_collection(tf.GraphKeys.SUMMARIES))
# Add summaries for all end_points.
for end_point in end_points_speech:
x = end_points_speech[end_point]
summaries.add(tf.summary.histogram('activations_speech/' + end_point, x))
summaries.add(tf.summary.scalar('sparsity_speech/' + end_point,
tf.nn.zero_fraction(x)))
for end_point in end_points_mouth:
x = end_points_mouth[end_point]
summaries.add(tf.summary.histogram('activations_mouth/' + end_point, x))
summaries.add(tf.summary.scalar('sparsity_mouth/' + end_point,
tf.nn.zero_fraction(x)))
# # Add summaries for losses.
# for loss in tf.get_collection(tf.GraphKeys.LOSSES, first_clone_scope):
# summaries.add(tf.summary.scalar('losses/%s' % loss.op.name, loss))
# Add summaries for variables.
for variable in slim.get_model_variables():
summaries.add(tf.summary.histogram(variable.op.name, variable))
#################################
# Configure the moving averages #
#################################
if FLAGS.moving_average_decay:
moving_average_variables = slim.get_model_variables()
variable_averages = tf.train.ExponentialMovingAverage(
FLAGS.moving_average_decay, global_step)
else:
moving_average_variables, variable_averages = None, None
#########################################
# Configure the optimization procedure. #
#########################################
# deploy_config.optimizer_device()
# with tf.device(deploy_config.optimizer_device()):
# learning_rate = _configure_learning_rate(num_samples_per_epoch, global_step)
# optimizer = _configure_optimizer(learning_rate)
# optimizer = optimizer.minimize(loss)
# summaries.add(tf.summary.scalar('learning_rate', learning_rate))
# if FLAGS.sync_replicas:
# # If sync_replicas is enabled, the averaging will be done in the chief
# # queue runner.
# optimizer = tf.train.SyncReplicasOptimizer(
# opt=optimizer,
# replicas_to_aggregate=FLAGS.replicas_to_aggregate,
# variable_averages=variable_averages,
# variables_to_average=moving_average_variables,
# replica_id=tf.constant(FLAGS.task, tf.int32, shape=()),
# total_num_replicas=FLAGS.worker_replicas)
# elif FLAGS.moving_average_decay:
# # Update ops executed locally by trainer.
# update_ops.append(variable_averages.apply(moving_average_variables))
#
# summaries.add(tf.summary.scalar('eval/Loss', loss))
#
# summaries |= set(tf.get_collection(tf.GraphKeys.SUMMARIES))
#
# # Merge all summaries together.
# summary_op = tf.summary.merge(list(summaries), name='summary_op')
###########################
# Kicks off the training. #
###########################
with tf.Session(graph=graph) as sess:
# Initialization of the network.
variables_to_restore = slim.get_variables_to_restore()
saver = tf.train.Saver(slim.get_variables_to_restore(),max_to_keep=15)
coord = tf.train.Coordinator()
sess.run(tf.global_variables_initializer())
sess.run(tf.local_variables_initializer())
num_epoch = 1
# Save the model
list_checkpoints = ['1093','2176','3279','5465']
list_checkpoints = ['3279','5465','6558','7651','8744','9837','10930','12023','13116','15302','16395','17488','18581','19674','20767','21860']
FPR = []
TPR = []
for checkpoint_num, checkpoint_index in enumerate(list_checkpoints):
checkpoint_dir = 'TRAIN_LIPREAD/train_logs-' + checkpoint_index
saver.restore(sess, checkpoint_dir)
# op to write logs to Tensorboard
summary_writer = tf.summary.FileWriter(FLAGS.train_dir, graph=graph)
# score_dissimilarity_vector = np.zeros((FLAGS.batch_size * num_batches_per_epoch , 1))
# label_vector = np.zeros((FLAGS.batch_size * num_batches_per_epoch,))
# num_batches_per_epoch = 20
score_dissimilarity_vector = np.zeros((FLAGS.batch_size * num_batches_per_epoch, 1))
label_vector = np.zeros((FLAGS.batch_size * num_batches_per_epoch,))
for epoch in range(num_epoch):
# Loop over all batches
# num_batches_per_epoch
for i in range(num_batches_per_epoch):
start_idx = i * FLAGS.batch_size
end_idx = (i + 1) * FLAGS.batch_size
speech, mouth, label = fileh.root.speech[start_idx:end_idx], fileh.root.mouth[
start_idx:end_idx], fileh.root.label[
start_idx:end_idx]
# mean subtraction
speech = (speech - mean_speech) / std_speech
mouth = (mouth - mean_mouth) / std_mouth
score_dissimilarity, test_accuracy = sess.run([distance_l2, accuracy],
feed_dict={batch_speech: speech, batch_mouth: mouth,
batch_labels: label.reshape([FLAGS.batch_size, 1])})
print("Epoch " + str(epoch + 1) + ", Minibatch " + str(i+1) + " of %d " % num_batches_per_epoch)
score_dissimilarity_vector[start_idx:end_idx] = score_dissimilarity
label_vector[start_idx:end_idx] = label
# ROC
##############################
##### K-split validation #####
##############################
K = 10
EER = np.zeros((len(list_checkpoints), K, 1))
AUC = np.zeros((len(list_checkpoints), K, 1))
AP = np.zeros((len(list_checkpoints), K, 1))
batch_k_validation = int(label_vector.shape[0] / float(K))
# PlotROC.Plot_ROC_Fn(label_vector, score_dissimilarity_vector, phase='test')
# PlotPR.Plot_PR_Fn(label_vector, score_dissimilarity_vector, phase='test')
# PlotHIST.Plot_HIST_Fn(label_vector, score_dissimilarity_vector, phase='test')
for i in range(K):
EER[checkpoint_num,i,:], AUC[checkpoint_num,i,:], AP[checkpoint_num,i,:],fpr, tpr = calculate_roc.calculate_eer_auc_ap(label_vector[i * batch_k_validation:(i+1) * batch_k_validation], score_dissimilarity_vector[i * batch_k_validation:(i+1) * batch_k_validation])
FPR.append(fpr.tolist())
TPR.append(tpr.tolist())
print('EER=',np.mean(EER[checkpoint_num],axis=0),np.std(EER[checkpoint_num],axis=0))
print('STD=',np.mean(AUC[checkpoint_num],axis=0),np.std(AUC[checkpoint_num],axis=0))
print('AP=', np.mean(AP[checkpoint_num], axis=0), np.std(AP[checkpoint_num], axis=0))
pickle.dump(FPR, open("fpr.p", "wb"))
pickle.dump(TPR, open("tpr.p", "wb"))
# color = ['red','blue']
# fig = plt.figure()
# ax = fig.gca()
# for i in range(len(FPR)):
# # Plot the ROC
# plt.plot(FPR[i], TPR[i], color = color[i], linewidth=2, label='ROC Curve' + '_' + str(i))
#
# plt.plot([0, 1], [0, 1], 'k--', lw=1)
# plt.xlim([0.0, 1.0])
# plt.ylim([0.0, 1.05])
# # ax.set_xticks(np.arange(0, 1.1, 0.1))
# # ax.set_yticks(np.arange(0, 1.1, 0.1))
# plt.title('ROC.jpg')
# plt.xlabel('False Positive Rate')
# plt.ylabel('True Positive Rate')
#
# # # Cutting the floating number
# # AUC = '%.2f' % AUC
# # EER = '%.2f' % EER
# # # AP = '%.2f' % AP
# #
# # # Setting text to plot
# # # plt.text(0.5, 0.6, 'AP = ' + str(AP), fontdict=None)
# # plt.text(0.5, 0.5, 'AUC = ' + str(AUC), fontdict=None)
# # plt.text(0.5, 0.4, 'EER = ' + str(EER), fontdict=None)
# plt.grid(True)
# plt.legend(loc="lower right")
# plt.show()
# fig.savefig('ROC.jpg')
if __name__ == '__main__':
tf.app.run()
| main | identifier_name |
test.py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import matplotlib.pyplot as plt
import sys
import tables
import numpy as np
from tensorflow.python.ops import control_flow_ops
from datasets import dataset_factory
from deployment import model_deploy
from nets import nets_factory
from auxiliary import losses
from preprocessing import preprocessing_factory
from roc_curve import calculate_roc
from roc_curve import PlotROC
from roc_curve import PlotPR
from roc_curve import PlotHIST
import pickle
slim = tf.contrib.slim
######################
# Train Directory #
######################
tf.app.flags.DEFINE_string(
'master', '', 'The address of the TensorFlow master to use.')
tf.app.flags.DEFINE_string(
'train_dir', 'TRAIN_LIPREAD/train_logs-1093',
'Directory where checkpoints and event logs are written to.')
tf.app.flags.DEFINE_integer('num_clones', 2,
'Number of model clones to deploy.')
tf.app.flags.DEFINE_boolean('clone_on_cpu', False,
'Use CPUs to deploy clones.')
tf.app.flags.DEFINE_integer('worker_replicas', 1, 'Number of worker replicas.')
tf.app.flags.DEFINE_integer(
'num_ps_tasks', 0,
'The number of parameter servers. If the value is 0, then the parameters '
'are handled locally by the worker.')
tf.app.flags.DEFINE_integer(
'num_readers', 8,
'The number of parallel readers that read data from the dataset.')
tf.app.flags.DEFINE_integer(
'num_preprocessing_threads', 8,
'The number of threads used to create the batches.')
tf.app.flags.DEFINE_integer(
'log_every_n_steps', 20,
'The frequency with which logs are print.')
tf.app.flags.DEFINE_integer(
'save_summaries_secs', 10,
'The frequency with which summaries are saved, in seconds.')
tf.app.flags.DEFINE_integer(
'save_interval_secs', 500,
'The frequency with which the model is saved, in seconds.')
tf.app.flags.DEFINE_integer(
'task', 0, 'Task id of the replica running the training.')
######################
# Optimization Flags #
######################
tf.app.flags.DEFINE_float(
'weight_decay', 0.00004, 'The weight decay on the model weights.')
tf.app.flags.DEFINE_string(
'optimizer', 'sgd',
'The name of the optimizer, one of "adadelta", "adagrad", "adam",'
'"ftrl", "momentum", "sgd" or "rmsprop".')
tf.app.flags.DEFINE_float(
'adadelta_rho', 0.95,
'The decay rate for adadelta.')
tf.app.flags.DEFINE_float(
'adagrad_initial_accumulator_value', 0.1,
'Starting value for the AdaGrad accumulators.')
tf.app.flags.DEFINE_float(
'adam_beta1', 0.9,
'The exponential decay rate for the 1st moment estimates.')
tf.app.flags.DEFINE_float(
'adam_beta2', 0.999,
'The exponential decay rate for the 2nd moment estimates.')
tf.app.flags.DEFINE_float('opt_epsilon', 1.0, 'Epsilon term for the optimizer.')
tf.app.flags.DEFINE_float('ftrl_learning_rate_power', -0.5,
'The learning rate power.')
tf.app.flags.DEFINE_float(
'ftrl_initial_accumulator_value', 0.1,
'Starting value for the FTRL accumulators.')
tf.app.flags.DEFINE_float(
'ftrl_l1', 0.0, 'The FTRL l1 regularization strength.')
tf.app.flags.DEFINE_float(
'ftrl_l2', 0.0, 'The FTRL l2 regularization strength.')
tf.app.flags.DEFINE_float(
'momentum', 0.9,
'The momentum for the MomentumOptimizer and RMSPropOptimizer.')
tf.app.flags.DEFINE_float('rmsprop_momentum', 0.9, 'Momentum.')
tf.app.flags.DEFINE_float('rmsprop_decay', 0.9, 'Decay term for RMSProp.')
#######################
# Learning Rate Flags #
#######################
tf.app.flags.DEFINE_string(
'learning_rate_decay_type',
'exponential',
'Specifies how the learning rate is decayed. One of "fixed", "exponential",'
' or "polynomial"')
tf.app.flags.DEFINE_float('learning_rate', 0.01, 'Initial learning rate.')
tf.app.flags.DEFINE_float(
'end_learning_rate', 0.0001,
'The minimal end learning rate used by a polynomial decay learning rate.')
tf.app.flags.DEFINE_float(
'label_smoothing', 0.0, 'The amount of label smoothing.')
tf.app.flags.DEFINE_float(
'learning_rate_decay_factor', 0.94, 'Learning rate decay factor.')
tf.app.flags.DEFINE_float(
'num_epochs_per_decay', 2.0,
'Number of epochs after which learning rate decays.')
tf.app.flags.DEFINE_bool(
'sync_replicas', False,
'Whether or not to synchronize the replicas during training.')
tf.app.flags.DEFINE_integer(
'replicas_to_aggregate', 1,
'The Number of gradients to collect before updating params.')
tf.app.flags.DEFINE_float(
'moving_average_decay', None,
'The decay to use for the moving average.'
'If left as None, then moving averages are not used.')
#######################
# Dataset Flags #
#######################
tf.app.flags.DEFINE_string(
'dataset_name', 'lipread', 'The name of the dataset to load.')
tf.app.flags.DEFINE_string(
'dataset_split_name', 'train', 'The name of the train/test split.')
tf.app.flags.DEFINE_string(
'dataset_dir', '/PATH/to/DATASET', 'The directory where the dataset files are stored.')
tf.app.flags.DEFINE_integer(
'labels_offset', 0,
'An offset for the labels in the dataset. This flag is primarily used to '
'evaluate the VGG and ResNet architectures which do not use a background '
'class for the ImageNet dataset.')
tf.app.flags.DEFINE_string(
'model_speech_name', 'lipread_speech_1D', 'The name of the architecture to train.')
tf.app.flags.DEFINE_string(
'model_mouth_name', 'lipread_mouth_big', 'The name of the architecture to train.')
tf.app.flags.DEFINE_string(
'preprocessing_name', None, 'The name of the preprocessing to use. If left '
'as `None`, then the model_name flag is used.')
tf.app.flags.DEFINE_integer(
'batch_size', 1024 , 'The number of samples in each batch.')
tf.app.flags.DEFINE_integer(
'train_image_size', None, 'Train image size')
tf.app.flags.DEFINE_integer('max_number_of_steps', 125000,
'The maximum number of training steps.')
#####################
# Fine-Tuning Flags #
#####################
tf.app.flags.DEFINE_string(
'checkpoint_path', None,
'The path to a checkpoint from which to fine-tune. ex:/Path/to/chackpoint')
tf.app.flags.DEFINE_string(
'checkpoint_exclude_scopes', None,
'Comma-separated list of scopes of variables to exclude when restoring'
'from a checkpoint. ex: vgg_19/fc8/biases,vgg_19/fc8/weights')
tf.app.flags.DEFINE_string(
'trainable_scopes', None,
'Comma-separated list of scopes to filter the set of variables to train.'
'By default, None would train all the variables.')
tf.app.flags.DEFINE_boolean(
'ignore_missing_vars', False,
'When restoring a checkpoint would ignore missing variables.')
# Store all elemnts in FLAG structure!
FLAGS = tf.app.flags.FLAGS
def _configure_learning_rate(num_samples_per_epoch, global_step):
"""Configures the learning rate.
Args:
num_samples_per_epoch: The number of samples in each epoch of training.
global_step: The global_step tensor.
Returns:
A `Tensor` representing the learning rate.
Raises:
ValueError: if
"""
decay_steps = int(num_samples_per_epoch / FLAGS.batch_size *
FLAGS.num_epochs_per_decay)
if FLAGS.sync_replicas:
decay_steps /= FLAGS.replicas_to_aggregate
if FLAGS.learning_rate_decay_type == 'exponential':
return tf.train.exponential_decay(FLAGS.learning_rate,
global_step,
decay_steps,
FLAGS.learning_rate_decay_factor,
staircase=True,
name='exponential_decay_learning_rate')
elif FLAGS.learning_rate_decay_type == 'fixed':
return tf.constant(FLAGS.learning_rate, name='fixed_learning_rate')
elif FLAGS.learning_rate_decay_type == 'polynomial':
return tf.train.polynomial_decay(FLAGS.learning_rate,
global_step,
decay_steps,
FLAGS.end_learning_rate,
power=1.0,
cycle=False,
name='polynomial_decay_learning_rate')
else:
raise ValueError('learning_rate_decay_type [%s] was not recognized',
FLAGS.learning_rate_decay_type)
def _configure_optimizer(learning_rate):
"""Configures the optimizer used for training.
Args:
learning_rate: A scalar or `Tensor` learning rate.
Returns:
An instance of an optimizer.
Raises:
ValueError: if FLAGS.optimizer is not recognized.
"""
if FLAGS.optimizer == 'adadelta':
optimizer = tf.train.AdadeltaOptimizer(
learning_rate,
rho=FLAGS.adadelta_rho,
epsilon=FLAGS.opt_epsilon)
elif FLAGS.optimizer == 'adagrad':
optimizer = tf.train.AdagradOptimizer(
learning_rate,
initial_accumulator_value=FLAGS.adagrad_initial_accumulator_value)
elif FLAGS.optimizer == 'adam':
optimizer = tf.train.AdamOptimizer(
learning_rate,
beta1=FLAGS.adam_beta1,
beta2=FLAGS.adam_beta2,
epsilon=FLAGS.opt_epsilon)
elif FLAGS.optimizer == 'ftrl':
optimizer = tf.train.FtrlOptimizer(
learning_rate,
learning_rate_power=FLAGS.ftrl_learning_rate_power,
initial_accumulator_value=FLAGS.ftrl_initial_accumulator_value,
l1_regularization_strength=FLAGS.ftrl_l1,
l2_regularization_strength=FLAGS.ftrl_l2)
elif FLAGS.optimizer == 'momentum':
optimizer = tf.train.MomentumOptimizer(
learning_rate,
momentum=FLAGS.momentum,
name='Momentum')
elif FLAGS.optimizer == 'rmsprop':
optimizer = tf.train.RMSPropOptimizer(
learning_rate,
decay=FLAGS.rmsprop_decay,
momentum=FLAGS.rmsprop_momentum,
epsilon=FLAGS.opt_epsilon)
elif FLAGS.optimizer == 'sgd':
optimizer = tf.train.GradientDescentOptimizer(learning_rate)
else:
raise ValueError('Optimizer [%s] was not recognized', FLAGS.optimizer)
return optimizer
def _add_variables_summaries(learning_rate):
summaries = []
for variable in slim.get_model_variables():
summaries.append(tf.summary.histogram(variable.op.name, variable))
summaries.append(tf.summary.scalar('training/Learning Rate', learning_rate))
return summaries
def _get_init_fn():
"""Returns a function run by the chief worker to warm-start the training.
Note that the init_fn is only run when initializing the model during the very
first global step.
Returns:
An init function run by the supervisor.
"""
if FLAGS.checkpoint_path is None:
return None
# Warn the user if a checkpoint exists in the train_dir. Then we'll be
# ignoring the checkpoint anyway.
if tf.train.latest_checkpoint(FLAGS.train_dir):
tf.logging.info(
'Ignoring --checkpoint_path because a checkpoint already exists in %s'
% FLAGS.train_dir)
return None
exclusions = []
if FLAGS.checkpoint_exclude_scopes:
exclusions = [scope.strip()
for scope in FLAGS.checkpoint_exclude_scopes.split(',')]
# TODO(sguada) variables.filter_variables()
variables_to_restore = []
for var in slim.get_model_variables():
excluded = False
for exclusion in exclusions:
if var.op.name.startswith(exclusion):
excluded = True
break
if not excluded:
variables_to_restore.append(var)
if tf.gfile.IsDirectory(FLAGS.checkpoint_path):
checkpoint_path = tf.train.latest_checkpoint(FLAGS.checkpoint_path)
else:
checkpoint_path = FLAGS.checkpoint_path
tf.logging.info('Fine-tuning from %s' % checkpoint_path)
return slim.assign_from_checkpoint_fn(
checkpoint_path,
variables_to_restore,
ignore_missing_vars=FLAGS.ignore_missing_vars)
def _get_variables_to_train():
"""Returns a list of variables to train.
Returns:
A list of variables to train by the optimizer.
"""
if FLAGS.trainable_scopes is None:
return tf.trainable_variables()
else:
scopes = [scope.strip() for scope in FLAGS.trainable_scopes.split(',')]
variables_to_train = []
for scope in scopes:
variables = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope)
variables_to_train.extend(variables)
return variables_to_train
def main(_):
if not FLAGS.dataset_dir:
raise ValueError('You must supply the dataset directory with --dataset_dir')
tf.logging.set_verbosity(tf.logging.INFO)
graph = tf.Graph()
with graph.as_default():
######################
# Config model_deploy#
######################
deploy_config = model_deploy.DeploymentConfig(
num_clones=FLAGS.num_clones,
clone_on_cpu=FLAGS.clone_on_cpu,
replica_id=FLAGS.task,
num_replicas=FLAGS.worker_replicas,
num_ps_tasks=FLAGS.num_ps_tasks)
# # Create global_step
# with tf.device(deploy_config.variables_device()):
# global_step = slim.create_global_step()
###############################
# Select and load the dataset #
###############################
dataset = dataset_factory.get_dataset(
FLAGS.dataset_name, FLAGS.dataset_split_name, FLAGS.dataset_dir)
# Load the dataset
fileh = tables.open_file(
'/path/to/dataset/train.hdf5', mode='r')
fileh_test = tables.open_file(
'/path/to/dataset/test.hdf5',
mode='r')
num_samples_per_epoch = fileh_test.root.label.shape[0]
num_batches_per_epoch = int(num_samples_per_epoch / FLAGS.batch_size)
######################
# Select the network #
######################
network_speech_fn = nets_factory.get_network_fn(
FLAGS.model_speech_name,
num_classes=(dataset.num_classes - FLAGS.labels_offset),
weight_decay=FLAGS.weight_decay,
is_training=False)
network_mouth_fn = nets_factory.get_network_fn(
FLAGS.model_mouth_name,
num_classes=(dataset.num_classes - FLAGS.labels_offset),
weight_decay=FLAGS.weight_decay,
is_training=False)
#####################################
# Select the preprocessing function #
#####################################
#TODO: Do some preprocessing if necessary.
##############################################################
# Create a dataset provider that loads data from the dataset #
##############################################################
with tf.device(deploy_config.inputs_device()):
"""
Define the place holders and creating the batch tensor.
"""
# Place holders
mouth = tf.placeholder(tf.float32, (60, 100, 9))
speech = tf.placeholder(tf.float32, (40, 15, 3))
label = tf.placeholder(tf.uint8, (1))
# Create the batch tensors
batch_speech, batch_mouth, batch_labels = tf.train.batch(
[speech, mouth, label],
batch_size=FLAGS.batch_size,
num_threads=FLAGS.num_preprocessing_threads,
capacity=5 * FLAGS.batch_size)
####################
# Run the model #
####################
# Outputs of two networks
logits_speech, end_points_speech = network_speech_fn(batch_speech)
logits_mouth, end_points_mouth = network_mouth_fn(batch_mouth)
#############################
# Specify the loss function #
#############################
# Two distance metric are defined:
# 1 - distance_weighted: which is a weighted average of the distance between two structures.
# 2 - distance_l2: which is the regular l2-norm of the two networks outputs.
#### Weighted distance ######
distance_vector = tf.subtract(logits_speech, logits_mouth, name=None)
# distance_weighted = slim.fully_connected(distance_vector, 1, activation_fn=None, normalizer_fn=None,
# scope='fc_weighted')
#### Euclidean distance ####
distance_l2 = tf.sqrt(tf.reduce_sum(tf.pow(tf.subtract(logits_speech, logits_mouth), 2), 1, keep_dims=True))
#### Contrastive loss #####
loss = losses.contrastive_loss(batch_labels, distance_l2, 1)
# Adding the accuracy metric
with tf.name_scope('accuracy'):
predictions = tf.to_int64(tf.sign(tf.sign(distance_l2 - 0.5) + 1))
labels = tf.argmax(distance_l2, 1)
accuracy = tf.reduce_mean(tf.to_float(tf.equal(predictions, labels)))
tf.add_to_collection('accuracy', accuracy)
# Gather initial summaries.
summaries = set(tf.get_collection(tf.GraphKeys.SUMMARIES))
# Add summaries for all end_points.
for end_point in end_points_speech:
x = end_points_speech[end_point]
summaries.add(tf.summary.histogram('activations_speech/' + end_point, x))
summaries.add(tf.summary.scalar('sparsity_speech/' + end_point,
tf.nn.zero_fraction(x)))
for end_point in end_points_mouth:
x = end_points_mouth[end_point]
summaries.add(tf.summary.histogram('activations_mouth/' + end_point, x))
summaries.add(tf.summary.scalar('sparsity_mouth/' + end_point,
tf.nn.zero_fraction(x)))
# # Add summaries for losses.
# for loss in tf.get_collection(tf.GraphKeys.LOSSES, first_clone_scope):
# summaries.add(tf.summary.scalar('losses/%s' % loss.op.name, loss))
# Add summaries for variables.
for variable in slim.get_model_variables():
summaries.add(tf.summary.histogram(variable.op.name, variable))
#################################
# Configure the moving averages #
#################################
if FLAGS.moving_average_decay:
moving_average_variables = slim.get_model_variables()
variable_averages = tf.train.ExponentialMovingAverage(
FLAGS.moving_average_decay, global_step)
else:
moving_average_variables, variable_averages = None, None
#########################################
# Configure the optimization procedure. #
#########################################
# deploy_config.optimizer_device()
# with tf.device(deploy_config.optimizer_device()):
# learning_rate = _configure_learning_rate(num_samples_per_epoch, global_step)
# optimizer = _configure_optimizer(learning_rate)
# optimizer = optimizer.minimize(loss)
# summaries.add(tf.summary.scalar('learning_rate', learning_rate))
# if FLAGS.sync_replicas:
# # If sync_replicas is enabled, the averaging will be done in the chief
# # queue runner.
# optimizer = tf.train.SyncReplicasOptimizer(
# opt=optimizer,
# replicas_to_aggregate=FLAGS.replicas_to_aggregate,
# variable_averages=variable_averages,
# variables_to_average=moving_average_variables,
# replica_id=tf.constant(FLAGS.task, tf.int32, shape=()),
# total_num_replicas=FLAGS.worker_replicas)
# elif FLAGS.moving_average_decay:
# # Update ops executed locally by trainer.
# update_ops.append(variable_averages.apply(moving_average_variables))
#
# summaries.add(tf.summary.scalar('eval/Loss', loss))
#
# summaries |= set(tf.get_collection(tf.GraphKeys.SUMMARIES))
#
# # Merge all summaries together.
# summary_op = tf.summary.merge(list(summaries), name='summary_op')
###########################
# Kicks off the training. #
###########################
with tf.Session(graph=graph) as sess:
# Initialization of the network.
variables_to_restore = slim.get_variables_to_restore()
saver = tf.train.Saver(slim.get_variables_to_restore(),max_to_keep=15)
coord = tf.train.Coordinator()
sess.run(tf.global_variables_initializer())
sess.run(tf.local_variables_initializer())
num_epoch = 1
# Save the model
list_checkpoints = ['1093','2176','3279','5465']
list_checkpoints = ['3279','5465','6558','7651','8744','9837','10930','12023','13116','15302','16395','17488','18581','19674','20767','21860']
FPR = []
TPR = []
for checkpoint_num, checkpoint_index in enumerate(list_checkpoints):
checkpoint_dir = 'TRAIN_LIPREAD/train_logs-' + checkpoint_index
saver.restore(sess, checkpoint_dir)
# op to write logs to Tensorboard
summary_writer = tf.summary.FileWriter(FLAGS.train_dir, graph=graph)
# score_dissimilarity_vector = np.zeros((FLAGS.batch_size * num_batches_per_epoch , 1))
# label_vector = np.zeros((FLAGS.batch_size * num_batches_per_epoch,))
# num_batches_per_epoch = 20
score_dissimilarity_vector = np.zeros((FLAGS.batch_size * num_batches_per_epoch, 1))
label_vector = np.zeros((FLAGS.batch_size * num_batches_per_epoch,))
for epoch in range(num_epoch):
# Loop over all batches
# num_batches_per_epoch
for i in range(num_batches_per_epoch):
|
# ROC
##############################
##### K-split validation #####
##############################
K = 10
EER = np.zeros((len(list_checkpoints), K, 1))
AUC = np.zeros((len(list_checkpoints), K, 1))
AP = np.zeros((len(list_checkpoints), K, 1))
batch_k_validation = int(label_vector.shape[0] / float(K))
# PlotROC.Plot_ROC_Fn(label_vector, score_dissimilarity_vector, phase='test')
# PlotPR.Plot_PR_Fn(label_vector, score_dissimilarity_vector, phase='test')
# PlotHIST.Plot_HIST_Fn(label_vector, score_dissimilarity_vector, phase='test')
for i in range(K):
EER[checkpoint_num,i,:], AUC[checkpoint_num,i,:], AP[checkpoint_num,i,:],fpr, tpr = calculate_roc.calculate_eer_auc_ap(label_vector[i * batch_k_validation:(i+1) * batch_k_validation], score_dissimilarity_vector[i * batch_k_validation:(i+1) * batch_k_validation])
FPR.append(fpr.tolist())
TPR.append(tpr.tolist())
print('EER=',np.mean(EER[checkpoint_num],axis=0),np.std(EER[checkpoint_num],axis=0))
print('STD=',np.mean(AUC[checkpoint_num],axis=0),np.std(AUC[checkpoint_num],axis=0))
print('AP=', np.mean(AP[checkpoint_num], axis=0), np.std(AP[checkpoint_num], axis=0))
pickle.dump(FPR, open("fpr.p", "wb"))
pickle.dump(TPR, open("tpr.p", "wb"))
# color = ['red','blue']
# fig = plt.figure()
# ax = fig.gca()
# for i in range(len(FPR)):
# # Plot the ROC
# plt.plot(FPR[i], TPR[i], color = color[i], linewidth=2, label='ROC Curve' + '_' + str(i))
#
# plt.plot([0, 1], [0, 1], 'k--', lw=1)
# plt.xlim([0.0, 1.0])
# plt.ylim([0.0, 1.05])
# # ax.set_xticks(np.arange(0, 1.1, 0.1))
# # ax.set_yticks(np.arange(0, 1.1, 0.1))
# plt.title('ROC.jpg')
# plt.xlabel('False Positive Rate')
# plt.ylabel('True Positive Rate')
#
# # # Cutting the floating number
# # AUC = '%.2f' % AUC
# # EER = '%.2f' % EER
# # # AP = '%.2f' % AP
# #
# # # Setting text to plot
# # # plt.text(0.5, 0.6, 'AP = ' + str(AP), fontdict=None)
# # plt.text(0.5, 0.5, 'AUC = ' + str(AUC), fontdict=None)
# # plt.text(0.5, 0.4, 'EER = ' + str(EER), fontdict=None)
# plt.grid(True)
# plt.legend(loc="lower right")
# plt.show()
# fig.savefig('ROC.jpg')
if __name__ == '__main__':
tf.app.run()
| start_idx = i * FLAGS.batch_size
end_idx = (i + 1) * FLAGS.batch_size
speech, mouth, label = fileh.root.speech[start_idx:end_idx], fileh.root.mouth[
start_idx:end_idx], fileh.root.label[
start_idx:end_idx]
# mean subtraction
speech = (speech - mean_speech) / std_speech
mouth = (mouth - mean_mouth) / std_mouth
score_dissimilarity, test_accuracy = sess.run([distance_l2, accuracy],
feed_dict={batch_speech: speech, batch_mouth: mouth,
batch_labels: label.reshape([FLAGS.batch_size, 1])})
print("Epoch " + str(epoch + 1) + ", Minibatch " + str(i+1) + " of %d " % num_batches_per_epoch)
score_dissimilarity_vector[start_idx:end_idx] = score_dissimilarity
label_vector[start_idx:end_idx] = label | conditional_block |
run.go | package cmd
import (
"context"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"net/http"
"os"
"os/exec"
"path/filepath"
"strconv"
"strings"
"sync"
"syscall"
"time"
"github.com/pinpt/agent/v4/internal/util"
"github.com/pinpt/agent/v4/runner"
"github.com/pinpt/agent/v4/sdk"
"github.com/pinpt/agent/v4/sysinfo"
"github.com/pinpt/go-common/v10/api"
"github.com/pinpt/go-common/v10/datetime"
"github.com/pinpt/go-common/v10/event"
"github.com/pinpt/go-common/v10/fileutil"
"github.com/pinpt/go-common/v10/graphql"
pjson "github.com/pinpt/go-common/v10/json"
"github.com/pinpt/go-common/v10/log"
pos "github.com/pinpt/go-common/v10/os"
pstrings "github.com/pinpt/go-common/v10/strings"
"github.com/pinpt/integration-sdk/agent"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
)
// DBChange event
type DBChange struct {
// Action the action that was taken
Action string `json:"action" codec:"action" bson:"action" yaml:"action" faker:"-"`
// Data the data payload of the change
Data string `json:"data" codec:"data" bson:"data" yaml:"data" faker:"-"`
}
// Integration A registry integration
type Integration struct {
// RefType the reference type
RefType string `json:"ref_type" codec:"ref_type" bson:"ref_type" yaml:"ref_type" faker:"-"`
// UpdatedAt the date the integration was last updated
UpdatedAt int64 `json:"updated_ts" codec:"updated_ts" bson:"updated_ts" yaml:"updated_ts" faker:"-"`
// Version the latest version that was published
Version string `json:"version" codec:"version" bson:"version" yaml:"version" faker:"-"`
}
func getIntegration(ctx context.Context, logger log.Logger, channel string, dir string, publisher string, integration string, version string, cmdargs []string, force bool) (*exec.Cmd, error) {
if publisher == "" {
return nil, fmt.Errorf("missing publisher")
}
if integration == "" {
return nil, fmt.Errorf("missing integration")
}
longName := fmt.Sprintf("%s/%s", publisher, integration)
if version != "" {
longName += "/" + version
}
integrationExecutable, _ := filepath.Abs(filepath.Join(dir, integration))
if force || !fileutil.FileExists(integrationExecutable) {
log.Info(logger, "need to download integration", "integration", longName, "force", force)
var err error
integrationExecutable, err = downloadIntegration(logger, channel, dir, publisher, integration, version)
if err != nil {
return nil, fmt.Errorf("error downloading integration %s: %w", longName, err)
}
log.Info(logger, "downloaded", "integration", integrationExecutable)
}
return startIntegration(ctx, logger, integrationExecutable, cmdargs)
}
func startIntegration(ctx context.Context, logger log.Logger, integrationExecutable string, cmdargs []string) (*exec.Cmd, error) {
log.Info(logger, "starting", "file", integrationExecutable)
cm := exec.CommandContext(ctx, integrationExecutable, cmdargs...)
cm.Stdout = os.Stdout
cm.Stderr = os.Stderr
cm.Stdin = os.Stdin
cm.SysProcAttr = &syscall.SysProcAttr{Setpgid: true}
if err := cm.Start(); err != nil {
return nil, err
}
return cm, nil
}
func configFilename(cmd *cobra.Command) (string, error) {
fn, _ := cmd.Flags().GetString("config")
if fn == "" {
fn = filepath.Join(os.Getenv("HOME"), ".pinpoint-agent/config.json")
}
return filepath.Abs(fn)
}
// clientFromConfig will use the contents of ConfigFile to make a client
func clientFromConfig(config *runner.ConfigFile) (graphql.Client, error) {
gclient, err := graphql.NewClient(config.CustomerID, "", "", api.BackendURL(api.GraphService, config.Channel))
if err != nil {
return nil, err
}
gclient.SetHeader("Authorization", config.APIKey)
return gclient, nil
}
func validateConfig(config *runner.ConfigFile, channel string, force bool) (bool, error) {
var resp struct {
Expired bool `json:"expired"`
Valid bool `json:"valid"`
}
if !force {
res, err := api.Get(context.Background(), channel, api.AuthService, "/validate?customer_id="+config.CustomerID, config.APIKey)
if err != nil {
return false, err
}
defer res.Body.Close()
if err := json.NewDecoder(res.Body).Decode(&resp); err != nil {
return false, err
}
} else {
resp.Expired = true
}
if resp.Expired {
newToken, err := util.RefreshOAuth2Token(http.DefaultClient, channel, "pinpoint", config.RefreshKey)
if err != nil {
return false, err
}
config.APIKey = newToken // update the new token
return true, nil
}
if resp.Valid {
return false, fmt.Errorf("the apikey or refresh token is no longer valid")
}
return false, nil
}
func saveConfig(cmd *cobra.Command, config *runner.ConfigFile) error {
cfg, err := configFilename(cmd)
if err != nil {
return err
}
of, err := os.Open(cfg)
if err != nil {
return err
}
defer of.Close()
// save our channels back to the config
if err := json.NewEncoder(of).Encode(config); err != nil {
return err
}
return nil
}
func loadConfig(cmd *cobra.Command, logger log.Logger, channel string) (string, *runner.ConfigFile) {
cfg, err := configFilename(cmd)
if err != nil {
log.Fatal(logger, "error getting config file name", "err", err)
}
if fileutil.FileExists(cfg) {
var config runner.ConfigFile
of, err := os.Open(cfg)
if err != nil {
log.Fatal(logger, "error opening config file at "+cfg, "err", err)
}
defer of.Close()
if err := json.NewDecoder(of).Decode(&config); err != nil {
log.Fatal(logger, "error parsing config file at "+cfg, "err", err)
}
of.Close()
updated, err := validateConfig(&config, channel, false)
if err != nil {
log.Fatal(logger, "error validating the apikey", "err", err)
}
if updated {
// save our changes back to the config
if err := saveConfig(cmd, &config); err != nil {
log.Fatal(logger, "error opening config file for writing at "+cfg, "err", err)
}
}
client, err := clientFromConfig(&config)
if err != nil {
log.Fatal(logger, "error creating client", "err", err)
}
exists, err := enrollmentExists(client, config.EnrollmentID)
if err != nil {
log.Fatal(logger, "error checking enrollment", "err", err)
}
if exists {
return cfg, &config
}
log.Info(logger, "agent configuration found, but not known to Pinpoint, re-enrolling now", "path", cfg)
} else {
log.Info(logger, "no agent configuration found, enrolling now", "path", cfg)
}
config, err := enrollAgent(logger, channel, cfg)
if err != nil {
log.Fatal(logger, "error enrolling new agent", "err", err)
}
return cfg, config
}
type integrationInstruction int
const (
doNothing integrationInstruction = iota
shouldStart
shouldStop
)
func vetDBChange(evt event.SubscriptionEvent, enrollmentID string) (integrationInstruction, *agent.IntegrationInstance, error) {
var dbchange DBChange
if err := json.Unmarshal([]byte(evt.Data), &dbchange); err != nil {
return 0, nil, fmt.Errorf("error decoding dbchange: %w", err)
}
var instance agent.IntegrationInstance
if err := json.Unmarshal([]byte(dbchange.Data), &instance); err != nil {
return 0, nil, fmt.Errorf("error decoding integration instance: %w", err)
}
if instance.EnrollmentID == nil || *instance.EnrollmentID == "" || *instance.EnrollmentID != enrollmentID {
return doNothing, nil, nil
}
if instance.Active == true && instance.Setup == agent.IntegrationInstanceSetupReady {
return shouldStart, &instance, nil
}
if instance.Active == false && instance.Deleted == true {
return shouldStop, &instance, nil
}
return doNothing, nil, nil
}
type integrationResult struct {
Data *struct {
Integration struct {
RefType string `json:"ref_type"`
Publisher struct {
Identifier string `json:"identifier"`
} `json:"publisher"`
} `json:"Integration"`
} `json:"registry"`
}
var integrationQuery = `query findIntegration($id: ID!) {
registry {
Integration(_id: $id) {
ref_type
publisher {
identifier
}
}
}
}`
func pingEnrollment(logger log.Logger, client graphql.Client, enrollmentID string, datefield string, active bool) error {
log.Info(logger, "updating enrollment", "setting", datefield, "enrollment_id", enrollmentID, "active", active)
now := datetime.NewDateNow()
vars := make(graphql.Variables)
if datefield != "" {
vars[datefield] = now
vars[agent.EnrollmentModelRunningColumn] = active
}
vars[agent.EnrollmentModelLastPingDateColumn] = now
return agent.ExecEnrollmentSilentUpdateMutation(client, enrollmentID, vars, false)
}
func setIntegrationRunning(client graphql.Client, integrationInstanceID string) error {
vars := graphql.Variables{
agent.IntegrationInstanceModelSetupColumn: agent.IntegrationInstanceSetupRunning,
}
if err := agent.ExecIntegrationInstanceSilentUpdateMutation(client, integrationInstanceID, vars, false); err != nil {
return fmt.Errorf("error updating integration instance to running: %w", err)
}
return nil
}
func runIntegrationMonitor(ctx context.Context, logger log.Logger, cmd *cobra.Command) {
channel, _ := cmd.Flags().GetString("channel")
args := []string{}
cmd.Flags().Visit(func(f *pflag.Flag) {
args = append(args, "--"+f.Name, f.Value.String())
})
var gclient graphql.Client
integrations := make(map[string]string) // id -> identifier/ref_type
processes := make(map[string]*exec.Cmd)
var processLock sync.Mutex
getIntegration := func(id string) (string, error) {
processLock.Lock()
val := integrations[id]
if val != "" {
processLock.Unlock()
return val, nil
}
var res integrationResult
if err := gclient.Query(integrationQuery, graphql.Variables{"id": id}, &res); err != nil {
processLock.Unlock()
return "", err
}
if res.Data == nil {
processLock.Unlock()
return "", fmt.Errorf("couldn't find integration with id: %s", id)
}
val = res.Data.Integration.Publisher.Identifier + "/" + res.Data.Integration.RefType
integrations[id] = val
processLock.Unlock()
return val, nil
}
cfg, config := loadConfig(cmd, logger, channel)
if channel == "" {
channel = config.Channel
}
args = append(args, "--config", cfg)
gclient, err := graphql.NewClient(config.CustomerID, "", "", api.BackendURL(api.GraphService, channel))
if err != nil {
log.Fatal(logger, "error creating graphql client", "err", err)
}
gclient.SetHeader("Authorization", config.APIKey)
errors := make(chan error)
go func() {
for err := range errors {
if err != nil {
log.Fatal(logger, err.Error())
}
}
}()
groupID := "agent-run-monitor"
if config.EnrollmentID != "" {
// if self managed, we need to use a different group id than the cloud
groupID += "-" + config.EnrollmentID
}
ch, err := event.NewSubscription(ctx, event.Subscription{
GroupID: "agent-run-monitor-" + config.EnrollmentID,
Topics: []string{"ops.db.Change"},
Channel: channel,
APIKey: config.APIKey,
DisablePing: true,
Logger: logger,
Errors: errors,
DisableAutoCommit: true,
Filter: &event.SubscriptionFilter{
ObjectExpr: `model:"agent.IntegrationInstance" AND action:"update"`,
},
})
if err != nil {
log.Fatal(logger, "error creating montior subscription", "err", err)
}
ch.WaitForReady()
defer ch.Close()
// set startup date
if err := pingEnrollment(logger, gclient, config.EnrollmentID, agent.EnrollmentModelLastStartupDateColumn, true); err != nil {
log.Error(logger, "unable to update enrollment", "enrollment_id", config.EnrollmentID, "err", err)
}
runIntegration := func(name string) {
log.Info(logger, "running integration", "name", name)
processLock.Lock()
startFile, _ := ioutil.TempFile("", "")
defer os.Remove(startFile.Name())
args = append(args, "--start-file", startFile.Name())
c := exec.CommandContext(ctx, os.Args[0], append([]string{"run", name}, args...)...)
var wg sync.WaitGroup
wg.Add(1)
c.Stdin = os.Stdin
c.Stdout = os.Stdout
c.Stderr = os.Stderr
c.SysProcAttr = &syscall.SysProcAttr{Setpgid: true}
if err := c.Start(); err != nil {
processLock.Unlock()
log.Fatal(logger, "error starting "+name, "err", err)
}
exited := make(chan bool)
pos.OnExit(func(_ int) {
log.Debug(logger, "exit")
close(exited)
})
go func() {
for {
select {
case <-exited:
wg.Done()
return
case <-time.After(time.Second):
if fileutil.FileExists(startFile.Name()) {
wg.Done()
os.Remove(startFile.Name())
return
}
case <-time.After(5 * time.Minute):
log.Fatal(logger, "failed to start integration "+name+" after 5 minutes")
}
}
}()
processes[name] = c
processLock.Unlock()
log.Debug(logger, "waiting for integration to start")
wg.Wait()
if c != nil && c.ProcessState != nil && c.ProcessState.Exited() {
log.Info(logger, "integration is not running")
} else {
log.Info(logger, "integration is running")
}
}
// find all the integrations we have setup
query := &agent.IntegrationInstanceQuery{
Filters: []string{
agent.IntegrationInstanceModelDeletedColumn + " = ?",
agent.IntegrationInstanceModelEnrollmentIDColumn + " = ?",
},
Params: []interface{}{
false,
config.EnrollmentID,
},
}
q := &agent.IntegrationInstanceQueryInput{Query: query}
instances, err := agent.FindIntegrationInstances(gclient, q)
if err != nil {
log.Fatal(logger, "error finding integration instances", "err", err)
}
if instances != nil {
for _, edge := range instances.Edges {
name, err := getIntegration(edge.Node.IntegrationID)
if err != nil {
log.Fatal(logger, "error fetching integration name for integration", "err", err, "integration_id", edge.Node.IntegrationID, "id", edge.Node.ID)
}
runIntegration(name)
if edge.Node.Setup == agent.IntegrationInstanceSetupReady {
log.Info(logger, "setting integration to running 🏃♀️", "integration_instance_id", edge.Node.ID)
if err := setIntegrationRunning(gclient, edge.Node.ID); err != nil {
log.Fatal(logger, "error updating integration instance", "err", err, "integration_id", edge.Node.IntegrationID, "id", edge.Node.ID)
}
}
}
}
var restarting bool
done := make(chan bool)
finished := make(chan bool)
pos.OnExit(func(_ int) {
if !restarting {
done <- true
<-finished
log.Info(logger, "👯")
}
})
if err := pingEnrollment(logger, gclient, config.EnrollmentID, "", true); err != nil {
log.Error(logger, "unable to update enrollment", "enrollment_id", config.EnrollmentID, "err", err)
}
// calculate the duration of time left before the
refreshDuration := config.Expires.Sub(time.Now().Add(time.Minute * 30))
var shutdownWg sync.WaitGroup
// run a loop waiting for exit or an updated integration instance
completed:
for {
select {
case <-time.After(refreshDuration):
// if we get here, we need to refresh our expiring apikey and restart all the integrations
log.Info(logger, "need to update apikey before it expires and restart 🤞🏽")
// 1. fetch our new apikey
if _, err := validateConfig(config, channel, true); err != nil {
log.Fatal(logger, "error refreshing the expiring apikey", "err", err)
}
// 2. save the config file changes
if err := saveConfig(cmd, config); err != nil {
log.Fatal(logger, "error saving the new apikey", "err", err)
}
// 3. stop all of our current integrations
restarting = true
done <- true
// 4. restart ourself which should re-entrant this function
go runIntegrationMonitor(ctx, logger, cmd)
case <-time.After(time.Minute * 5):
if err := pingEnrollment(logger, gclient, config.EnrollmentID, "", true); err != nil {
log.Error(logger, "unable to update enrollment", "enrollment_id", config.EnrollmentID, "err", err)
}
case <-done:
processLock.Lock()
for k, c := range processes {
log.Debug(logger, "stopping "+k, "pid", c.Process.Pid)
shutdownWg.Add(1)
go func(c *exec.Cmd, name string) {
defer shutdownWg.Done()
syscall.Kill(-c.Process.Pid, syscall.SIGINT)
exited := make(chan bool)
go func() {
c.Wait()
log.Debug(logger, "exited "+name)
exited <- true
}()
select {
case <-time.After(time.Second * 15):
log.Debug(logger, "timed out on exit for "+name)
if c.Process != nil {
c.Process.Kill()
}
return
case <-exited:
return
}
}(c, k)
delete(processes, k)
}
processLock.Unlock()
break completed
case evt := <-ch.Channel():
instruction, instance, err := vetDBChange(evt, config.EnrollmentID)
if err != nil {
log.Fatal(logger, "error decoding db change", "err", err)
}
switch instruction {
case shouldStart:
log.Info(logger, "db change received, need to create a new process", "id", instance.ID)
if err := setIntegrationRunning(gclient, instance.ID); err != nil {
log.Fatal(logger, "error updating integration", "err", err)
}
name, err := getIntegration(instance.ID)
if err != nil {
log.Fatal(logger, "error fetching integration detail", "err", err)
}
processLock.Lock()
c := processes[name] | if c == nil {
processLock.Unlock()
runIntegration(name)
} else {
processLock.Unlock()
}
case shouldStop:
log.Info(logger, "db change delete received, need to delete process", "id", instance.ID)
name, err := getIntegration(instance.ID)
if err != nil {
log.Fatal(logger, "error fetching integration detail", "err", err)
}
processLock.Lock()
c := processes[name]
if c != nil {
c.Process.Kill()
delete(processes, instance.RefType)
}
processLock.Unlock()
}
evt.Commit()
}
}
shutdownWg.Wait()
// if restarting, dont send a shutdown or block on finished
if restarting {
return
}
if err := pingEnrollment(logger, gclient, config.EnrollmentID, agent.EnrollmentModelLastShutdownDateColumn, false); err != nil {
log.Error(logger, "unable to update enrollment", "enrollment_id", config.EnrollmentID, "err", err)
}
finished <- true
}
// enrollmentExists reports whether the backend knows an enrollment with the
// given id. Any lookup error is returned alongside the (best-effort) result.
func enrollmentExists(client graphql.Client, enrollmentID string) (bool, error) {
	e, err := agent.FindEnrollment(client, enrollmentID)
	exists := e != nil
	return exists, err
}
// enrollAgent authenticates the user via a browser redirect flow, registers
// this machine as an agent enrollment with the Pinpoint backend, and persists
// the resulting credentials to configFileName. It returns the config that was
// written (or an error; on "duplicate key" the existing enrollment is reused
// and only the local config is recreated).
func enrollAgent(logger log.Logger, channel string, configFileName string) (*runner.ConfigFile, error) {
	var config runner.ConfigFile
	config.Channel = channel
	url := sdk.JoinURL(api.BackendURL(api.AppService, channel), "/enroll")
	var userID string
	// block until the browser redirects back with credentials in the query string
	err := util.WaitForRedirect(url, func(w http.ResponseWriter, r *http.Request) {
		q := r.URL.Query()
		config.APIKey = q.Get("apikey")
		config.RefreshKey = q.Get("refresh_token")
		config.CustomerID = q.Get("customer_id")
		expires := q.Get("expires")
		if e, perr := strconv.ParseInt(expires, 10, 64); expires != "" && perr == nil {
			config.Expires = datetime.DateFromEpoch(e)
		} else {
			// missing or unparseable expiration: assume just under a day
			// (previously a bad value silently became epoch 0)
			config.Expires = time.Now().Add(time.Hour * 23)
		}
		userID = q.Get("user_id")
		w.WriteHeader(http.StatusOK)
	})
	if err != nil {
		return nil, fmt.Errorf("error waiting for browser: %w", err)
	}
	log.Info(logger, "logged in", "customer_id", config.CustomerID)
	log.Info(logger, "enrolling agent...", "customer_id", config.CustomerID)
	client, err := graphql.NewClient(config.CustomerID, "", "", api.BackendURL(api.GraphService, channel))
	if err != nil {
		return nil, fmt.Errorf("error creating graphql client: %w", err)
	}
	client.SetHeader("Authorization", config.APIKey)
	info, err := sysinfo.GetSystemInfo()
	if err != nil {
		return nil, fmt.Errorf("error getting system info: %w", err)
	}
	config.SystemID = info.ID
	created := agent.EnrollmentCreatedDate(datetime.NewDateNow())
	enr := agent.Enrollment{
		AgentVersion: commit, // TODO(robin): when we start versioning, switch this to version
		CreatedDate:  created,
		SystemID:     info.ID,
		Hostname:     info.Hostname,
		NumCPU:       info.NumCPU,
		OS:           info.OS,
		Architecture: info.Architecture,
		GoVersion:    info.GoVersion,
		CustomerID:   config.CustomerID,
		UserID:       userID,
		ID:           agent.NewEnrollmentID(config.CustomerID, info.ID),
	}
	config.EnrollmentID = enr.ID
	if err := agent.CreateEnrollment(client, enr); err != nil {
		if strings.Contains(err.Error(), "duplicate key error") {
			log.Info(logger, "looks like this system has already been enrolled, recreating local config")
		} else {
			return nil, fmt.Errorf("error creating enrollment: %w", err)
		}
	}
	if err := os.MkdirAll(filepath.Dir(configFileName), 0700); err != nil {
		return nil, fmt.Errorf("error creating config directory: %w", err)
	}
	// the config holds api credentials, so keep it owner-readable only
	// (previously written 0644)
	if err := ioutil.WriteFile(configFileName, []byte(pjson.Stringify(config)), 0600); err != nil {
		return nil, fmt.Errorf("error writing config file: %w", err)
	}
	log.Info(logger, "agent enrolled 🎉", "customer_id", config.CustomerID)
	return &config, nil
}
// enrollAgentCmd will authenticate with pinpoint and create an agent
// enrollment, persisting the credentials to the local config file.
var enrollAgentCmd = &cobra.Command{
	Use:    "enroll",
	Short:  "connect this agent to Pinpoint's backend",
	Hidden: true,
	Run: func(cmd *cobra.Command, args []string) {
		logger := log.NewCommandLogger(cmd)
		defer logger.Close()
		channel, _ := cmd.Flags().GetString("channel")
		configFile, err := configFilename(cmd)
		if err != nil {
			log.Fatal(logger, "error getting config file name", "err", err)
		}
		if _, err = enrollAgent(logger, channel, configFile); err != nil {
			log.Fatal(logger, "error enrolling this agent", "err", err)
		}
	},
}
// copyFile copies the contents of the file at from into a new file at to.
// The destination Close error is propagated (previously swallowed by defer),
// so a write that only fails at close time — e.g. a full disk flushing
// buffers — is not silently dropped.
func copyFile(from, to string) error {
	in, err := os.Open(from)
	if err != nil {
		return err
	}
	defer in.Close()
	out, err := os.Create(to)
	if err != nil {
		return err
	}
	if _, err = io.Copy(out, in); err != nil {
		out.Close()
		return err
	}
	return out.Close()
}
// runCmd represents the run command. With no arguments it becomes the monitor
// process supervising all configured integrations; with a publisher/integration
// argument it downloads (when missing or forced) and supervises that single
// integration binary, restarting it on crash and on registry update events.
var runCmd = &cobra.Command{
	Use:   "run <integration> <version>",
	Short: "run a published integration",
	Args:  cobra.MinimumNArgs(0),
	Run: func(cmd *cobra.Command, args []string) {
		_logger := log.NewCommandLogger(cmd)
		defer _logger.Close()
		ctx, cancel := context.WithCancel(context.Background())
		defer cancel()
		if len(args) == 0 {
			log.Info(_logger, "starting main process")
			// need to figure out all our configured integrations and run each one
			runIntegrationMonitor(ctx, _logger, cmd)
			return
		}
		fullIntegration := args[0]
		var version string
		if len(args) > 1 {
			version = args[1]
		}
		tok := strings.Split(fullIntegration, "/")
		if len(tok) != 2 {
			log.Fatal(_logger, "integration should be in the format: publisher/integration such as pinpt/github")
		}
		publisher := tok[0]
		integration := tok[1]
		logger := log.With(_logger, "pkg", integration)
		channel, _ := cmd.Flags().GetString("channel")
		dir, _ := cmd.Flags().GetString("dir")
		secret, _ := cmd.Flags().GetString("secret")
		dir, err := filepath.Abs(dir)
		if err != nil {
			log.Fatal(logger, "error getting dir absolute path", "err", err)
		}
		var selfManaged bool
		var ch *event.SubscriptionChannel
		var cmdargs []string
		if secret != "" {
			log.Debug(logger, "creating internal subscription")
			if channel == "" {
				channel = "stable"
			}
			// each replica agent should receive updates
			groupSuffix, herr := os.Hostname()
			if herr != nil {
				groupSuffix = pstrings.NewUUIDV4()
				log.Warn(logger, "unable to get hostname, using random uuid", "uuid", groupSuffix, "err", herr)
			}
			// BUGFIX: the Hostname error above uses its own variable (herr) so
			// this assignment targets the outer err; previously `:=` shadowed
			// err and NewSubscription failures were dropped by the check below.
			ch, err = event.NewSubscription(ctx, event.Subscription{
				GroupID:     "agent-run-" + publisher + "-" + integration + "-" + groupSuffix,
				Topics:      []string{"ops.db.Change"},
				Channel:     channel,
				HTTPHeaders: map[string]string{"x-api-key": secret},
				DisablePing: true,
				Temporary:   true,
				Logger:      logger,
				Filter: &event.SubscriptionFilter{
					ObjectExpr: `model:"registry.Integration"`,
				},
			})
			cmdargs = append(cmdargs, "--secret="+secret, "--channel="+channel)
		} else {
			selfManaged = true
			log.Debug(logger, "creating external subscription")
			cfg, config := loadConfig(cmd, logger, channel)
			apikey := config.APIKey
			if channel == "" {
				channel = config.Channel
			}
			ch, err = event.NewSubscription(ctx, event.Subscription{
				GroupID:     "agent-run-" + publisher + "-" + integration + "-" + config.EnrollmentID,
				Topics:      []string{"ops.db.Change"},
				Channel:     channel,
				APIKey:      apikey,
				DisablePing: true,
				Logger:      logger,
				Filter: &event.SubscriptionFilter{
					ObjectExpr: `model:"registry.Integration"`,
				},
			})
			cmdargs = append(cmdargs, "--config="+cfg, "--channel="+channel)
		}
		if err != nil {
			log.Fatal(logger, "error creating subscription", "err", err)
		}
		// start file is used to signal to the monitor when we're running
		startfile, _ := cmd.Flags().GetString("start-file")
		if startfile != "" {
			os.Remove(startfile)
			cmdargs = append(cmdargs, "--start-file", startfile)
			defer os.Remove(startfile)
		}
		log.Info(logger, "waiting for subscription to be ready", "channel", channel)
		ch.WaitForReady()
		log.Info(logger, "subscription is ready")
		defer ch.Close()
		var stopped, restarting bool
		var stoppedLock, restartLock sync.Mutex
		done := make(chan bool)
		finished := make(chan bool, 1)
		restart := make(chan bool, 2)
		exited := make(chan bool)
		var currentCmd *exec.Cmd
		// restarted counts consecutive crash restarts (used as a backoff);
		// NOTE(review): it is touched from the monitor goroutine without a
		// lock — racy, but only affects the backoff duration. TODO confirm.
		var restarted int
		if selfManaged {
			pos.OnExit(func(_ int) {
				stoppedLock.Lock()
				stopped = true
				stoppedLock.Unlock()
				done <- true
				<-finished
			})
		}
		integrationBinary := filepath.Join(dir, integration)
		previousIntegrationBinary := filepath.Join(dir, "old-"+integration)
		restart <- true // start it up
	exit:
		for {
			stoppedLock.Lock()
			s := stopped
			stoppedLock.Unlock()
			if s {
				break
			}
			select {
			case evt := <-ch.Channel():
				var dbchange DBChange
				json.Unmarshal([]byte(evt.Data), &dbchange)
				var instance Integration
				json.Unmarshal([]byte(dbchange.Data), &instance)
				log.Debug(logger, "db change event received", "ref_type", instance.RefType, "integration", integration)
				if instance.RefType == integration {
					switch dbchange.Action {
					case "update", "UPDATE", "upsert", "UPSERT":
						// copy the binary so we can rollback if needed
						if err := copyFile(integrationBinary, previousIntegrationBinary); err != nil {
							log.Error(logger, "error copying integration", "err", err)
							break exit
						}
						log.Info(logger, "new integration detected, will restart in 15s", "version", instance.Version)
						restartLock.Lock()
						restarting = true
						restarted = 0
						version = instance.Version
						time.Sleep(time.Second * 15)
						restart <- true // force a new download
						restartLock.Unlock()
					case "delete", "DELETE":
						// TODO -- exit with a special code to indicate we don't need to restart this integration
					}
				}
				go evt.Commit() // we need to put in a separate thread since we're holding the sub thread
			case <-done:
				// signal the child's process group and give it up to 10s to exit;
				// the loop condition above then sees stopped==true and breaks
				if currentCmd != nil {
					syscall.Kill(-currentCmd.Process.Pid, syscall.SIGINT)
					select {
					case <-time.After(time.Second * 10):
						break
					case <-exited:
						break
					}
				}
				break
			case force := <-restart:
				log.Info(logger, "restart requested")
				if currentCmd != nil && currentCmd.Process != nil {
					currentCmd.Process.Kill()
					currentCmd = nil
				}
				stoppedLock.Lock()
				s := stopped
				stoppedLock.Unlock()
				log.Info(logger, "need to start", "stopped", s)
				if !s {
					restarted++
					c, err := getIntegration(ctx, logger, channel, dir, publisher, integration, version, cmdargs, force)
					if err != nil {
						log.Error(logger, "error running integration", "err", err)
						if !fileutil.FileExists(previousIntegrationBinary) {
							break exit
						} else {
							log.Info(logger, "attempting to roll back to previous version of integration", "integration", integration)
							os.Remove(integrationBinary)
							os.Rename(previousIntegrationBinary, integrationBinary)
							os.Chmod(integrationBinary, 0775)
							c, err = startIntegration(ctx, logger, integrationBinary, cmdargs)
							if err != nil {
								log.Error(logger, "error running rolled back integration", "err", err)
								break exit
							}
						}
					}
					currentCmd = c
					os.Remove(previousIntegrationBinary)
					log.Info(logger, "started", "pid", c.Process.Pid)
					go func() {
						// monitor the exit
						err := currentCmd.Wait()
						if err != nil {
							if currentCmd != nil && currentCmd.ProcessState != nil {
								if currentCmd.ProcessState.ExitCode() != 0 {
									if !selfManaged {
										// in cloud we should just end the process
										log.Fatal(logger, "integration has exited", "restarted", restarted, "code", currentCmd.ProcessState.ExitCode(), "err", err)
									} else {
										log.Error(logger, "integration has exited", "restarted", restarted, "code", currentCmd.ProcessState.ExitCode(), "err", err)
									}
								}
							}
							log.Info(logger, "pausing", "duration", time.Second*time.Duration(restarted))
							time.Sleep(time.Second * time.Duration(restarted))
						} else {
							restarted = 0
						}
						// try and restart if we're not in the stopping mode
						stoppedLock.Lock()
						s := stopped
						stoppedLock.Unlock()
						if !s {
							restartLock.Lock()
							r := restarting
							restartLock.Unlock()
							if !r {
								restart <- false // restart but don't force a new download
							}
						} else {
							exited <- true
						}
					}()
				}
			}
		}
		log.Info(logger, "👋")
		finished <- true
	},
}
// init wires the run and enroll commands (and their flags) into the root command.
func init() {
rootCmd.AddCommand(runCmd)
runCmd.Flags().String("channel", pos.Getenv("PP_CHANNEL", ""), "the channel which can be set")
runCmd.Flags().String("config", "", "the location of the config file")
runCmd.Flags().StringP("dir", "d", "", "directory inside of which to run the integration")
runCmd.Flags().String("secret", pos.Getenv("PP_AUTH_SHARED_SECRET", ""), "internal shared secret")
runCmd.Flags().String("start-file", "", "the start file to write once running")
// secret and start-file are internal plumbing; hide them from --help output
runCmd.Flags().MarkHidden("secret")
runCmd.Flags().MarkHidden("start-file")
rootCmd.AddCommand(enrollAgentCmd)
enrollAgentCmd.Flags().String("channel", pos.Getenv("PP_CHANNEL", ""), "the channel which can be set")
enrollAgentCmd.Flags().String("config", "", "the location of the config file")
} | random_line_split | |
run.go | package cmd
import (
"context"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"net/http"
"os"
"os/exec"
"path/filepath"
"strconv"
"strings"
"sync"
"syscall"
"time"
"github.com/pinpt/agent/v4/internal/util"
"github.com/pinpt/agent/v4/runner"
"github.com/pinpt/agent/v4/sdk"
"github.com/pinpt/agent/v4/sysinfo"
"github.com/pinpt/go-common/v10/api"
"github.com/pinpt/go-common/v10/datetime"
"github.com/pinpt/go-common/v10/event"
"github.com/pinpt/go-common/v10/fileutil"
"github.com/pinpt/go-common/v10/graphql"
pjson "github.com/pinpt/go-common/v10/json"
"github.com/pinpt/go-common/v10/log"
pos "github.com/pinpt/go-common/v10/os"
pstrings "github.com/pinpt/go-common/v10/strings"
"github.com/pinpt/integration-sdk/agent"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
)
// DBChange event: the payload of an ops.db.Change message. Action names the
// mutation that occurred and Data carries the changed model as a
// JSON-encoded string, which consumers decode a second time.
type DBChange struct {
	// Action the action that was taken (e.g. update/upsert/delete, seen in
	// both lower and upper case from the bus)
	Action string `json:"action" codec:"action" bson:"action" yaml:"action" faker:"-"`
	// Data the data payload of the change (JSON of the model instance)
	Data string `json:"data" codec:"data" bson:"data" yaml:"data" faker:"-"`
}
// Integration A registry integration as decoded from a registry.Integration
// db change event; used to detect when a new version of the currently
// running integration has been published.
type Integration struct {
	// RefType the reference type (matched against the integration name being run)
	RefType string `json:"ref_type" codec:"ref_type" bson:"ref_type" yaml:"ref_type" faker:"-"`
	// UpdatedAt the date the integration was last updated (epoch millis — TODO confirm units)
	UpdatedAt int64 `json:"updated_ts" codec:"updated_ts" bson:"updated_ts" yaml:"updated_ts" faker:"-"`
	// Version the latest version that was published
	Version string `json:"version" codec:"version" bson:"version" yaml:"version" faker:"-"`
}
// getIntegration ensures the integration binary for publisher/integration is
// present in dir — downloading it when missing or when force is set — and
// then starts it with cmdargs, returning the running command.
func getIntegration(ctx context.Context, logger log.Logger, channel string, dir string, publisher string, integration string, version string, cmdargs []string, force bool) (*exec.Cmd, error) {
	if publisher == "" {
		return nil, fmt.Errorf("missing publisher")
	}
	if integration == "" {
		return nil, fmt.Errorf("missing integration")
	}
	// longName is only used in log and error messages
	longName := fmt.Sprintf("%s/%s", publisher, integration)
	if version != "" {
		longName += "/" + version
	}
	// previously the Abs error was discarded with _
	integrationExecutable, err := filepath.Abs(filepath.Join(dir, integration))
	if err != nil {
		return nil, fmt.Errorf("error resolving path for integration %s: %w", longName, err)
	}
	if force || !fileutil.FileExists(integrationExecutable) {
		log.Info(logger, "need to download integration", "integration", longName, "force", force)
		integrationExecutable, err = downloadIntegration(logger, channel, dir, publisher, integration, version)
		if err != nil {
			return nil, fmt.Errorf("error downloading integration %s: %w", longName, err)
		}
		log.Info(logger, "downloaded", "integration", integrationExecutable)
	}
	return startIntegration(ctx, logger, integrationExecutable, cmdargs)
}
// startIntegration launches the binary at integrationExecutable with cmdargs,
// wiring it to this process's stdio and placing it in its own process group
// (Setpgid) so the whole group can be signaled later.
func startIntegration(ctx context.Context, logger log.Logger, integrationExecutable string, cmdargs []string) (*exec.Cmd, error) {
	log.Info(logger, "starting", "file", integrationExecutable)
	c := exec.CommandContext(ctx, integrationExecutable, cmdargs...)
	c.Stdin = os.Stdin
	c.Stdout = os.Stdout
	c.Stderr = os.Stderr
	c.SysProcAttr = &syscall.SysProcAttr{Setpgid: true}
	if err := c.Start(); err != nil {
		return nil, err
	}
	return c, nil
}
// configFilename resolves the agent config file path: the --config flag when
// set, otherwise $HOME/.pinpoint-agent/config.json, returned as an absolute path.
func configFilename(cmd *cobra.Command) (string, error) {
	name, _ := cmd.Flags().GetString("config")
	if name == "" {
		name = filepath.Join(os.Getenv("HOME"), ".pinpoint-agent/config.json")
	}
	return filepath.Abs(name)
}
// clientFromConfig will use the contents of ConfigFile to make a client,
// authorized with the config's current apikey.
func clientFromConfig(config *runner.ConfigFile) (graphql.Client, error) {
	client, err := graphql.NewClient(config.CustomerID, "", "", api.BackendURL(api.GraphService, config.Channel))
	if err != nil {
		return nil, err
	}
	client.SetHeader("Authorization", config.APIKey)
	return client, nil
}
// validateConfig checks the apikey in config against the auth service and,
// when it has expired (or force is true, which skips the check), refreshes it
// in place via the refresh token. It returns true when config was mutated
// with a new apikey and should be saved back to disk.
func validateConfig(config *runner.ConfigFile, channel string, force bool) (bool, error) {
	var resp struct {
		Expired bool `json:"expired"`
		Valid   bool `json:"valid"`
	}
	if force {
		// skip the round trip and take the refresh path below
		resp.Expired = true
	} else {
		res, err := api.Get(context.Background(), channel, api.AuthService, "/validate?customer_id="+config.CustomerID, config.APIKey)
		if err != nil {
			return false, err
		}
		defer res.Body.Close()
		if err := json.NewDecoder(res.Body).Decode(&resp); err != nil {
			return false, err
		}
	}
	if resp.Expired {
		newToken, err := util.RefreshOAuth2Token(http.DefaultClient, channel, "pinpoint", config.RefreshKey)
		if err != nil {
			return false, err
		}
		config.APIKey = newToken // update the new token
		return true, nil
	}
	// BUGFIX: previously this errored when resp.Valid was TRUE, killing the
	// happy path; only a not-valid, not-expired key is an error.
	if !resp.Valid {
		return false, fmt.Errorf("the apikey or refresh token is no longer valid")
	}
	return false, nil
}
// saveConfig serializes config back to the agent config file on disk.
func saveConfig(cmd *cobra.Command, config *runner.ConfigFile) error {
	cfg, err := configFilename(cmd)
	if err != nil {
		return err
	}
	// BUGFIX: os.Open opens read-only (every write fails) and would not
	// truncate an existing file; os.Create opens for writing and truncates.
	of, err := os.Create(cfg)
	if err != nil {
		return err
	}
	defer of.Close()
	// save our changes back to the config
	return json.NewEncoder(of).Encode(config)
}
// loadConfig loads (or creates) the agent configuration. If a config file
// exists it is parsed and validated — refreshing an expired apikey and saving
// the result — and its enrollment is verified with the backend; otherwise, or
// when the backend no longer knows the enrollment, a new enrollment flow is
// run. It returns the config file path and the parsed config; every failure
// is fatal (this only runs in CLI context).
func loadConfig(cmd *cobra.Command, logger log.Logger, channel string) (string, *runner.ConfigFile) {
	cfg, err := configFilename(cmd)
	if err != nil {
		log.Fatal(logger, "error getting config file name", "err", err)
	}
	if fileutil.FileExists(cfg) {
		var config runner.ConfigFile
		of, err := os.Open(cfg)
		if err != nil {
			log.Fatal(logger, "error opening config file at "+cfg, "err", err)
		}
		if err := json.NewDecoder(of).Decode(&config); err != nil {
			of.Close()
			log.Fatal(logger, "error parsing config file at "+cfg, "err", err)
		}
		// close eagerly: validation below can block on the network for a
		// while (previously the file was closed twice, defer + explicit)
		of.Close()
		updated, err := validateConfig(&config, channel, false)
		if err != nil {
			log.Fatal(logger, "error validating the apikey", "err", err)
		}
		if updated {
			// save our changes back to the config
			if err := saveConfig(cmd, &config); err != nil {
				log.Fatal(logger, "error opening config file for writing at "+cfg, "err", err)
			}
		}
		client, err := clientFromConfig(&config)
		if err != nil {
			log.Fatal(logger, "error creating client", "err", err)
		}
		exists, err := enrollmentExists(client, config.EnrollmentID)
		if err != nil {
			log.Fatal(logger, "error checking enrollment", "err", err)
		}
		if exists {
			return cfg, &config
		}
		log.Info(logger, "agent configuration found, but not known to Pinpoint, re-enrolling now", "path", cfg)
	} else {
		log.Info(logger, "no agent configuration found, enrolling now", "path", cfg)
	}
	config, err := enrollAgent(logger, channel, cfg)
	if err != nil {
		log.Fatal(logger, "error enrolling new agent", "err", err)
	}
	return cfg, config
}
// integrationInstruction is what the monitor should do with an integration
// instance after inspecting a db change event (see vetDBChange).
type integrationInstruction int

const (
	// doNothing: the change is not relevant to this enrollment
	doNothing integrationInstruction = iota
	// shouldStart: the instance is active and its setup is ready
	shouldStart
	// shouldStop: the instance was deactivated and deleted
	shouldStop
)
func vetDBChange(evt event.SubscriptionEvent, enrollmentID string) (integrationInstruction, *agent.IntegrationInstance, error) {
var dbchange DBChange
if err := json.Unmarshal([]byte(evt.Data), &dbchange); err != nil {
return 0, nil, fmt.Errorf("error decoding dbchange: %w", err)
}
var instance agent.IntegrationInstance
if err := json.Unmarshal([]byte(dbchange.Data), &instance); err != nil {
return 0, nil, fmt.Errorf("error decoding integration instance: %w", err)
}
if instance.EnrollmentID == nil || *instance.EnrollmentID == "" || *instance.EnrollmentID != enrollmentID |
if instance.Active == true && instance.Setup == agent.IntegrationInstanceSetupReady {
return shouldStart, &instance, nil
}
if instance.Active == false && instance.Deleted == true {
return shouldStop, &instance, nil
}
return doNothing, nil, nil
}
// integrationResult mirrors the GraphQL response shape of integrationQuery:
// registry.Integration with its ref_type and the publisher's identifier.
// Data is a pointer so a missing integration decodes as nil.
type integrationResult struct {
	Data *struct {
		Integration struct {
			RefType   string `json:"ref_type"`
			Publisher struct {
				Identifier string `json:"identifier"`
			} `json:"publisher"`
		} `json:"Integration"`
	} `json:"registry"`
}
// integrationQuery resolves a registry integration id to its ref_type and
// publisher identifier (decoded into integrationResult and combined as
// "identifier/ref_type" by the monitor).
var integrationQuery = `query findIntegration($id: ID!) {
registry {
Integration(_id: $id) {
ref_type
publisher {
identifier
}
}
}
}`
// pingEnrollment updates the enrollment's last-ping timestamp and, when
// datefield is non-empty, also stamps that date column and the running flag.
func pingEnrollment(logger log.Logger, client graphql.Client, enrollmentID string, datefield string, active bool) error {
	log.Info(logger, "updating enrollment", "setting", datefield, "enrollment_id", enrollmentID, "active", active)
	ts := datetime.NewDateNow()
	update := graphql.Variables{
		agent.EnrollmentModelLastPingDateColumn: ts,
	}
	if datefield != "" {
		update[datefield] = ts
		update[agent.EnrollmentModelRunningColumn] = active
	}
	return agent.ExecEnrollmentSilentUpdateMutation(client, enrollmentID, update, false)
}
// setIntegrationRunning flips an integration instance's setup state to
// "running" via a silent update mutation.
func setIntegrationRunning(client graphql.Client, integrationInstanceID string) error {
	update := graphql.Variables{
		agent.IntegrationInstanceModelSetupColumn: agent.IntegrationInstanceSetupRunning,
	}
	err := agent.ExecIntegrationInstanceSilentUpdateMutation(client, integrationInstanceID, update, false)
	if err != nil {
		return fmt.Errorf("error updating integration instance to running: %w", err)
	}
	return nil
}
func runIntegrationMonitor(ctx context.Context, logger log.Logger, cmd *cobra.Command) {
channel, _ := cmd.Flags().GetString("channel")
args := []string{}
cmd.Flags().Visit(func(f *pflag.Flag) {
args = append(args, "--"+f.Name, f.Value.String())
})
var gclient graphql.Client
integrations := make(map[string]string) // id -> identifier/ref_type
processes := make(map[string]*exec.Cmd)
var processLock sync.Mutex
getIntegration := func(id string) (string, error) {
processLock.Lock()
val := integrations[id]
if val != "" {
processLock.Unlock()
return val, nil
}
var res integrationResult
if err := gclient.Query(integrationQuery, graphql.Variables{"id": id}, &res); err != nil {
processLock.Unlock()
return "", err
}
if res.Data == nil {
processLock.Unlock()
return "", fmt.Errorf("couldn't find integration with id: %s", id)
}
val = res.Data.Integration.Publisher.Identifier + "/" + res.Data.Integration.RefType
integrations[id] = val
processLock.Unlock()
return val, nil
}
cfg, config := loadConfig(cmd, logger, channel)
if channel == "" {
channel = config.Channel
}
args = append(args, "--config", cfg)
gclient, err := graphql.NewClient(config.CustomerID, "", "", api.BackendURL(api.GraphService, channel))
if err != nil {
log.Fatal(logger, "error creating graphql client", "err", err)
}
gclient.SetHeader("Authorization", config.APIKey)
errors := make(chan error)
go func() {
for err := range errors {
if err != nil {
log.Fatal(logger, err.Error())
}
}
}()
groupID := "agent-run-monitor"
if config.EnrollmentID != "" {
// if self managed, we need to use a different group id than the cloud
groupID += "-" + config.EnrollmentID
}
ch, err := event.NewSubscription(ctx, event.Subscription{
GroupID: "agent-run-monitor-" + config.EnrollmentID,
Topics: []string{"ops.db.Change"},
Channel: channel,
APIKey: config.APIKey,
DisablePing: true,
Logger: logger,
Errors: errors,
DisableAutoCommit: true,
Filter: &event.SubscriptionFilter{
ObjectExpr: `model:"agent.IntegrationInstance" AND action:"update"`,
},
})
if err != nil {
log.Fatal(logger, "error creating montior subscription", "err", err)
}
ch.WaitForReady()
defer ch.Close()
// set startup date
if err := pingEnrollment(logger, gclient, config.EnrollmentID, agent.EnrollmentModelLastStartupDateColumn, true); err != nil {
log.Error(logger, "unable to update enrollment", "enrollment_id", config.EnrollmentID, "err", err)
}
runIntegration := func(name string) {
log.Info(logger, "running integration", "name", name)
processLock.Lock()
startFile, _ := ioutil.TempFile("", "")
defer os.Remove(startFile.Name())
args = append(args, "--start-file", startFile.Name())
c := exec.CommandContext(ctx, os.Args[0], append([]string{"run", name}, args...)...)
var wg sync.WaitGroup
wg.Add(1)
c.Stdin = os.Stdin
c.Stdout = os.Stdout
c.Stderr = os.Stderr
c.SysProcAttr = &syscall.SysProcAttr{Setpgid: true}
if err := c.Start(); err != nil {
processLock.Unlock()
log.Fatal(logger, "error starting "+name, "err", err)
}
exited := make(chan bool)
pos.OnExit(func(_ int) {
log.Debug(logger, "exit")
close(exited)
})
go func() {
for {
select {
case <-exited:
wg.Done()
return
case <-time.After(time.Second):
if fileutil.FileExists(startFile.Name()) {
wg.Done()
os.Remove(startFile.Name())
return
}
case <-time.After(5 * time.Minute):
log.Fatal(logger, "failed to start integration "+name+" after 5 minutes")
}
}
}()
processes[name] = c
processLock.Unlock()
log.Debug(logger, "waiting for integration to start")
wg.Wait()
if c != nil && c.ProcessState != nil && c.ProcessState.Exited() {
log.Info(logger, "integration is not running")
} else {
log.Info(logger, "integration is running")
}
}
// find all the integrations we have setup
query := &agent.IntegrationInstanceQuery{
Filters: []string{
agent.IntegrationInstanceModelDeletedColumn + " = ?",
agent.IntegrationInstanceModelEnrollmentIDColumn + " = ?",
},
Params: []interface{}{
false,
config.EnrollmentID,
},
}
q := &agent.IntegrationInstanceQueryInput{Query: query}
instances, err := agent.FindIntegrationInstances(gclient, q)
if err != nil {
log.Fatal(logger, "error finding integration instances", "err", err)
}
if instances != nil {
for _, edge := range instances.Edges {
name, err := getIntegration(edge.Node.IntegrationID)
if err != nil {
log.Fatal(logger, "error fetching integration name for integration", "err", err, "integration_id", edge.Node.IntegrationID, "id", edge.Node.ID)
}
runIntegration(name)
if edge.Node.Setup == agent.IntegrationInstanceSetupReady {
log.Info(logger, "setting integration to running 🏃♀️", "integration_instance_id", edge.Node.ID)
if err := setIntegrationRunning(gclient, edge.Node.ID); err != nil {
log.Fatal(logger, "error updating integration instance", "err", err, "integration_id", edge.Node.IntegrationID, "id", edge.Node.ID)
}
}
}
}
var restarting bool
done := make(chan bool)
finished := make(chan bool)
pos.OnExit(func(_ int) {
if !restarting {
done <- true
<-finished
log.Info(logger, "👯")
}
})
if err := pingEnrollment(logger, gclient, config.EnrollmentID, "", true); err != nil {
log.Error(logger, "unable to update enrollment", "enrollment_id", config.EnrollmentID, "err", err)
}
// calculate the duration of time left before the
refreshDuration := config.Expires.Sub(time.Now().Add(time.Minute * 30))
var shutdownWg sync.WaitGroup
// run a loop waiting for exit or an updated integration instance
completed:
for {
select {
case <-time.After(refreshDuration):
// if we get here, we need to refresh our expiring apikey and restart all the integrations
log.Info(logger, "need to update apikey before it expires and restart 🤞🏽")
// 1. fetch our new apikey
if _, err := validateConfig(config, channel, true); err != nil {
log.Fatal(logger, "error refreshing the expiring apikey", "err", err)
}
// 2. save the config file changes
if err := saveConfig(cmd, config); err != nil {
log.Fatal(logger, "error saving the new apikey", "err", err)
}
// 3. stop all of our current integrations
restarting = true
done <- true
// 4. restart ourself which should re-entrant this function
go runIntegrationMonitor(ctx, logger, cmd)
case <-time.After(time.Minute * 5):
if err := pingEnrollment(logger, gclient, config.EnrollmentID, "", true); err != nil {
log.Error(logger, "unable to update enrollment", "enrollment_id", config.EnrollmentID, "err", err)
}
case <-done:
processLock.Lock()
for k, c := range processes {
log.Debug(logger, "stopping "+k, "pid", c.Process.Pid)
shutdownWg.Add(1)
go func(c *exec.Cmd, name string) {
defer shutdownWg.Done()
syscall.Kill(-c.Process.Pid, syscall.SIGINT)
exited := make(chan bool)
go func() {
c.Wait()
log.Debug(logger, "exited "+name)
exited <- true
}()
select {
case <-time.After(time.Second * 15):
log.Debug(logger, "timed out on exit for "+name)
if c.Process != nil {
c.Process.Kill()
}
return
case <-exited:
return
}
}(c, k)
delete(processes, k)
}
processLock.Unlock()
break completed
case evt := <-ch.Channel():
instruction, instance, err := vetDBChange(evt, config.EnrollmentID)
if err != nil {
log.Fatal(logger, "error decoding db change", "err", err)
}
switch instruction {
case shouldStart:
log.Info(logger, "db change received, need to create a new process", "id", instance.ID)
if err := setIntegrationRunning(gclient, instance.ID); err != nil {
log.Fatal(logger, "error updating integration", "err", err)
}
name, err := getIntegration(instance.ID)
if err != nil {
log.Fatal(logger, "error fetching integration detail", "err", err)
}
processLock.Lock()
c := processes[name]
if c == nil {
processLock.Unlock()
runIntegration(name)
} else {
processLock.Unlock()
}
case shouldStop:
log.Info(logger, "db change delete received, need to delete process", "id", instance.ID)
name, err := getIntegration(instance.ID)
if err != nil {
log.Fatal(logger, "error fetching integration detail", "err", err)
}
processLock.Lock()
c := processes[name]
if c != nil {
c.Process.Kill()
delete(processes, instance.RefType)
}
processLock.Unlock()
}
evt.Commit()
}
}
shutdownWg.Wait()
// if restarting, dont send a shutdown or block on finished
if restarting {
return
}
if err := pingEnrollment(logger, gclient, config.EnrollmentID, agent.EnrollmentModelLastShutdownDateColumn, false); err != nil {
log.Error(logger, "unable to update enrollment", "enrollment_id", config.EnrollmentID, "err", err)
}
finished <- true
}
// enrollmentExists reports whether an enrollment with the given ID is
// known to the backend. Any lookup failure is returned alongside the
// (best-effort) existence flag.
func enrollmentExists(client graphql.Client, enrollmentID string) (bool, error) {
	found, err := agent.FindEnrollment(client, enrollmentID)
	if err != nil {
		return found != nil, err
	}
	return found != nil, nil
}
// enrollAgent runs the browser-based enrollment flow for this agent: it
// waits for the auth redirect to deliver credentials, registers an
// Enrollment record with the backend, and persists the resulting
// configuration to configFileName. Returns the new config on success.
func enrollAgent(logger log.Logger, channel string, configFileName string) (*runner.ConfigFile, error) {
	var config runner.ConfigFile
	config.Channel = channel
	url := sdk.JoinURL(api.BackendURL(api.AppService, channel), "/enroll")
	var userID string
	err := util.WaitForRedirect(url, func(w http.ResponseWriter, r *http.Request) {
		q := r.URL.Query()
		config.APIKey = q.Get("apikey")
		config.RefreshKey = q.Get("refresh_token")
		config.CustomerID = q.Get("customer_id")
		// default the expiration to ~23h; previously an unparseable
		// "expires" value was silently treated as epoch zero, here it
		// falls back to the same default as a missing value
		config.Expires = time.Now().Add(time.Hour * 23)
		if expires := q.Get("expires"); expires != "" {
			if e, perr := strconv.ParseInt(expires, 10, 64); perr == nil {
				config.Expires = datetime.DateFromEpoch(e)
			}
		}
		userID = q.Get("user_id")
		w.WriteHeader(http.StatusOK)
	})
	if err != nil {
		return nil, fmt.Errorf("error waiting for browser: %w", err)
	}
	log.Info(logger, "logged in", "customer_id", config.CustomerID)
	log.Info(logger, "enrolling agent...", "customer_id", config.CustomerID)
	client, err := graphql.NewClient(config.CustomerID, "", "", api.BackendURL(api.GraphService, channel))
	if err != nil {
		return nil, fmt.Errorf("error creating graphql client: %w", err)
	}
	client.SetHeader("Authorization", config.APIKey)
	info, err := sysinfo.GetSystemInfo()
	if err != nil {
		return nil, fmt.Errorf("error getting system info: %w", err)
	}
	config.SystemID = info.ID
	created := agent.EnrollmentCreatedDate(datetime.NewDateNow())
	enr := agent.Enrollment{
		AgentVersion: commit, // TODO(robin): when we start versioning, switch this to version
		CreatedDate:  created,
		SystemID:     info.ID,
		Hostname:     info.Hostname,
		NumCPU:       info.NumCPU,
		OS:           info.OS,
		Architecture: info.Architecture,
		GoVersion:    info.GoVersion,
		CustomerID:   config.CustomerID,
		UserID:       userID,
		ID:           agent.NewEnrollmentID(config.CustomerID, info.ID),
	}
	config.EnrollmentID = enr.ID
	if err := agent.CreateEnrollment(client, enr); err != nil {
		// a duplicate key means this system was enrolled before; just
		// recreate the local config file in that case
		if strings.Contains(err.Error(), "duplicate key error") {
			log.Info(logger, "looks like this system has already been enrolled, recreating local config")
		} else {
			return nil, fmt.Errorf("error creating enrollment: %w", err)
		}
	}
	// previously the MkdirAll error was ignored, turning a bad directory
	// into a confusing write error below
	if err := os.MkdirAll(filepath.Dir(configFileName), 0700); err != nil {
		return nil, fmt.Errorf("error creating config directory: %w", err)
	}
	// 0600: the config contains the apikey and refresh token
	if err := ioutil.WriteFile(configFileName, []byte(pjson.Stringify(config)), 0600); err != nil {
		return nil, fmt.Errorf("error writing config file: %w", err)
	}
	log.Info(logger, "agent enrolled 🎉", "customer_id", config.CustomerID)
	return &config, nil
}
// enrollAgentCmd will authenticate with pinpoint and create an agent
// enrollment for this machine. Hidden from help output; the same
// enrollment flow is also invoked by loadConfig when no config exists.
var enrollAgentCmd = &cobra.Command{
	Use:    "enroll",
	Short:  "connect this agent to Pinpoint's backend",
	Hidden: true,
	Run: func(cmd *cobra.Command, args []string) {
		logger := log.NewCommandLogger(cmd)
		defer logger.Close()
		channel, _ := cmd.Flags().GetString("channel")
		fn, err := configFilename(cmd)
		if err != nil {
			log.Fatal(logger, "error getting config file name", "err", err)
		}
		if _, err := enrollAgent(logger, channel, fn); err != nil {
			log.Fatal(logger, "error enrolling this agent", "err", err)
		}
	},
}
func copyFile(from, to string) error {
in, err := os.Open(from)
if err != nil {
return err
}
defer in.Close()
out, err := os.Create(to)
if err != nil {
return err
}
defer out.Close()
_, err = io.Copy(out, in)
return err
}
// runCmd represents the run command. With no arguments it becomes the
// monitor process (runIntegrationMonitor) supervising all configured
// integrations. With a "publisher/integration" argument it downloads (if
// needed), runs, and supervises that single integration: restarting it on
// crash with linear backoff, and re-downloading it when a
// registry.Integration db change event announces a new version.
var runCmd = &cobra.Command{
	Use:   "run <integration> <version>",
	Short: "run a published integration",
	Args:  cobra.MinimumNArgs(0),
	Run: func(cmd *cobra.Command, args []string) {
		_logger := log.NewCommandLogger(cmd)
		defer _logger.Close()
		ctx, cancel := context.WithCancel(context.Background())
		defer cancel()
		if len(args) == 0 {
			log.Info(_logger, "starting main process")
			// need to figure out all our configured integrations and run each one
			runIntegrationMonitor(ctx, _logger, cmd)
			return
		}
		fullIntegration := args[0]
		var version string
		if len(args) > 1 {
			version = args[1]
		}
		tok := strings.Split(fullIntegration, "/")
		if len(tok) != 2 {
			log.Fatal(_logger, "integration should be in the format: publisher/integration such as pinpt/github")
		}
		publisher := tok[0]
		integration := tok[1]
		logger := log.With(_logger, "pkg", integration)
		channel, _ := cmd.Flags().GetString("channel")
		dir, _ := cmd.Flags().GetString("dir")
		secret, _ := cmd.Flags().GetString("secret")
		dir, err := filepath.Abs(dir)
		if err != nil {
			log.Fatal(logger, "error getting dir absolute path", "err", err)
		}
		var selfManaged bool
		var ch *event.SubscriptionChannel
		var cmdargs []string
		// a shared secret means we're running inside the cloud (internal);
		// otherwise this is a self-managed agent using the local config
		if secret != "" {
			log.Debug(logger, "creating internal subscription")
			if channel == "" {
				channel = "stable"
			}
			// each replica agent should recieve updates
			groupSuffix, err := os.Hostname()
			if err != nil {
				groupSuffix = pstrings.NewUUIDV4()
				log.Warn(logger, "unable to get hostname, using random uuid", "uuid", groupSuffix, "err", err)
			}
			ch, err = event.NewSubscription(ctx, event.Subscription{
				GroupID:     "agent-run-" + publisher + "-" + integration + "-" + groupSuffix,
				Topics:      []string{"ops.db.Change"},
				Channel:     channel,
				HTTPHeaders: map[string]string{"x-api-key": secret},
				DisablePing: true,
				Temporary:   true,
				Logger:      logger,
				Filter: &event.SubscriptionFilter{
					ObjectExpr: `model:"registry.Integration"`,
				},
			})
			cmdargs = append(cmdargs, "--secret="+secret, "--channel="+channel)
		} else {
			selfManaged = true
			log.Debug(logger, "creating external subscription")
			cfg, config := loadConfig(cmd, logger, channel)
			apikey := config.APIKey
			if channel == "" {
				channel = config.Channel
			}
			ch, err = event.NewSubscription(ctx, event.Subscription{
				GroupID:     "agent-run-" + publisher + "-" + integration + "-" + config.EnrollmentID,
				Topics:      []string{"ops.db.Change"},
				Channel:     channel,
				APIKey:      apikey,
				DisablePing: true,
				Logger:      logger,
				Filter: &event.SubscriptionFilter{
					ObjectExpr: `model:"registry.Integration"`,
				},
			})
			cmdargs = append(cmdargs, "--config="+cfg, "--channel="+channel)
		}
		if err != nil {
			log.Fatal(logger, "error creating subscription", "err", err)
		}
		// start file is used to signal to the monitor when we're running
		startfile, _ := cmd.Flags().GetString("start-file")
		if startfile != "" {
			os.Remove(startfile)
			cmdargs = append(cmdargs, "--start-file", startfile)
			defer os.Remove(startfile)
		}
		log.Info(logger, "waiting for subscription to be ready", "channel", channel)
		ch.WaitForReady()
		log.Info(logger, "subscription is ready")
		defer ch.Close()
		// stopped: we're shutting down; restarting: an upgrade restart is
		// pending. Each is guarded by its own mutex.
		var stopped, restarting bool
		var stoppedLock, restartLock sync.Mutex
		done := make(chan bool)
		finished := make(chan bool, 1)
		restart := make(chan bool, 2)
		exited := make(chan bool)
		var currentCmd *exec.Cmd
		var restarted int
		if selfManaged {
			pos.OnExit(func(_ int) {
				stoppedLock.Lock()
				stopped = true
				stoppedLock.Unlock()
				done <- true
				<-finished
			})
		}
		integrationBinary := filepath.Join(dir, integration)
		// the previous binary is kept so we can roll back a bad upgrade
		previousIntegrationBinary := filepath.Join(dir, "old-"+integration)
		restart <- true // start it up
	exit:
		for {
			stoppedLock.Lock()
			s := stopped
			stoppedLock.Unlock()
			if s {
				break
			}
			select {
			case evt := <-ch.Channel():
				// NOTE(review): Unmarshal errors are ignored here; a
				// malformed event is treated as a non-matching one
				var dbchange DBChange
				json.Unmarshal([]byte(evt.Data), &dbchange)
				var instance Integration
				json.Unmarshal([]byte(dbchange.Data), &instance)
				log.Debug(logger, "db change event received", "ref_type", instance.RefType, "integration", integration)
				if instance.RefType == integration {
					switch dbchange.Action {
					case "update", "UPDATE", "upsert", "UPSERT":
						// copy the binary so we can rollback if needed
						if err := copyFile(integrationBinary, previousIntegrationBinary); err != nil {
							log.Error(logger, "error copying integration", "err", err)
							break exit
						}
						log.Info(logger, "new integration detected, will restart in 15s", "version", instance.Version)
						restartLock.Lock()
						restarting = true
						restarted = 0
						version = instance.Version
						time.Sleep(time.Second * 15)
						restart <- true // force a new download
						restartLock.Unlock()
					case "delete", "DELETE":
						// TODO -- exit with a special code to indicate we don't need to restart this integration
					}
				}
				go evt.Commit() // we need to put in a separate thread since we're holding the sub thread
			case <-done:
				// shutdown requested: signal the child's process group and
				// give it up to 10s to exit before continuing
				if currentCmd != nil {
					syscall.Kill(-currentCmd.Process.Pid, syscall.SIGINT)
					select {
					case <-time.After(time.Second * 10):
						break
					case <-exited:
						break
					}
				}
				// NOTE(review): this break leaves the select only; the
				// outer loop then observes stopped==true and breaks exit
				break
			case force := <-restart:
				log.Info(logger, "restart requested")
				if currentCmd != nil && currentCmd.Process != nil {
					currentCmd.Process.Kill()
					currentCmd = nil
				}
				stoppedLock.Lock()
				s := stopped
				stoppedLock.Unlock()
				log.Info(logger, "need to start", "stopped", s)
				if !s {
					restarted++
					c, err := getIntegration(ctx, logger, channel, dir, publisher, integration, version, cmdargs, force)
					if err != nil {
						log.Error(logger, "error running integration", "err", err)
						if !fileutil.FileExists(previousIntegrationBinary) {
							break exit
						} else {
							log.Info(logger, "attempting to roll back to previous version of integration", "integration", integration)
							os.Remove(integrationBinary)
							os.Rename(previousIntegrationBinary, integrationBinary)
							os.Chmod(integrationBinary, 0775)
							c, err = startIntegration(ctx, logger, integrationBinary, cmdargs)
							if err != nil {
								log.Error(logger, "error running rolled back integration", "err", err)
								break exit
							}
						}
					}
					currentCmd = c
					os.Remove(previousIntegrationBinary)
					log.Info(logger, "started", "pid", c.Process.Pid)
					// NOTE(review): this goroutine reads currentCmd and
					// restarted without holding a lock — potential data
					// race with the restart case above; confirm with -race
					go func() {
						// monitor the exit
						err := currentCmd.Wait()
						if err != nil {
							if currentCmd != nil && currentCmd.ProcessState != nil {
								if currentCmd.ProcessState.ExitCode() != 0 {
									if !selfManaged {
										// in cloud we should just end the process
										log.Fatal(logger, "integration has exited", "restarted", restarted, "code", currentCmd.ProcessState.ExitCode(), "err", err)
									} else {
										log.Error(logger, "integration has exited", "restarted", restarted, "code", currentCmd.ProcessState.ExitCode(), "err", err)
									}
								}
							}
							// linear backoff: one extra second per
							// consecutive failed start
							log.Info(logger, "pausing", "duration", time.Second*time.Duration(restarted))
							time.Sleep(time.Second * time.Duration(restarted))
						} else {
							restarted = 0
						}
						// try and restart if we're not in the stopping mode
						stoppedLock.Lock()
						s := stopped
						stoppedLock.Unlock()
						if !s {
							restartLock.Lock()
							r := restarting
							restartLock.Unlock()
							if !r {
								restart <- false // restart but don't force a new download
							}
						} else {
							exited <- true
						}
					}()
				}
			}
		}
		log.Info(logger, "👋")
		finished <- true
	},
}
// init registers the run and enroll commands and their flags with the
// root command.
func init() {
	rootCmd.AddCommand(runCmd)
	runCmd.Flags().String("channel", pos.Getenv("PP_CHANNEL", ""), "the channel which can be set")
	runCmd.Flags().String("config", "", "the location of the config file")
	runCmd.Flags().StringP("dir", "d", "", "directory inside of which to run the integration")
	runCmd.Flags().String("secret", pos.Getenv("PP_AUTH_SHARED_SECRET", ""), "internal shared secret")
	runCmd.Flags().String("start-file", "", "the start file to write once running")
	// internal-only flags, hidden from help output
	runCmd.Flags().MarkHidden("secret")
	runCmd.Flags().MarkHidden("start-file")
	rootCmd.AddCommand(enrollAgentCmd)
	enrollAgentCmd.Flags().String("channel", pos.Getenv("PP_CHANNEL", ""), "the channel which can be set")
	enrollAgentCmd.Flags().String("config", "", "the location of the config file")
}
| {
return doNothing, nil, nil
} | conditional_block |
run.go | package cmd
import (
"context"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"net/http"
"os"
"os/exec"
"path/filepath"
"strconv"
"strings"
"sync"
"syscall"
"time"
"github.com/pinpt/agent/v4/internal/util"
"github.com/pinpt/agent/v4/runner"
"github.com/pinpt/agent/v4/sdk"
"github.com/pinpt/agent/v4/sysinfo"
"github.com/pinpt/go-common/v10/api"
"github.com/pinpt/go-common/v10/datetime"
"github.com/pinpt/go-common/v10/event"
"github.com/pinpt/go-common/v10/fileutil"
"github.com/pinpt/go-common/v10/graphql"
pjson "github.com/pinpt/go-common/v10/json"
"github.com/pinpt/go-common/v10/log"
pos "github.com/pinpt/go-common/v10/os"
pstrings "github.com/pinpt/go-common/v10/strings"
"github.com/pinpt/integration-sdk/agent"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
)
// DBChange event. Decoded from the payload of "ops.db.Change"
// subscription events; Data itself holds a nested JSON document
// describing the changed model record.
type DBChange struct {
	// Action the action that was taken
	Action string `json:"action" codec:"action" bson:"action" yaml:"action" faker:"-"`
	// Data the data payload of the change
	Data string `json:"data" codec:"data" bson:"data" yaml:"data" faker:"-"`
}
// Integration A registry integration. This is the subset of the
// registry.Integration model decoded from db change events in runCmd.
type Integration struct {
	// RefType the reference type
	RefType string `json:"ref_type" codec:"ref_type" bson:"ref_type" yaml:"ref_type" faker:"-"`
	// UpdatedAt the date the integration was last updated
	UpdatedAt int64 `json:"updated_ts" codec:"updated_ts" bson:"updated_ts" yaml:"updated_ts" faker:"-"`
	// Version the latest version that was published
	Version string `json:"version" codec:"version" bson:"version" yaml:"version" faker:"-"`
}
// getIntegration ensures the publisher/integration binary is present in
// dir — downloading it when missing or when force is set — and then
// launches it with the supplied arguments, returning the running command.
func getIntegration(ctx context.Context, logger log.Logger, channel string, dir string, publisher string, integration string, version string, cmdargs []string, force bool) (*exec.Cmd, error) {
	switch {
	case publisher == "":
		return nil, fmt.Errorf("missing publisher")
	case integration == "":
		return nil, fmt.Errorf("missing integration")
	}
	longName := publisher + "/" + integration
	if version != "" {
		longName += "/" + version
	}
	executable, _ := filepath.Abs(filepath.Join(dir, integration))
	if force || !fileutil.FileExists(executable) {
		log.Info(logger, "need to download integration", "integration", longName, "force", force)
		downloaded, err := downloadIntegration(logger, channel, dir, publisher, integration, version)
		if err != nil {
			return nil, fmt.Errorf("error downloading integration %s: %w", longName, err)
		}
		executable = downloaded
		log.Info(logger, "downloaded", "integration", executable)
	}
	return startIntegration(ctx, logger, executable, cmdargs)
}
// startIntegration launches the integration executable with the given
// arguments, wiring it to this process's stdio and placing it in its own
// process group (Setpgid) so the whole group can be signaled later.
func startIntegration(ctx context.Context, logger log.Logger, integrationExecutable string, cmdargs []string) (*exec.Cmd, error) {
	log.Info(logger, "starting", "file", integrationExecutable)
	proc := exec.CommandContext(ctx, integrationExecutable, cmdargs...)
	proc.Stdin = os.Stdin
	proc.Stdout = os.Stdout
	proc.Stderr = os.Stderr
	proc.SysProcAttr = &syscall.SysProcAttr{Setpgid: true}
	err := proc.Start()
	if err != nil {
		return nil, err
	}
	return proc, nil
}
// configFilename resolves the agent config file location: the --config
// flag when provided, otherwise $HOME/.pinpoint-agent/config.json,
// returned as an absolute path.
func configFilename(cmd *cobra.Command) (string, error) {
	name, _ := cmd.Flags().GetString("config")
	if name == "" {
		name = filepath.Join(os.Getenv("HOME"), ".pinpoint-agent/config.json")
	}
	return filepath.Abs(name)
}
// clientFromConfig will use the contents of ConfigFile to build an
// authorized graphql client for the config's channel and customer.
func clientFromConfig(config *runner.ConfigFile) (graphql.Client, error) {
	client, err := graphql.NewClient(config.CustomerID, "", "", api.BackendURL(api.GraphService, config.Channel))
	if err != nil {
		return nil, err
	}
	client.SetHeader("Authorization", config.APIKey)
	return client, nil
}
// validateConfig checks the config's apikey with the auth service (or
// assumes it expired when force is set). When expired it refreshes the
// token via the OAuth2 refresh key, updates config.APIKey in place, and
// returns true to signal the config should be saved. Returns an error if
// the key is reported invalid or any call fails.
func validateConfig(config *runner.ConfigFile, channel string, force bool) (bool, error) {
	var resp struct {
		Expired bool `json:"expired"`
		Valid   bool `json:"valid"`
	}
	if !force {
		res, err := api.Get(context.Background(), channel, api.AuthService, "/validate?customer_id="+config.CustomerID, config.APIKey)
		if err != nil {
			return false, err
		}
		defer res.Body.Close()
		if err := json.NewDecoder(res.Body).Decode(&resp); err != nil {
			return false, err
		}
	} else {
		resp.Expired = true
	}
	if resp.Expired {
		newToken, err := util.RefreshOAuth2Token(http.DefaultClient, channel, "pinpoint", config.RefreshKey)
		if err != nil {
			return false, err
		}
		config.APIKey = newToken // update the new token
		return true, nil
	}
	// BUGFIX: the original tested `if resp.Valid` here, which rejected
	// exactly the keys the auth service reported as valid
	if !resp.Valid {
		return false, fmt.Errorf("the apikey or refresh token is no longer valid")
	}
	return false, nil
}
// saveConfig serializes config as JSON to the agent's config file,
// replacing any previous contents.
func saveConfig(cmd *cobra.Command, config *runner.ConfigFile) error {
	cfg, err := configFilename(cmd)
	if err != nil {
		return err
	}
	// BUGFIX: the original used os.Open, which opens read-only, so the
	// Encode below could never write; os.Create opens for writing and
	// truncates any stale bytes from a longer previous config
	of, err := os.Create(cfg)
	if err != nil {
		return err
	}
	// save our channels back to the config
	if err := json.NewEncoder(of).Encode(config); err != nil {
		of.Close()
		return err
	}
	// return the close error so a failed flush isn't silently dropped
	return of.Close()
}
// loadConfig reads (or creates) the agent configuration. If a config file
// exists it is parsed, its apikey validated (refreshed and saved when
// expired), and its enrollment verified with the backend; otherwise — or
// when the enrollment is unknown to the backend — the agent is
// (re-)enrolled. Returns the config file path and the loaded config.
// Every failure is fatal.
func loadConfig(cmd *cobra.Command, logger log.Logger, channel string) (string, *runner.ConfigFile) {
	cfg, err := configFilename(cmd)
	if err != nil {
		log.Fatal(logger, "error getting config file name", "err", err)
	}
	if fileutil.FileExists(cfg) {
		var config runner.ConfigFile
		of, err := os.Open(cfg)
		if err != nil {
			log.Fatal(logger, "error opening config file at "+cfg, "err", err)
		}
		if err := json.NewDecoder(of).Decode(&config); err != nil {
			of.Close()
			log.Fatal(logger, "error parsing config file at "+cfg, "err", err)
		}
		// close eagerly (the original both deferred and explicitly closed,
		// closing the file twice) so the file isn't held open while
		// saveConfig below may rewrite it
		of.Close()
		updated, err := validateConfig(&config, channel, false)
		if err != nil {
			log.Fatal(logger, "error validating the apikey", "err", err)
		}
		if updated {
			// save our changes back to the config
			if err := saveConfig(cmd, &config); err != nil {
				log.Fatal(logger, "error opening config file for writing at "+cfg, "err", err)
			}
		}
		client, err := clientFromConfig(&config)
		if err != nil {
			log.Fatal(logger, "error creating client", "err", err)
		}
		exists, err := enrollmentExists(client, config.EnrollmentID)
		if err != nil {
			log.Fatal(logger, "error checking enrollment", "err", err)
		}
		if exists {
			return cfg, &config
		}
		log.Info(logger, "agent configuration found, but not known to Pinpoint, re-enrolling now", "path", cfg)
	} else {
		log.Info(logger, "no agent configuration found, enrolling now", "path", cfg)
	}
	config, err := enrollAgent(logger, channel, cfg)
	if err != nil {
		log.Fatal(logger, "error enrolling new agent", "err", err)
	}
	return cfg, config
}
// integrationInstruction describes what the monitor should do with an
// integration instance in response to a db change event.
type integrationInstruction int

const (
	// doNothing: the change is not relevant to this enrollment.
	doNothing integrationInstruction = iota
	// shouldStart: the instance is active and set up, start its process.
	shouldStart
	// shouldStop: the instance was deactivated/deleted, stop its process.
	shouldStop
)
// vetDBChange decodes a db change event into an IntegrationInstance and
// decides whether the monitor should start or stop it. Events for other
// enrollments (or with no enrollment) yield doNothing; decode failures
// return an error.
func vetDBChange(evt event.SubscriptionEvent, enrollmentID string) (integrationInstruction, *agent.IntegrationInstance, error) {
	var dbchange DBChange
	if err := json.Unmarshal([]byte(evt.Data), &dbchange); err != nil {
		return doNothing, nil, fmt.Errorf("error decoding dbchange: %w", err)
	}
	var instance agent.IntegrationInstance
	if err := json.Unmarshal([]byte(dbchange.Data), &instance); err != nil {
		return doNothing, nil, fmt.Errorf("error decoding integration instance: %w", err)
	}
	// ignore instances that don't belong to this enrollment
	if instance.EnrollmentID == nil || *instance.EnrollmentID == "" || *instance.EnrollmentID != enrollmentID {
		return doNothing, nil, nil
	}
	if instance.Active && instance.Setup == agent.IntegrationInstanceSetupReady {
		return shouldStart, &instance, nil
	}
	if !instance.Active && instance.Deleted {
		return shouldStop, &instance, nil
	}
	return doNothing, nil, nil
}
// integrationResult models the graphql response shape of
// integrationQuery. Note the outer field's json tag is "registry",
// matching the top-level response key of the query.
type integrationResult struct {
	Data *struct {
		Integration struct {
			RefType   string `json:"ref_type"`
			Publisher struct {
				Identifier string `json:"identifier"`
			} `json:"publisher"`
		} `json:"Integration"`
	} `json:"registry"`
}
// integrationQuery fetches an integration's ref_type and publisher
// identifier by ID; results decode into integrationResult.
var integrationQuery = `query findIntegration($id: ID!) {
	registry {
		Integration(_id: $id) {
			ref_type
			publisher {
				identifier
			}
		}
	}
}`
func pingEnrollment(logger log.Logger, client graphql.Client, enrollmentID string, datefield string, active bool) error |
// setIntegrationRunning marks the given integration instance's setup
// state as running, using the silent update mutation.
func setIntegrationRunning(client graphql.Client, integrationInstanceID string) error {
	update := graphql.Variables{
		agent.IntegrationInstanceModelSetupColumn: agent.IntegrationInstanceSetupRunning,
	}
	err := agent.ExecIntegrationInstanceSilentUpdateMutation(client, integrationInstanceID, update, false)
	if err != nil {
		return fmt.Errorf("error updating integration instance to running: %w", err)
	}
	return nil
}
// runIntegrationMonitor is the main supervisor process: it loads (or
// creates) the agent config, subscribes to IntegrationInstance db
// changes, launches a child "run <integration>" process for every
// configured instance, and then loops — pinging the enrollment, starting
// and stopping children as instances change, refreshing the apikey
// before it expires (restarting everything), and shutting all children
// down cleanly on exit.
func runIntegrationMonitor(ctx context.Context, logger log.Logger, cmd *cobra.Command) {
	channel, _ := cmd.Flags().GetString("channel")
	args := []string{}
	cmd.Flags().Visit(func(f *pflag.Flag) {
		args = append(args, "--"+f.Name, f.Value.String())
	})
	var gclient graphql.Client
	integrations := make(map[string]string) // id -> identifier/ref_type
	processes := make(map[string]*exec.Cmd)
	var processLock sync.Mutex
	// getIntegration resolves an integration ID to "publisher/ref_type",
	// caching results in the integrations map
	getIntegration := func(id string) (string, error) {
		processLock.Lock()
		val := integrations[id]
		if val != "" {
			processLock.Unlock()
			return val, nil
		}
		var res integrationResult
		if err := gclient.Query(integrationQuery, graphql.Variables{"id": id}, &res); err != nil {
			processLock.Unlock()
			return "", err
		}
		if res.Data == nil {
			processLock.Unlock()
			return "", fmt.Errorf("couldn't find integration with id: %s", id)
		}
		val = res.Data.Integration.Publisher.Identifier + "/" + res.Data.Integration.RefType
		integrations[id] = val
		processLock.Unlock()
		return val, nil
	}
	cfg, config := loadConfig(cmd, logger, channel)
	if channel == "" {
		channel = config.Channel
	}
	args = append(args, "--config", cfg)
	gclient, err := graphql.NewClient(config.CustomerID, "", "", api.BackendURL(api.GraphService, channel))
	if err != nil {
		log.Fatal(logger, "error creating graphql client", "err", err)
	}
	gclient.SetHeader("Authorization", config.APIKey)
	errors := make(chan error)
	go func() {
		for err := range errors {
			if err != nil {
				log.Fatal(logger, err.Error())
			}
		}
	}()
	groupID := "agent-run-monitor"
	if config.EnrollmentID != "" {
		// if self managed, we need to use a different group id than the cloud
		groupID += "-" + config.EnrollmentID
	}
	ch, err := event.NewSubscription(ctx, event.Subscription{
		// BUGFIX: the original rebuilt the literal here instead of using
		// groupID, so cloud agents got "agent-run-monitor-" (trailing dash)
		// and the self-managed distinction above was dead code
		GroupID:           groupID,
		Topics:            []string{"ops.db.Change"},
		Channel:           channel,
		APIKey:            config.APIKey,
		DisablePing:       true,
		Logger:            logger,
		Errors:            errors,
		DisableAutoCommit: true,
		Filter: &event.SubscriptionFilter{
			ObjectExpr: `model:"agent.IntegrationInstance" AND action:"update"`,
		},
	})
	if err != nil {
		log.Fatal(logger, "error creating monitor subscription", "err", err)
	}
	ch.WaitForReady()
	defer ch.Close()
	// set startup date
	if err := pingEnrollment(logger, gclient, config.EnrollmentID, agent.EnrollmentModelLastStartupDateColumn, true); err != nil {
		log.Error(logger, "unable to update enrollment", "enrollment_id", config.EnrollmentID, "err", err)
	}
	// runIntegration spawns a child "run <name>" process and waits until
	// it signals readiness via its start file (or exits)
	runIntegration := func(name string) {
		log.Info(logger, "running integration", "name", name)
		processLock.Lock()
		startFile, err := ioutil.TempFile("", "")
		if err != nil {
			// previously this error was ignored, causing a nil deref below
			log.Fatal(logger, "error creating start file", "err", err)
		}
		defer os.Remove(startFile.Name())
		// BUGFIX: build a per-invocation argument list; the original
		// appended --start-file to the shared args slice, so every later
		// child inherited the start files of all previous ones
		cargs := append([]string{"run", name}, args...)
		cargs = append(cargs, "--start-file", startFile.Name())
		c := exec.CommandContext(ctx, os.Args[0], cargs...)
		var wg sync.WaitGroup
		wg.Add(1)
		c.Stdin = os.Stdin
		c.Stdout = os.Stdout
		c.Stderr = os.Stderr
		c.SysProcAttr = &syscall.SysProcAttr{Setpgid: true}
		if err := c.Start(); err != nil {
			processLock.Unlock()
			log.Fatal(logger, "error starting "+name, "err", err)
		}
		exited := make(chan bool)
		pos.OnExit(func(_ int) {
			log.Debug(logger, "exit")
			close(exited)
		})
		go func() {
			// BUGFIX: real timer/ticker instead of time.After in the
			// select — time.After was re-armed on every iteration, so the
			// 5-minute startup timeout could never fire
			timeout := time.NewTimer(5 * time.Minute)
			defer timeout.Stop()
			tick := time.NewTicker(time.Second)
			defer tick.Stop()
			for {
				select {
				case <-exited:
					wg.Done()
					return
				case <-tick.C:
					if fileutil.FileExists(startFile.Name()) {
						wg.Done()
						os.Remove(startFile.Name())
						return
					}
				case <-timeout.C:
					log.Fatal(logger, "failed to start integration "+name+" after 5 minutes")
				}
			}
		}()
		processes[name] = c
		processLock.Unlock()
		log.Debug(logger, "waiting for integration to start")
		wg.Wait()
		if c != nil && c.ProcessState != nil && c.ProcessState.Exited() {
			log.Info(logger, "integration is not running")
		} else {
			log.Info(logger, "integration is running")
		}
	}
	// find all the integrations we have setup
	query := &agent.IntegrationInstanceQuery{
		Filters: []string{
			agent.IntegrationInstanceModelDeletedColumn + " = ?",
			agent.IntegrationInstanceModelEnrollmentIDColumn + " = ?",
		},
		Params: []interface{}{
			false,
			config.EnrollmentID,
		},
	}
	q := &agent.IntegrationInstanceQueryInput{Query: query}
	instances, err := agent.FindIntegrationInstances(gclient, q)
	if err != nil {
		log.Fatal(logger, "error finding integration instances", "err", err)
	}
	if instances != nil {
		for _, edge := range instances.Edges {
			name, err := getIntegration(edge.Node.IntegrationID)
			if err != nil {
				log.Fatal(logger, "error fetching integration name for integration", "err", err, "integration_id", edge.Node.IntegrationID, "id", edge.Node.ID)
			}
			runIntegration(name)
			if edge.Node.Setup == agent.IntegrationInstanceSetupReady {
				log.Info(logger, "setting integration to running 🏃♀️", "integration_instance_id", edge.Node.ID)
				if err := setIntegrationRunning(gclient, edge.Node.ID); err != nil {
					log.Fatal(logger, "error updating integration instance", "err", err, "integration_id", edge.Node.IntegrationID, "id", edge.Node.ID)
				}
			}
		}
	}
	var restarting bool
	done := make(chan bool)
	finished := make(chan bool)
	pos.OnExit(func(_ int) {
		if !restarting {
			done <- true
			<-finished
			log.Info(logger, "👯")
		}
	})
	if err := pingEnrollment(logger, gclient, config.EnrollmentID, "", true); err != nil {
		log.Error(logger, "unable to update enrollment", "enrollment_id", config.EnrollmentID, "err", err)
	}
	// fire 30 minutes before the apikey expires. BUGFIX: the original used
	// time.After(refreshDuration) and time.After(5*time.Minute) directly in
	// the select, re-arming both on every loop iteration — so the refresh
	// timer was reset by every ping/event and could never fire
	refreshTimer := time.NewTimer(config.Expires.Sub(time.Now().Add(time.Minute * 30)))
	defer refreshTimer.Stop()
	pingTicker := time.NewTicker(time.Minute * 5)
	defer pingTicker.Stop()
	var shutdownWg sync.WaitGroup
	// run a loop waiting for exit or an updated integration instance
completed:
	for {
		select {
		case <-refreshTimer.C:
			// if we get here, we need to refresh our expiring apikey and restart all the integrations
			log.Info(logger, "need to update apikey before it expires and restart 🤞🏽")
			// 1. fetch our new apikey
			if _, err := validateConfig(config, channel, true); err != nil {
				log.Fatal(logger, "error refreshing the expiring apikey", "err", err)
			}
			// 2. save the config file changes
			if err := saveConfig(cmd, config); err != nil {
				log.Fatal(logger, "error saving the new apikey", "err", err)
			}
			// 3. stop all of our current integrations
			restarting = true
			done <- true
			// 4. restart ourself which should re-entrant this function
			go runIntegrationMonitor(ctx, logger, cmd)
		case <-pingTicker.C:
			if err := pingEnrollment(logger, gclient, config.EnrollmentID, "", true); err != nil {
				log.Error(logger, "unable to update enrollment", "enrollment_id", config.EnrollmentID, "err", err)
			}
		case <-done:
			processLock.Lock()
			for k, c := range processes {
				log.Debug(logger, "stopping "+k, "pid", c.Process.Pid)
				shutdownWg.Add(1)
				go func(c *exec.Cmd, name string) {
					defer shutdownWg.Done()
					// signal the whole process group, then give it 15s
					// before a hard kill
					syscall.Kill(-c.Process.Pid, syscall.SIGINT)
					exited := make(chan bool)
					go func() {
						c.Wait()
						log.Debug(logger, "exited "+name)
						exited <- true
					}()
					select {
					case <-time.After(time.Second * 15):
						log.Debug(logger, "timed out on exit for "+name)
						if c.Process != nil {
							c.Process.Kill()
						}
						return
					case <-exited:
						return
					}
				}(c, k)
				delete(processes, k)
			}
			processLock.Unlock()
			break completed
		case evt := <-ch.Channel():
			instruction, instance, err := vetDBChange(evt, config.EnrollmentID)
			if err != nil {
				log.Fatal(logger, "error decoding db change", "err", err)
			}
			switch instruction {
			case shouldStart:
				log.Info(logger, "db change received, need to create a new process", "id", instance.ID)
				if err := setIntegrationRunning(gclient, instance.ID); err != nil {
					log.Fatal(logger, "error updating integration", "err", err)
				}
				name, err := getIntegration(instance.ID)
				if err != nil {
					log.Fatal(logger, "error fetching integration detail", "err", err)
				}
				processLock.Lock()
				c := processes[name]
				if c == nil {
					processLock.Unlock()
					runIntegration(name)
				} else {
					processLock.Unlock()
				}
			case shouldStop:
				log.Info(logger, "db change delete received, need to delete process", "id", instance.ID)
				name, err := getIntegration(instance.ID)
				if err != nil {
					log.Fatal(logger, "error fetching integration detail", "err", err)
				}
				processLock.Lock()
				c := processes[name]
				if c != nil {
					c.Process.Kill()
					delete(processes, instance.RefType)
				}
				processLock.Unlock()
			}
			evt.Commit()
		}
	}
	shutdownWg.Wait()
	// if restarting, dont send a shutdown or block on finished
	if restarting {
		return
	}
	if err := pingEnrollment(logger, gclient, config.EnrollmentID, agent.EnrollmentModelLastShutdownDateColumn, false); err != nil {
		log.Error(logger, "unable to update enrollment", "enrollment_id", config.EnrollmentID, "err", err)
	}
	finished <- true
}
// enrollmentExists reports whether an enrollment with the given ID is
// known to the backend. Any lookup failure is returned alongside the
// (best-effort) existence flag.
func enrollmentExists(client graphql.Client, enrollmentID string) (bool, error) {
	found, err := agent.FindEnrollment(client, enrollmentID)
	if err != nil {
		return found != nil, err
	}
	return found != nil, nil
}
// enrollAgent runs the browser-based enrollment flow for this agent: it
// waits for the auth redirect to deliver credentials, registers an
// Enrollment record with the backend, and persists the resulting
// configuration to configFileName. Returns the new config on success.
func enrollAgent(logger log.Logger, channel string, configFileName string) (*runner.ConfigFile, error) {
	var config runner.ConfigFile
	config.Channel = channel
	url := sdk.JoinURL(api.BackendURL(api.AppService, channel), "/enroll")
	var userID string
	err := util.WaitForRedirect(url, func(w http.ResponseWriter, r *http.Request) {
		q := r.URL.Query()
		config.APIKey = q.Get("apikey")
		config.RefreshKey = q.Get("refresh_token")
		config.CustomerID = q.Get("customer_id")
		// default the expiration to ~23h; previously an unparseable
		// "expires" value was silently treated as epoch zero, here it
		// falls back to the same default as a missing value
		config.Expires = time.Now().Add(time.Hour * 23)
		if expires := q.Get("expires"); expires != "" {
			if e, perr := strconv.ParseInt(expires, 10, 64); perr == nil {
				config.Expires = datetime.DateFromEpoch(e)
			}
		}
		userID = q.Get("user_id")
		w.WriteHeader(http.StatusOK)
	})
	if err != nil {
		return nil, fmt.Errorf("error waiting for browser: %w", err)
	}
	log.Info(logger, "logged in", "customer_id", config.CustomerID)
	log.Info(logger, "enrolling agent...", "customer_id", config.CustomerID)
	client, err := graphql.NewClient(config.CustomerID, "", "", api.BackendURL(api.GraphService, channel))
	if err != nil {
		return nil, fmt.Errorf("error creating graphql client: %w", err)
	}
	client.SetHeader("Authorization", config.APIKey)
	info, err := sysinfo.GetSystemInfo()
	if err != nil {
		return nil, fmt.Errorf("error getting system info: %w", err)
	}
	config.SystemID = info.ID
	created := agent.EnrollmentCreatedDate(datetime.NewDateNow())
	enr := agent.Enrollment{
		AgentVersion: commit, // TODO(robin): when we start versioning, switch this to version
		CreatedDate:  created,
		SystemID:     info.ID,
		Hostname:     info.Hostname,
		NumCPU:       info.NumCPU,
		OS:           info.OS,
		Architecture: info.Architecture,
		GoVersion:    info.GoVersion,
		CustomerID:   config.CustomerID,
		UserID:       userID,
		ID:           agent.NewEnrollmentID(config.CustomerID, info.ID),
	}
	config.EnrollmentID = enr.ID
	if err := agent.CreateEnrollment(client, enr); err != nil {
		// a duplicate key means this system was enrolled before; just
		// recreate the local config file in that case
		if strings.Contains(err.Error(), "duplicate key error") {
			log.Info(logger, "looks like this system has already been enrolled, recreating local config")
		} else {
			return nil, fmt.Errorf("error creating enrollment: %w", err)
		}
	}
	// previously the MkdirAll error was ignored, turning a bad directory
	// into a confusing write error below
	if err := os.MkdirAll(filepath.Dir(configFileName), 0700); err != nil {
		return nil, fmt.Errorf("error creating config directory: %w", err)
	}
	// 0600: the config contains the apikey and refresh token
	if err := ioutil.WriteFile(configFileName, []byte(pjson.Stringify(config)), 0600); err != nil {
		return nil, fmt.Errorf("error writing config file: %w", err)
	}
	log.Info(logger, "agent enrolled 🎉", "customer_id", config.CustomerID)
	return &config, nil
}
// enrollAgentCmd will authenticate with pinpoint and create an agent enrollment
var enrollAgentCmd = &cobra.Command{
Use: "enroll",
Short: "connect this agent to Pinpoint's backend",
Hidden: true,
Run: func(cmd *cobra.Command, args []string) {
logger := log.NewCommandLogger(cmd)
defer logger.Close()
channel, _ := cmd.Flags().GetString("channel")
fn, err := configFilename(cmd)
if err != nil {
log.Fatal(logger, "error getting config file name", "err", err)
}
if _, err := enrollAgent(logger, channel, fn); err != nil {
log.Fatal(logger, "error enrolling this agent", "err", err)
}
},
}
func copyFile(from, to string) error {
in, err := os.Open(from)
if err != nil {
return err
}
defer in.Close()
out, err := os.Create(to)
if err != nil {
return err
}
defer out.Close()
_, err = io.Copy(out, in)
return err
}
// runCmd represents the run command
var runCmd = &cobra.Command{
Use: "run <integration> <version>",
Short: "run a published integration",
Args: cobra.MinimumNArgs(0),
Run: func(cmd *cobra.Command, args []string) {
_logger := log.NewCommandLogger(cmd)
defer _logger.Close()
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
if len(args) == 0 {
log.Info(_logger, "starting main process")
// need to figure out all our configured integrations and run each one
runIntegrationMonitor(ctx, _logger, cmd)
return
}
fullIntegration := args[0]
var version string
if len(args) > 1 {
version = args[1]
}
tok := strings.Split(fullIntegration, "/")
if len(tok) != 2 {
log.Fatal(_logger, "integration should be in the format: publisher/integration such as pinpt/github")
}
publisher := tok[0]
integration := tok[1]
logger := log.With(_logger, "pkg", integration)
channel, _ := cmd.Flags().GetString("channel")
dir, _ := cmd.Flags().GetString("dir")
secret, _ := cmd.Flags().GetString("secret")
dir, err := filepath.Abs(dir)
if err != nil {
log.Fatal(logger, "error getting dir absolute path", "err", err)
}
var selfManaged bool
var ch *event.SubscriptionChannel
var cmdargs []string
if secret != "" {
log.Debug(logger, "creating internal subscription")
if channel == "" {
channel = "stable"
}
// each replica agent should recieve updates
groupSuffix, err := os.Hostname()
if err != nil {
groupSuffix = pstrings.NewUUIDV4()
log.Warn(logger, "unable to get hostname, using random uuid", "uuid", groupSuffix, "err", err)
}
ch, err = event.NewSubscription(ctx, event.Subscription{
GroupID: "agent-run-" + publisher + "-" + integration + "-" + groupSuffix,
Topics: []string{"ops.db.Change"},
Channel: channel,
HTTPHeaders: map[string]string{"x-api-key": secret},
DisablePing: true,
Temporary: true,
Logger: logger,
Filter: &event.SubscriptionFilter{
ObjectExpr: `model:"registry.Integration"`,
},
})
cmdargs = append(cmdargs, "--secret="+secret, "--channel="+channel)
} else {
selfManaged = true
log.Debug(logger, "creating external subscription")
cfg, config := loadConfig(cmd, logger, channel)
apikey := config.APIKey
if channel == "" {
channel = config.Channel
}
ch, err = event.NewSubscription(ctx, event.Subscription{
GroupID: "agent-run-" + publisher + "-" + integration + "-" + config.EnrollmentID,
Topics: []string{"ops.db.Change"},
Channel: channel,
APIKey: apikey,
DisablePing: true,
Logger: logger,
Filter: &event.SubscriptionFilter{
ObjectExpr: `model:"registry.Integration"`,
},
})
cmdargs = append(cmdargs, "--config="+cfg, "--channel="+channel)
}
if err != nil {
log.Fatal(logger, "error creating subscription", "err", err)
}
// start file is used to signal to the monitor when we're running
startfile, _ := cmd.Flags().GetString("start-file")
if startfile != "" {
os.Remove(startfile)
cmdargs = append(cmdargs, "--start-file", startfile)
defer os.Remove(startfile)
}
log.Info(logger, "waiting for subscription to be ready", "channel", channel)
ch.WaitForReady()
log.Info(logger, "subscription is ready")
defer ch.Close()
var stopped, restarting bool
var stoppedLock, restartLock sync.Mutex
done := make(chan bool)
finished := make(chan bool, 1)
restart := make(chan bool, 2)
exited := make(chan bool)
var currentCmd *exec.Cmd
var restarted int
if selfManaged {
pos.OnExit(func(_ int) {
stoppedLock.Lock()
stopped = true
stoppedLock.Unlock()
done <- true
<-finished
})
}
integrationBinary := filepath.Join(dir, integration)
previousIntegrationBinary := filepath.Join(dir, "old-"+integration)
restart <- true // start it up
exit:
for {
stoppedLock.Lock()
s := stopped
stoppedLock.Unlock()
if s {
break
}
select {
case evt := <-ch.Channel():
var dbchange DBChange
json.Unmarshal([]byte(evt.Data), &dbchange)
var instance Integration
json.Unmarshal([]byte(dbchange.Data), &instance)
log.Debug(logger, "db change event received", "ref_type", instance.RefType, "integration", integration)
if instance.RefType == integration {
switch dbchange.Action {
case "update", "UPDATE", "upsert", "UPSERT":
// copy the binary so we can rollback if needed
if err := copyFile(integrationBinary, previousIntegrationBinary); err != nil {
log.Error(logger, "error copying integration", "err", err)
break exit
}
log.Info(logger, "new integration detected, will restart in 15s", "version", instance.Version)
restartLock.Lock()
restarting = true
restarted = 0
version = instance.Version
time.Sleep(time.Second * 15)
restart <- true // force a new download
restartLock.Unlock()
case "delete", "DELETE":
// TODO -- exit with a special code to indicate we don't need to restart this integration
}
}
go evt.Commit() // we need to put in a separate thread since we're holding the sub thread
case <-done:
if currentCmd != nil {
syscall.Kill(-currentCmd.Process.Pid, syscall.SIGINT)
select {
case <-time.After(time.Second * 10):
break
case <-exited:
break
}
}
break
case force := <-restart:
log.Info(logger, "restart requested")
if currentCmd != nil && currentCmd.Process != nil {
currentCmd.Process.Kill()
currentCmd = nil
}
stoppedLock.Lock()
s := stopped
stoppedLock.Unlock()
log.Info(logger, "need to start", "stopped", s)
if !s {
restarted++
c, err := getIntegration(ctx, logger, channel, dir, publisher, integration, version, cmdargs, force)
if err != nil {
log.Error(logger, "error running integration", "err", err)
if !fileutil.FileExists(previousIntegrationBinary) {
break exit
} else {
log.Info(logger, "attempting to roll back to previous version of integration", "integration", integration)
os.Remove(integrationBinary)
os.Rename(previousIntegrationBinary, integrationBinary)
os.Chmod(integrationBinary, 0775)
c, err = startIntegration(ctx, logger, integrationBinary, cmdargs)
if err != nil {
log.Error(logger, "error running rolled back integration", "err", err)
break exit
}
}
}
currentCmd = c
os.Remove(previousIntegrationBinary)
log.Info(logger, "started", "pid", c.Process.Pid)
go func() {
// monitor the exit
err := currentCmd.Wait()
if err != nil {
if currentCmd != nil && currentCmd.ProcessState != nil {
if currentCmd.ProcessState.ExitCode() != 0 {
if !selfManaged {
// in cloud we should just end the process
log.Fatal(logger, "integration has exited", "restarted", restarted, "code", currentCmd.ProcessState.ExitCode(), "err", err)
} else {
log.Error(logger, "integration has exited", "restarted", restarted, "code", currentCmd.ProcessState.ExitCode(), "err", err)
}
}
}
log.Info(logger, "pausing", "duration", time.Second*time.Duration(restarted))
time.Sleep(time.Second * time.Duration(restarted))
} else {
restarted = 0
}
// try and restart if we're not in the stopping mode
stoppedLock.Lock()
s := stopped
stoppedLock.Unlock()
if !s {
restartLock.Lock()
r := restarting
restartLock.Unlock()
if !r {
restart <- false // restart but don't force a new download
}
} else {
exited <- true
}
}()
}
}
}
log.Info(logger, "👋")
finished <- true
},
}
func init() {
rootCmd.AddCommand(runCmd)
runCmd.Flags().String("channel", pos.Getenv("PP_CHANNEL", ""), "the channel which can be set")
runCmd.Flags().String("config", "", "the location of the config file")
runCmd.Flags().StringP("dir", "d", "", "directory inside of which to run the integration")
runCmd.Flags().String("secret", pos.Getenv("PP_AUTH_SHARED_SECRET", ""), "internal shared secret")
runCmd.Flags().String("start-file", "", "the start file to write once running")
runCmd.Flags().MarkHidden("secret")
runCmd.Flags().MarkHidden("start-file")
rootCmd.AddCommand(enrollAgentCmd)
enrollAgentCmd.Flags().String("channel", pos.Getenv("PP_CHANNEL", ""), "the channel which can be set")
enrollAgentCmd.Flags().String("config", "", "the location of the config file")
}
| {
log.Info(logger, "updating enrollment", "setting", datefield, "enrollment_id", enrollmentID, "active", active)
now := datetime.NewDateNow()
vars := make(graphql.Variables)
if datefield != "" {
vars[datefield] = now
vars[agent.EnrollmentModelRunningColumn] = active
}
vars[agent.EnrollmentModelLastPingDateColumn] = now
return agent.ExecEnrollmentSilentUpdateMutation(client, enrollmentID, vars, false)
} | identifier_body |
run.go | package cmd
import (
"context"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"net/http"
"os"
"os/exec"
"path/filepath"
"strconv"
"strings"
"sync"
"syscall"
"time"
"github.com/pinpt/agent/v4/internal/util"
"github.com/pinpt/agent/v4/runner"
"github.com/pinpt/agent/v4/sdk"
"github.com/pinpt/agent/v4/sysinfo"
"github.com/pinpt/go-common/v10/api"
"github.com/pinpt/go-common/v10/datetime"
"github.com/pinpt/go-common/v10/event"
"github.com/pinpt/go-common/v10/fileutil"
"github.com/pinpt/go-common/v10/graphql"
pjson "github.com/pinpt/go-common/v10/json"
"github.com/pinpt/go-common/v10/log"
pos "github.com/pinpt/go-common/v10/os"
pstrings "github.com/pinpt/go-common/v10/strings"
"github.com/pinpt/integration-sdk/agent"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
)
// DBChange event
type DBChange struct {
// Action the action that was taken
Action string `json:"action" codec:"action" bson:"action" yaml:"action" faker:"-"`
// Data the data payload of the change
Data string `json:"data" codec:"data" bson:"data" yaml:"data" faker:"-"`
}
// Integration A registry integration
type Integration struct {
// RefType the reference type
RefType string `json:"ref_type" codec:"ref_type" bson:"ref_type" yaml:"ref_type" faker:"-"`
// UpdatedAt the date the integration was last updated
UpdatedAt int64 `json:"updated_ts" codec:"updated_ts" bson:"updated_ts" yaml:"updated_ts" faker:"-"`
// Version the latest version that was published
Version string `json:"version" codec:"version" bson:"version" yaml:"version" faker:"-"`
}
func getIntegration(ctx context.Context, logger log.Logger, channel string, dir string, publisher string, integration string, version string, cmdargs []string, force bool) (*exec.Cmd, error) {
if publisher == "" {
return nil, fmt.Errorf("missing publisher")
}
if integration == "" {
return nil, fmt.Errorf("missing integration")
}
longName := fmt.Sprintf("%s/%s", publisher, integration)
if version != "" {
longName += "/" + version
}
integrationExecutable, _ := filepath.Abs(filepath.Join(dir, integration))
if force || !fileutil.FileExists(integrationExecutable) {
log.Info(logger, "need to download integration", "integration", longName, "force", force)
var err error
integrationExecutable, err = downloadIntegration(logger, channel, dir, publisher, integration, version)
if err != nil {
return nil, fmt.Errorf("error downloading integration %s: %w", longName, err)
}
log.Info(logger, "downloaded", "integration", integrationExecutable)
}
return startIntegration(ctx, logger, integrationExecutable, cmdargs)
}
func startIntegration(ctx context.Context, logger log.Logger, integrationExecutable string, cmdargs []string) (*exec.Cmd, error) {
log.Info(logger, "starting", "file", integrationExecutable)
cm := exec.CommandContext(ctx, integrationExecutable, cmdargs...)
cm.Stdout = os.Stdout
cm.Stderr = os.Stderr
cm.Stdin = os.Stdin
cm.SysProcAttr = &syscall.SysProcAttr{Setpgid: true}
if err := cm.Start(); err != nil {
return nil, err
}
return cm, nil
}
func configFilename(cmd *cobra.Command) (string, error) {
fn, _ := cmd.Flags().GetString("config")
if fn == "" {
fn = filepath.Join(os.Getenv("HOME"), ".pinpoint-agent/config.json")
}
return filepath.Abs(fn)
}
// clientFromConfig will use the contents of ConfigFile to make a client
func clientFromConfig(config *runner.ConfigFile) (graphql.Client, error) {
gclient, err := graphql.NewClient(config.CustomerID, "", "", api.BackendURL(api.GraphService, config.Channel))
if err != nil {
return nil, err
}
gclient.SetHeader("Authorization", config.APIKey)
return gclient, nil
}
func | (config *runner.ConfigFile, channel string, force bool) (bool, error) {
var resp struct {
Expired bool `json:"expired"`
Valid bool `json:"valid"`
}
if !force {
res, err := api.Get(context.Background(), channel, api.AuthService, "/validate?customer_id="+config.CustomerID, config.APIKey)
if err != nil {
return false, err
}
defer res.Body.Close()
if err := json.NewDecoder(res.Body).Decode(&resp); err != nil {
return false, err
}
} else {
resp.Expired = true
}
if resp.Expired {
newToken, err := util.RefreshOAuth2Token(http.DefaultClient, channel, "pinpoint", config.RefreshKey)
if err != nil {
return false, err
}
config.APIKey = newToken // update the new token
return true, nil
}
if resp.Valid {
return false, fmt.Errorf("the apikey or refresh token is no longer valid")
}
return false, nil
}
func saveConfig(cmd *cobra.Command, config *runner.ConfigFile) error {
cfg, err := configFilename(cmd)
if err != nil {
return err
}
of, err := os.Open(cfg)
if err != nil {
return err
}
defer of.Close()
// save our channels back to the config
if err := json.NewEncoder(of).Encode(config); err != nil {
return err
}
return nil
}
func loadConfig(cmd *cobra.Command, logger log.Logger, channel string) (string, *runner.ConfigFile) {
cfg, err := configFilename(cmd)
if err != nil {
log.Fatal(logger, "error getting config file name", "err", err)
}
if fileutil.FileExists(cfg) {
var config runner.ConfigFile
of, err := os.Open(cfg)
if err != nil {
log.Fatal(logger, "error opening config file at "+cfg, "err", err)
}
defer of.Close()
if err := json.NewDecoder(of).Decode(&config); err != nil {
log.Fatal(logger, "error parsing config file at "+cfg, "err", err)
}
of.Close()
updated, err := validateConfig(&config, channel, false)
if err != nil {
log.Fatal(logger, "error validating the apikey", "err", err)
}
if updated {
// save our changes back to the config
if err := saveConfig(cmd, &config); err != nil {
log.Fatal(logger, "error opening config file for writing at "+cfg, "err", err)
}
}
client, err := clientFromConfig(&config)
if err != nil {
log.Fatal(logger, "error creating client", "err", err)
}
exists, err := enrollmentExists(client, config.EnrollmentID)
if err != nil {
log.Fatal(logger, "error checking enrollment", "err", err)
}
if exists {
return cfg, &config
}
log.Info(logger, "agent configuration found, but not known to Pinpoint, re-enrolling now", "path", cfg)
} else {
log.Info(logger, "no agent configuration found, enrolling now", "path", cfg)
}
config, err := enrollAgent(logger, channel, cfg)
if err != nil {
log.Fatal(logger, "error enrolling new agent", "err", err)
}
return cfg, config
}
type integrationInstruction int
const (
doNothing integrationInstruction = iota
shouldStart
shouldStop
)
func vetDBChange(evt event.SubscriptionEvent, enrollmentID string) (integrationInstruction, *agent.IntegrationInstance, error) {
var dbchange DBChange
if err := json.Unmarshal([]byte(evt.Data), &dbchange); err != nil {
return 0, nil, fmt.Errorf("error decoding dbchange: %w", err)
}
var instance agent.IntegrationInstance
if err := json.Unmarshal([]byte(dbchange.Data), &instance); err != nil {
return 0, nil, fmt.Errorf("error decoding integration instance: %w", err)
}
if instance.EnrollmentID == nil || *instance.EnrollmentID == "" || *instance.EnrollmentID != enrollmentID {
return doNothing, nil, nil
}
if instance.Active == true && instance.Setup == agent.IntegrationInstanceSetupReady {
return shouldStart, &instance, nil
}
if instance.Active == false && instance.Deleted == true {
return shouldStop, &instance, nil
}
return doNothing, nil, nil
}
type integrationResult struct {
Data *struct {
Integration struct {
RefType string `json:"ref_type"`
Publisher struct {
Identifier string `json:"identifier"`
} `json:"publisher"`
} `json:"Integration"`
} `json:"registry"`
}
var integrationQuery = `query findIntegration($id: ID!) {
registry {
Integration(_id: $id) {
ref_type
publisher {
identifier
}
}
}
}`
func pingEnrollment(logger log.Logger, client graphql.Client, enrollmentID string, datefield string, active bool) error {
log.Info(logger, "updating enrollment", "setting", datefield, "enrollment_id", enrollmentID, "active", active)
now := datetime.NewDateNow()
vars := make(graphql.Variables)
if datefield != "" {
vars[datefield] = now
vars[agent.EnrollmentModelRunningColumn] = active
}
vars[agent.EnrollmentModelLastPingDateColumn] = now
return agent.ExecEnrollmentSilentUpdateMutation(client, enrollmentID, vars, false)
}
func setIntegrationRunning(client graphql.Client, integrationInstanceID string) error {
vars := graphql.Variables{
agent.IntegrationInstanceModelSetupColumn: agent.IntegrationInstanceSetupRunning,
}
if err := agent.ExecIntegrationInstanceSilentUpdateMutation(client, integrationInstanceID, vars, false); err != nil {
return fmt.Errorf("error updating integration instance to running: %w", err)
}
return nil
}
func runIntegrationMonitor(ctx context.Context, logger log.Logger, cmd *cobra.Command) {
channel, _ := cmd.Flags().GetString("channel")
args := []string{}
cmd.Flags().Visit(func(f *pflag.Flag) {
args = append(args, "--"+f.Name, f.Value.String())
})
var gclient graphql.Client
integrations := make(map[string]string) // id -> identifier/ref_type
processes := make(map[string]*exec.Cmd)
var processLock sync.Mutex
getIntegration := func(id string) (string, error) {
processLock.Lock()
val := integrations[id]
if val != "" {
processLock.Unlock()
return val, nil
}
var res integrationResult
if err := gclient.Query(integrationQuery, graphql.Variables{"id": id}, &res); err != nil {
processLock.Unlock()
return "", err
}
if res.Data == nil {
processLock.Unlock()
return "", fmt.Errorf("couldn't find integration with id: %s", id)
}
val = res.Data.Integration.Publisher.Identifier + "/" + res.Data.Integration.RefType
integrations[id] = val
processLock.Unlock()
return val, nil
}
cfg, config := loadConfig(cmd, logger, channel)
if channel == "" {
channel = config.Channel
}
args = append(args, "--config", cfg)
gclient, err := graphql.NewClient(config.CustomerID, "", "", api.BackendURL(api.GraphService, channel))
if err != nil {
log.Fatal(logger, "error creating graphql client", "err", err)
}
gclient.SetHeader("Authorization", config.APIKey)
errors := make(chan error)
go func() {
for err := range errors {
if err != nil {
log.Fatal(logger, err.Error())
}
}
}()
groupID := "agent-run-monitor"
if config.EnrollmentID != "" {
// if self managed, we need to use a different group id than the cloud
groupID += "-" + config.EnrollmentID
}
ch, err := event.NewSubscription(ctx, event.Subscription{
GroupID: "agent-run-monitor-" + config.EnrollmentID,
Topics: []string{"ops.db.Change"},
Channel: channel,
APIKey: config.APIKey,
DisablePing: true,
Logger: logger,
Errors: errors,
DisableAutoCommit: true,
Filter: &event.SubscriptionFilter{
ObjectExpr: `model:"agent.IntegrationInstance" AND action:"update"`,
},
})
if err != nil {
log.Fatal(logger, "error creating montior subscription", "err", err)
}
ch.WaitForReady()
defer ch.Close()
// set startup date
if err := pingEnrollment(logger, gclient, config.EnrollmentID, agent.EnrollmentModelLastStartupDateColumn, true); err != nil {
log.Error(logger, "unable to update enrollment", "enrollment_id", config.EnrollmentID, "err", err)
}
runIntegration := func(name string) {
log.Info(logger, "running integration", "name", name)
processLock.Lock()
startFile, _ := ioutil.TempFile("", "")
defer os.Remove(startFile.Name())
args = append(args, "--start-file", startFile.Name())
c := exec.CommandContext(ctx, os.Args[0], append([]string{"run", name}, args...)...)
var wg sync.WaitGroup
wg.Add(1)
c.Stdin = os.Stdin
c.Stdout = os.Stdout
c.Stderr = os.Stderr
c.SysProcAttr = &syscall.SysProcAttr{Setpgid: true}
if err := c.Start(); err != nil {
processLock.Unlock()
log.Fatal(logger, "error starting "+name, "err", err)
}
exited := make(chan bool)
pos.OnExit(func(_ int) {
log.Debug(logger, "exit")
close(exited)
})
go func() {
for {
select {
case <-exited:
wg.Done()
return
case <-time.After(time.Second):
if fileutil.FileExists(startFile.Name()) {
wg.Done()
os.Remove(startFile.Name())
return
}
case <-time.After(5 * time.Minute):
log.Fatal(logger, "failed to start integration "+name+" after 5 minutes")
}
}
}()
processes[name] = c
processLock.Unlock()
log.Debug(logger, "waiting for integration to start")
wg.Wait()
if c != nil && c.ProcessState != nil && c.ProcessState.Exited() {
log.Info(logger, "integration is not running")
} else {
log.Info(logger, "integration is running")
}
}
// find all the integrations we have setup
query := &agent.IntegrationInstanceQuery{
Filters: []string{
agent.IntegrationInstanceModelDeletedColumn + " = ?",
agent.IntegrationInstanceModelEnrollmentIDColumn + " = ?",
},
Params: []interface{}{
false,
config.EnrollmentID,
},
}
q := &agent.IntegrationInstanceQueryInput{Query: query}
instances, err := agent.FindIntegrationInstances(gclient, q)
if err != nil {
log.Fatal(logger, "error finding integration instances", "err", err)
}
if instances != nil {
for _, edge := range instances.Edges {
name, err := getIntegration(edge.Node.IntegrationID)
if err != nil {
log.Fatal(logger, "error fetching integration name for integration", "err", err, "integration_id", edge.Node.IntegrationID, "id", edge.Node.ID)
}
runIntegration(name)
if edge.Node.Setup == agent.IntegrationInstanceSetupReady {
log.Info(logger, "setting integration to running 🏃♀️", "integration_instance_id", edge.Node.ID)
if err := setIntegrationRunning(gclient, edge.Node.ID); err != nil {
log.Fatal(logger, "error updating integration instance", "err", err, "integration_id", edge.Node.IntegrationID, "id", edge.Node.ID)
}
}
}
}
var restarting bool
done := make(chan bool)
finished := make(chan bool)
pos.OnExit(func(_ int) {
if !restarting {
done <- true
<-finished
log.Info(logger, "👯")
}
})
if err := pingEnrollment(logger, gclient, config.EnrollmentID, "", true); err != nil {
log.Error(logger, "unable to update enrollment", "enrollment_id", config.EnrollmentID, "err", err)
}
// calculate the duration of time left before the
refreshDuration := config.Expires.Sub(time.Now().Add(time.Minute * 30))
var shutdownWg sync.WaitGroup
// run a loop waiting for exit or an updated integration instance
completed:
for {
select {
case <-time.After(refreshDuration):
// if we get here, we need to refresh our expiring apikey and restart all the integrations
log.Info(logger, "need to update apikey before it expires and restart 🤞🏽")
// 1. fetch our new apikey
if _, err := validateConfig(config, channel, true); err != nil {
log.Fatal(logger, "error refreshing the expiring apikey", "err", err)
}
// 2. save the config file changes
if err := saveConfig(cmd, config); err != nil {
log.Fatal(logger, "error saving the new apikey", "err", err)
}
// 3. stop all of our current integrations
restarting = true
done <- true
// 4. restart ourself which should re-entrant this function
go runIntegrationMonitor(ctx, logger, cmd)
case <-time.After(time.Minute * 5):
if err := pingEnrollment(logger, gclient, config.EnrollmentID, "", true); err != nil {
log.Error(logger, "unable to update enrollment", "enrollment_id", config.EnrollmentID, "err", err)
}
case <-done:
processLock.Lock()
for k, c := range processes {
log.Debug(logger, "stopping "+k, "pid", c.Process.Pid)
shutdownWg.Add(1)
go func(c *exec.Cmd, name string) {
defer shutdownWg.Done()
syscall.Kill(-c.Process.Pid, syscall.SIGINT)
exited := make(chan bool)
go func() {
c.Wait()
log.Debug(logger, "exited "+name)
exited <- true
}()
select {
case <-time.After(time.Second * 15):
log.Debug(logger, "timed out on exit for "+name)
if c.Process != nil {
c.Process.Kill()
}
return
case <-exited:
return
}
}(c, k)
delete(processes, k)
}
processLock.Unlock()
break completed
case evt := <-ch.Channel():
instruction, instance, err := vetDBChange(evt, config.EnrollmentID)
if err != nil {
log.Fatal(logger, "error decoding db change", "err", err)
}
switch instruction {
case shouldStart:
log.Info(logger, "db change received, need to create a new process", "id", instance.ID)
if err := setIntegrationRunning(gclient, instance.ID); err != nil {
log.Fatal(logger, "error updating integration", "err", err)
}
name, err := getIntegration(instance.ID)
if err != nil {
log.Fatal(logger, "error fetching integration detail", "err", err)
}
processLock.Lock()
c := processes[name]
if c == nil {
processLock.Unlock()
runIntegration(name)
} else {
processLock.Unlock()
}
case shouldStop:
log.Info(logger, "db change delete received, need to delete process", "id", instance.ID)
name, err := getIntegration(instance.ID)
if err != nil {
log.Fatal(logger, "error fetching integration detail", "err", err)
}
processLock.Lock()
c := processes[name]
if c != nil {
c.Process.Kill()
delete(processes, instance.RefType)
}
processLock.Unlock()
}
evt.Commit()
}
}
shutdownWg.Wait()
// if restarting, dont send a shutdown or block on finished
if restarting {
return
}
if err := pingEnrollment(logger, gclient, config.EnrollmentID, agent.EnrollmentModelLastShutdownDateColumn, false); err != nil {
log.Error(logger, "unable to update enrollment", "enrollment_id", config.EnrollmentID, "err", err)
}
finished <- true
}
func enrollmentExists(client graphql.Client, enrollmentID string) (bool, error) {
enrollment, err := agent.FindEnrollment(client, enrollmentID)
return enrollment != nil, err
}
func enrollAgent(logger log.Logger, channel string, configFileName string) (*runner.ConfigFile, error) {
var config runner.ConfigFile
config.Channel = channel
url := sdk.JoinURL(api.BackendURL(api.AppService, channel), "/enroll")
var userID string
err := util.WaitForRedirect(url, func(w http.ResponseWriter, r *http.Request) {
q := r.URL.Query()
config.APIKey = q.Get("apikey")
config.RefreshKey = q.Get("refresh_token")
config.CustomerID = q.Get("customer_id")
expires := q.Get("expires")
if expires != "" {
e, _ := strconv.ParseInt(expires, 10, 64)
config.Expires = datetime.DateFromEpoch(e)
} else {
config.Expires = time.Now().Add(time.Hour * 23)
}
userID = q.Get("user_id")
w.WriteHeader(http.StatusOK)
})
if err != nil {
return nil, fmt.Errorf("error waiting for browser: %w", err)
}
log.Info(logger, "logged in", "customer_id", config.CustomerID)
log.Info(logger, "enrolling agent...", "customer_id", config.CustomerID)
client, err := graphql.NewClient(config.CustomerID, "", "", api.BackendURL(api.GraphService, channel))
if err != nil {
return nil, fmt.Errorf("error creating graphql client: %w", err)
}
client.SetHeader("Authorization", config.APIKey)
info, err := sysinfo.GetSystemInfo()
if err != nil {
return nil, fmt.Errorf("error getting system info: %w", err)
}
config.SystemID = info.ID
created := agent.EnrollmentCreatedDate(datetime.NewDateNow())
enr := agent.Enrollment{
AgentVersion: commit, // TODO(robin): when we start versioning, switch this to version
CreatedDate: created,
SystemID: info.ID,
Hostname: info.Hostname,
NumCPU: info.NumCPU,
OS: info.OS,
Architecture: info.Architecture,
GoVersion: info.GoVersion,
CustomerID: config.CustomerID,
UserID: userID,
ID: agent.NewEnrollmentID(config.CustomerID, info.ID),
}
config.EnrollmentID = enr.ID
if err := agent.CreateEnrollment(client, enr); err != nil {
if strings.Contains(err.Error(), "duplicate key error") {
log.Info(logger, "looks like this system has already been enrolled, recreating local config")
} else {
return nil, fmt.Errorf("error creating enrollment: %w", err)
}
}
os.MkdirAll(filepath.Dir(configFileName), 0700)
if err := ioutil.WriteFile(configFileName, []byte(pjson.Stringify(config)), 0644); err != nil {
return nil, fmt.Errorf("error writing config file: %w", err)
}
log.Info(logger, "agent enrolled 🎉", "customer_id", config.CustomerID)
return &config, nil
}
// enrollAgentCmd will authenticate with pinpoint and create an agent enrollment
var enrollAgentCmd = &cobra.Command{
Use: "enroll",
Short: "connect this agent to Pinpoint's backend",
Hidden: true,
Run: func(cmd *cobra.Command, args []string) {
logger := log.NewCommandLogger(cmd)
defer logger.Close()
channel, _ := cmd.Flags().GetString("channel")
fn, err := configFilename(cmd)
if err != nil {
log.Fatal(logger, "error getting config file name", "err", err)
}
if _, err := enrollAgent(logger, channel, fn); err != nil {
log.Fatal(logger, "error enrolling this agent", "err", err)
}
},
}
func copyFile(from, to string) error {
in, err := os.Open(from)
if err != nil {
return err
}
defer in.Close()
out, err := os.Create(to)
if err != nil {
return err
}
defer out.Close()
_, err = io.Copy(out, in)
return err
}
// runCmd represents the run command
var runCmd = &cobra.Command{
Use: "run <integration> <version>",
Short: "run a published integration",
Args: cobra.MinimumNArgs(0),
Run: func(cmd *cobra.Command, args []string) {
_logger := log.NewCommandLogger(cmd)
defer _logger.Close()
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
if len(args) == 0 {
log.Info(_logger, "starting main process")
// need to figure out all our configured integrations and run each one
runIntegrationMonitor(ctx, _logger, cmd)
return
}
fullIntegration := args[0]
var version string
if len(args) > 1 {
version = args[1]
}
tok := strings.Split(fullIntegration, "/")
if len(tok) != 2 {
log.Fatal(_logger, "integration should be in the format: publisher/integration such as pinpt/github")
}
publisher := tok[0]
integration := tok[1]
logger := log.With(_logger, "pkg", integration)
channel, _ := cmd.Flags().GetString("channel")
dir, _ := cmd.Flags().GetString("dir")
secret, _ := cmd.Flags().GetString("secret")
dir, err := filepath.Abs(dir)
if err != nil {
log.Fatal(logger, "error getting dir absolute path", "err", err)
}
var selfManaged bool
var ch *event.SubscriptionChannel
var cmdargs []string
if secret != "" {
log.Debug(logger, "creating internal subscription")
if channel == "" {
channel = "stable"
}
// each replica agent should recieve updates
groupSuffix, err := os.Hostname()
if err != nil {
groupSuffix = pstrings.NewUUIDV4()
log.Warn(logger, "unable to get hostname, using random uuid", "uuid", groupSuffix, "err", err)
}
ch, err = event.NewSubscription(ctx, event.Subscription{
GroupID: "agent-run-" + publisher + "-" + integration + "-" + groupSuffix,
Topics: []string{"ops.db.Change"},
Channel: channel,
HTTPHeaders: map[string]string{"x-api-key": secret},
DisablePing: true,
Temporary: true,
Logger: logger,
Filter: &event.SubscriptionFilter{
ObjectExpr: `model:"registry.Integration"`,
},
})
cmdargs = append(cmdargs, "--secret="+secret, "--channel="+channel)
} else {
selfManaged = true
log.Debug(logger, "creating external subscription")
cfg, config := loadConfig(cmd, logger, channel)
apikey := config.APIKey
if channel == "" {
channel = config.Channel
}
ch, err = event.NewSubscription(ctx, event.Subscription{
GroupID: "agent-run-" + publisher + "-" + integration + "-" + config.EnrollmentID,
Topics: []string{"ops.db.Change"},
Channel: channel,
APIKey: apikey,
DisablePing: true,
Logger: logger,
Filter: &event.SubscriptionFilter{
ObjectExpr: `model:"registry.Integration"`,
},
})
cmdargs = append(cmdargs, "--config="+cfg, "--channel="+channel)
}
if err != nil {
log.Fatal(logger, "error creating subscription", "err", err)
}
// start file is used to signal to the monitor when we're running
startfile, _ := cmd.Flags().GetString("start-file")
if startfile != "" {
os.Remove(startfile)
cmdargs = append(cmdargs, "--start-file", startfile)
defer os.Remove(startfile)
}
log.Info(logger, "waiting for subscription to be ready", "channel", channel)
ch.WaitForReady()
log.Info(logger, "subscription is ready")
defer ch.Close()
var stopped, restarting bool
var stoppedLock, restartLock sync.Mutex
done := make(chan bool)
finished := make(chan bool, 1)
restart := make(chan bool, 2)
exited := make(chan bool)
var currentCmd *exec.Cmd
var restarted int
if selfManaged {
pos.OnExit(func(_ int) {
stoppedLock.Lock()
stopped = true
stoppedLock.Unlock()
done <- true
<-finished
})
}
integrationBinary := filepath.Join(dir, integration)
previousIntegrationBinary := filepath.Join(dir, "old-"+integration)
restart <- true // start it up
exit:
for {
stoppedLock.Lock()
s := stopped
stoppedLock.Unlock()
if s {
break
}
select {
case evt := <-ch.Channel():
var dbchange DBChange
json.Unmarshal([]byte(evt.Data), &dbchange)
var instance Integration
json.Unmarshal([]byte(dbchange.Data), &instance)
log.Debug(logger, "db change event received", "ref_type", instance.RefType, "integration", integration)
if instance.RefType == integration {
switch dbchange.Action {
case "update", "UPDATE", "upsert", "UPSERT":
// copy the binary so we can rollback if needed
if err := copyFile(integrationBinary, previousIntegrationBinary); err != nil {
log.Error(logger, "error copying integration", "err", err)
break exit
}
log.Info(logger, "new integration detected, will restart in 15s", "version", instance.Version)
restartLock.Lock()
restarting = true
restarted = 0
version = instance.Version
time.Sleep(time.Second * 15)
restart <- true // force a new download
restartLock.Unlock()
case "delete", "DELETE":
// TODO -- exit with a special code to indicate we don't need to restart this integration
}
}
go evt.Commit() // we need to put in a separate thread since we're holding the sub thread
case <-done:
if currentCmd != nil {
syscall.Kill(-currentCmd.Process.Pid, syscall.SIGINT)
select {
case <-time.After(time.Second * 10):
break
case <-exited:
break
}
}
break
case force := <-restart:
log.Info(logger, "restart requested")
if currentCmd != nil && currentCmd.Process != nil {
currentCmd.Process.Kill()
currentCmd = nil
}
stoppedLock.Lock()
s := stopped
stoppedLock.Unlock()
log.Info(logger, "need to start", "stopped", s)
if !s {
restarted++
c, err := getIntegration(ctx, logger, channel, dir, publisher, integration, version, cmdargs, force)
if err != nil {
log.Error(logger, "error running integration", "err", err)
if !fileutil.FileExists(previousIntegrationBinary) {
break exit
} else {
log.Info(logger, "attempting to roll back to previous version of integration", "integration", integration)
os.Remove(integrationBinary)
os.Rename(previousIntegrationBinary, integrationBinary)
os.Chmod(integrationBinary, 0775)
c, err = startIntegration(ctx, logger, integrationBinary, cmdargs)
if err != nil {
log.Error(logger, "error running rolled back integration", "err", err)
break exit
}
}
}
currentCmd = c
os.Remove(previousIntegrationBinary)
log.Info(logger, "started", "pid", c.Process.Pid)
go func() {
// monitor the exit
err := currentCmd.Wait()
if err != nil {
if currentCmd != nil && currentCmd.ProcessState != nil {
if currentCmd.ProcessState.ExitCode() != 0 {
if !selfManaged {
// in cloud we should just end the process
log.Fatal(logger, "integration has exited", "restarted", restarted, "code", currentCmd.ProcessState.ExitCode(), "err", err)
} else {
log.Error(logger, "integration has exited", "restarted", restarted, "code", currentCmd.ProcessState.ExitCode(), "err", err)
}
}
}
log.Info(logger, "pausing", "duration", time.Second*time.Duration(restarted))
time.Sleep(time.Second * time.Duration(restarted))
} else {
restarted = 0
}
// try and restart if we're not in the stopping mode
stoppedLock.Lock()
s := stopped
stoppedLock.Unlock()
if !s {
restartLock.Lock()
r := restarting
restartLock.Unlock()
if !r {
restart <- false // restart but don't force a new download
}
} else {
exited <- true
}
}()
}
}
}
log.Info(logger, "👋")
finished <- true
},
}
func init() {
rootCmd.AddCommand(runCmd)
runCmd.Flags().String("channel", pos.Getenv("PP_CHANNEL", ""), "the channel which can be set")
runCmd.Flags().String("config", "", "the location of the config file")
runCmd.Flags().StringP("dir", "d", "", "directory inside of which to run the integration")
runCmd.Flags().String("secret", pos.Getenv("PP_AUTH_SHARED_SECRET", ""), "internal shared secret")
runCmd.Flags().String("start-file", "", "the start file to write once running")
runCmd.Flags().MarkHidden("secret")
runCmd.Flags().MarkHidden("start-file")
rootCmd.AddCommand(enrollAgentCmd)
enrollAgentCmd.Flags().String("channel", pos.Getenv("PP_CHANNEL", ""), "the channel which can be set")
enrollAgentCmd.Flags().String("config", "", "the location of the config file")
}
| validateConfig | identifier_name |
admin-bro.ts | import * as _ from 'lodash'
import * as path from 'path'
import * as fs from 'fs'
import i18n, { i18n as I18n } from 'i18next'
import slash from 'slash'
import AdminBroOptions, { AdminBroOptionsWithDefault } from './admin-bro-options.interface'
import BaseResource from './backend/adapters/base-resource'
import BaseDatabase from './backend/adapters/base-database'
import BaseRecord from './backend/adapters/base-record'
import BaseProperty from './backend/adapters/base-property'
import Filter from './backend/utils/filter'
import ValidationError from './backend/utils/validation-error'
import ConfigurationError from './backend/utils/configuration-error'
import ResourcesFactory from './backend/utils/resources-factory'
import userComponentsBundler from './backend/bundler/user-components-bundler'
import { RouterType } from './backend/router'
import Action, { RecordActionResponse } from './backend/actions/action.interface'
import { DEFAULT_PATHS } from './constants'
import loginTemplate from './frontend/login-template'
import { ListActionResponse } from './backend/actions/list-action'
import { combineTranslations, Locale } from './locale/config'
import en from './locale/en'
import { TranslateFunctions, createFunctions } from './utils/translate-functions.factory'
const pkg = JSON.parse(fs.readFileSync(path.join(__dirname, '../package.json'), 'utf-8'))
export const VERSION = pkg.version
const defaults: AdminBroOptionsWithDefault = {
rootPath: DEFAULT_PATHS.rootPath,
logoutPath: DEFAULT_PATHS.logoutPath,
loginPath: DEFAULT_PATHS.loginPath,
databases: [],
resources: [],
branding: {
companyName: 'Company',
softwareBrothers: true,
},
dashboard: {},
assets: {
styles: [],
scripts: [],
},
pages: {},
}
type ActionsMap = {
show: Action<RecordActionResponse>;
edit: Action<RecordActionResponse>;
delete: Action<RecordActionResponse>;
new: Action<RecordActionResponse>;
list: Action<ListActionResponse>;
}
type UserComponentsMap = {[key: string]: string}
export type Adapter = { Database: typeof BaseDatabase; Resource: typeof BaseResource }
/**
* Main class for AdminBro extension. It takes {@link AdminBroOptions} as a
* parameter and creates an admin instance.
*
* Its main responsibility is to fetch all the resources and/or databases given by a
* user. Its instance is a currier - injected in all other classes.
*
* @example
* const AdminBro = require('admin-bro')
* const admin = new AdminBro(AdminBroOptions)
*/
class AdminBro {
public resources: Array<BaseResource>
public options: AdminBroOptionsWithDefault
public locale!: Locale
public i18n!: I18n
public translateFunctions!: TranslateFunctions
public static registeredAdapters: Array<Adapter>
/**
* Contains set of routes available within the application.
* It is used by external plugins.
*
* @example
* const { Router } = require('admin-bro')
* Router.routes.forEach(route => {
* // map your framework routes to admin-bro routes
* // see how `admin-bro-expressjs` plugin does it.
* })
*/
public static Router: RouterType
/**
* An abstract class for all Database Adapters.
* External adapters have to implement it.
*
* @example <caption>Creating Database Adapter for some ORM</caption>
* const { BaseDatabase } = require('admin-bro')
*
* class Database extends BaseDatabase {
* constructor(ormInstance) {
* this.ormInstance = ormInstance
* }
* resources() {
* // fetch resources from your orm and convert to BaseResource
* }
* }
*/
public static BaseDatabase: typeof BaseDatabase
/**
* Class representing all records. External adapters have to implement that or at least
* their {@link BaseResource} implementation should return records of this type.
*/
public static BaseRecord: typeof BaseRecord
/**
* An abstract class for all resources. External adapters have to implement that.
*/
public static BaseResource: typeof BaseResource
/**
* Class for all properties. External adapters have to implement that or at least
* their {@link BaseResource} implementation should return records of this type.
*/
public static BaseProperty: typeof BaseProperty
/**
* Filter object passed to find method of {@link BaseResource}.
* External adapters have to use it
*/
public static Filter: typeof Filter
/**
* Validation error which is thrown when record fails validation. External adapters have
* to use it, so AdminBro can print validation errors
*/
public static ValidationError: typeof ValidationError
/**
* List of all default actions. If you want to change the behavior for all actions like:
* _list_, _edit_, _show_, _delete_ and _bulkDelete_ you can do this here.
*
* @example <caption>Modifying accessibility rules for all show actions</caption>
* const { ACTIONS } = require('admin-bro')
* ACTIONS.show.isAccessible = () => {...}
*/
public static ACTIONS: ActionsMap
/**
* AdminBro version
*/
public static VERSION: string
/**
* List of all bundled components
*/
public static UserComponents: UserComponentsMap
/**
* @param {AdminBroOptions} options Options passed to AdminBro
*/
constructor(options: AdminBroOptions = {}) {
/**
* @type {BaseResource[]}
* @description List of all resources available for the AdminBro.
* They can be fetched with the {@link AdminBro#findResource} method
*/
this.resources = []
/**
* @type {AdminBroOptions}
* @description Options given by a user
*/
this.options = _.merge({}, defaults, options)
const defaultLogo = slash(path.join(this.options.rootPath, '/frontend/assets/logo-mini.svg'))
this.options.branding = this.options.branding || {}
this.options.branding.logo = this.options.branding.logo !== undefined
? this.options.branding.logo
: defaultLogo
this.initI18n()
const { databases, resources } = this.options
const resourcesFactory = new ResourcesFactory(this, AdminBro.registeredAdapters)
this.resources = resourcesFactory.buildResources({ databases, resources })
}
initI18n(): void {
this.locale = {
translations: combineTranslations(en.translations, this.options.locale?.translations),
language: this.options.locale?.language || en.language,
}
i18n.init({
lng: this.locale.language,
initImmediate: false, // loads translations immediately
resources: {
[this.locale.language]: {
translation: this.locale.translations,
},
},
})
// mixin translate functions to AdminBro instance so users will be able to
// call adminBro.translateMessage(...)
this.translateFunctions = createFunctions(i18n)
Object.getOwnPropertyNames(this.translateFunctions).forEach((translateFunctionName) => {
this[translateFunctionName] = this.translateFunctions[translateFunctionName]
})
}
/**
* Registers various database adapters written for AdminBro.
*
* @example
* const AdminBro = require('admin-bro')
* const MongooseAdapter = require('admin-bro-mongoose')
* AdminBro.registerAdapter(MongooseAdapter)
*
* @param {Object} options
* @param {typeof BaseDatabase} options.Database subclass of {@link BaseDatabase}
* @param {typeof BaseResource} options.Resource subclass of {@link BaseResource}
*/
static registerAdapter({ Database, Resource }: {
Database: typeof BaseDatabase;
Resource: typeof BaseResource;
}): void {
if (!Database || !Resource) {
throw new Error('Adapter has to have both Database and Resource')
}
// checking if both Database and Resource have at least isAdapterFor method
if (Database.isAdapterFor && Resource.isAdapterFor) {
AdminBro.registeredAdapters.push({ Database, Resource })
} else {
throw new Error('Adapter elements has to be a subclass of AdminBro.BaseResource and AdminBro.BaseDatabase')
}
}
/**
* Initializes AdminBro instance in production. This function should be called by
* all external plugins.
*/
async initialize(): Promise<void> {
if (process.env.NODE_ENV === 'production'
&& !(process.env.ADMIN_BRO_SKIP_BUNDLE === 'true')) {
// eslint-disable-next-line no-console
console.log('AdminBro: bundling user components...')
await userComponentsBundler(this, { write: true })
}
}
/**
* Watches for local changes in files imported via {@link AdminBro.bundle}.
* It doesn't work on production environment.
*
* @return {Promise<never>}
*/
async watch(): Promise<string | undefined> {
if (process.env.NODE_ENV !== 'production') {
return userComponentsBundler(this, { write: true, watch: true })
}
return undefined
}
/**
* Renders an entire login page with email and password fields
* using {@link Renderer}.
*
* Used by external plugins
*
* @param {Object} options
* @param {String} options.action Login form action url - it could be
* '/admin/login'
* @param {String} [options.errorMessage] Optional error message. When set,
* renderer will print this message in | }
/**
* Returns resource base on its ID
*
* @example
* const User = admin.findResource('users')
* await User.findOne(userId)
*
* @param {String} resourceId ID of a resource defined under {@link BaseResource#id}
* @return {BaseResource} found resource
* @throws {Error} When resource with given id cannot be found
*/
findResource(resourceId): BaseResource {
const resource = this.resources.find(m => m._decorated?.id() === resourceId)
if (!resource) {
throw new Error([
`There are no resources with given id: "${resourceId}"`,
'This is the list of all registered resources you can use:',
this.resources.map(r => r._decorated?.id() || r.id()).join(', '),
].join('\n'))
}
return resource
}
/**
* Requires given .jsx/.tsx file, that it can be bundled to the frontend.
* It will be available under AdminBro.UserComponents[componentId].
*
* @param {String} src Path to a file containing react component.
*
* @return {String} componentId - uniq id of a component
*
* @example
* const adminBroOptions = {
* dashboard: {
* component: AdminBro.bundle('./path/to/component'),
* }
* }
*/
public static bundle(src: string): string {
const extensions = ['.jsx', '.js', '.ts', '.tsx']
let filePath = ''
const componentId = _.uniqueId('Component')
if (src[0] === '/') {
filePath = src
} else {
const stack = ((new Error()).stack || '').split('\n')
// Node = 8 shows stack like that: '(/path/to/file.ts:77:27)
const pathNode8 = stack[2].match(/\((.*):[0-9]+:[0-9]+\)/)
// Node >= 10 shows stack like that: 'at /path/to/file.ts:77:27
const pathNode10 = stack[2].match(/at (.*):[0-9]+:[0-9]+/)
if (!pathNode8 && !pathNode10) {
throw new Error('STACK does not have a file url. Check out if the node version >= 8')
}
const executionPath = (pathNode8 && pathNode8[1]) || (pathNode10 && pathNode10[1])
filePath = path.join(path.dirname(executionPath as string), src)
}
const { root, dir, name } = path.parse(filePath)
if (!extensions.find((ext) => {
const fileName = path.format({ root, dir, name, ext })
return fs.existsSync(fileName)
})) {
throw new ConfigurationError(`Given file "${src}", doesn't exist.`, 'AdminBro.html')
}
AdminBro.UserComponents[componentId] = path.format({ root, dir, name })
return componentId
}
}
AdminBro.UserComponents = {}
AdminBro.registeredAdapters = []
AdminBro.VERSION = VERSION
// eslint-disable-next-line @typescript-eslint/no-empty-interface
interface AdminBro extends TranslateFunctions {}
export const { registerAdapter } = AdminBro
export const { bundle } = AdminBro
export default AdminBro | * the form
* @return {Promise<string>} HTML of the rendered page
*/
async renderLogin({ action, errorMessage }): Promise<string> {
return loginTemplate(this, { action, errorMessage }) | random_line_split |
admin-bro.ts | import * as _ from 'lodash'
import * as path from 'path'
import * as fs from 'fs'
import i18n, { i18n as I18n } from 'i18next'
import slash from 'slash'
import AdminBroOptions, { AdminBroOptionsWithDefault } from './admin-bro-options.interface'
import BaseResource from './backend/adapters/base-resource'
import BaseDatabase from './backend/adapters/base-database'
import BaseRecord from './backend/adapters/base-record'
import BaseProperty from './backend/adapters/base-property'
import Filter from './backend/utils/filter'
import ValidationError from './backend/utils/validation-error'
import ConfigurationError from './backend/utils/configuration-error'
import ResourcesFactory from './backend/utils/resources-factory'
import userComponentsBundler from './backend/bundler/user-components-bundler'
import { RouterType } from './backend/router'
import Action, { RecordActionResponse } from './backend/actions/action.interface'
import { DEFAULT_PATHS } from './constants'
import loginTemplate from './frontend/login-template'
import { ListActionResponse } from './backend/actions/list-action'
import { combineTranslations, Locale } from './locale/config'
import en from './locale/en'
import { TranslateFunctions, createFunctions } from './utils/translate-functions.factory'
const pkg = JSON.parse(fs.readFileSync(path.join(__dirname, '../package.json'), 'utf-8'))
export const VERSION = pkg.version
const defaults: AdminBroOptionsWithDefault = {
rootPath: DEFAULT_PATHS.rootPath,
logoutPath: DEFAULT_PATHS.logoutPath,
loginPath: DEFAULT_PATHS.loginPath,
databases: [],
resources: [],
branding: {
companyName: 'Company',
softwareBrothers: true,
},
dashboard: {},
assets: {
styles: [],
scripts: [],
},
pages: {},
}
type ActionsMap = {
show: Action<RecordActionResponse>;
edit: Action<RecordActionResponse>;
delete: Action<RecordActionResponse>;
new: Action<RecordActionResponse>;
list: Action<ListActionResponse>;
}
type UserComponentsMap = {[key: string]: string}
export type Adapter = { Database: typeof BaseDatabase; Resource: typeof BaseResource }
/**
* Main class for AdminBro extension. It takes {@link AdminBroOptions} as a
* parameter and creates an admin instance.
*
* Its main responsibility is to fetch all the resources and/or databases given by a
* user. Its instance is a currier - injected in all other classes.
*
* @example
* const AdminBro = require('admin-bro')
* const admin = new AdminBro(AdminBroOptions)
*/
class AdminBro {
public resources: Array<BaseResource>
public options: AdminBroOptionsWithDefault
public locale!: Locale
public i18n!: I18n
public translateFunctions!: TranslateFunctions
public static registeredAdapters: Array<Adapter>
/**
* Contains set of routes available within the application.
* It is used by external plugins.
*
* @example
* const { Router } = require('admin-bro')
* Router.routes.forEach(route => {
* // map your framework routes to admin-bro routes
* // see how `admin-bro-expressjs` plugin does it.
* })
*/
public static Router: RouterType
/**
* An abstract class for all Database Adapters.
* External adapters have to implement it.
*
* @example <caption>Creating Database Adapter for some ORM</caption>
* const { BaseDatabase } = require('admin-bro')
*
* class Database extends BaseDatabase {
* constructor(ormInstance) {
* this.ormInstance = ormInstance
* }
* resources() {
* // fetch resources from your orm and convert to BaseResource
* }
* }
*/
public static BaseDatabase: typeof BaseDatabase
/**
* Class representing all records. External adapters have to implement that or at least
* their {@link BaseResource} implementation should return records of this type.
*/
public static BaseRecord: typeof BaseRecord
/**
* An abstract class for all resources. External adapters have to implement that.
*/
public static BaseResource: typeof BaseResource
/**
* Class for all properties. External adapters have to implement that or at least
* their {@link BaseResource} implementation should return records of this type.
*/
public static BaseProperty: typeof BaseProperty
/**
* Filter object passed to find method of {@link BaseResource}.
* External adapters have to use it
*/
public static Filter: typeof Filter
/**
* Validation error which is thrown when record fails validation. External adapters have
* to use it, so AdminBro can print validation errors
*/
public static ValidationError: typeof ValidationError
/**
* List of all default actions. If you want to change the behavior for all actions like:
* _list_, _edit_, _show_, _delete_ and _bulkDelete_ you can do this here.
*
* @example <caption>Modifying accessibility rules for all show actions</caption>
* const { ACTIONS } = require('admin-bro')
* ACTIONS.show.isAccessible = () => {...}
*/
public static ACTIONS: ActionsMap
/**
* AdminBro version
*/
public static VERSION: string
/**
* List of all bundled components
*/
public static UserComponents: UserComponentsMap
/**
* @param {AdminBroOptions} options Options passed to AdminBro
*/
constructor(options: AdminBroOptions = {}) {
/**
* @type {BaseResource[]}
* @description List of all resources available for the AdminBro.
* They can be fetched with the {@link AdminBro#findResource} method
*/
this.resources = []
/**
* @type {AdminBroOptions}
* @description Options given by a user
*/
this.options = _.merge({}, defaults, options)
const defaultLogo = slash(path.join(this.options.rootPath, '/frontend/assets/logo-mini.svg'))
this.options.branding = this.options.branding || {}
this.options.branding.logo = this.options.branding.logo !== undefined
? this.options.branding.logo
: defaultLogo
this.initI18n()
const { databases, resources } = this.options
const resourcesFactory = new ResourcesFactory(this, AdminBro.registeredAdapters)
this.resources = resourcesFactory.buildResources({ databases, resources })
}
initI18n(): void {
this.locale = {
translations: combineTranslations(en.translations, this.options.locale?.translations),
language: this.options.locale?.language || en.language,
}
i18n.init({
lng: this.locale.language,
initImmediate: false, // loads translations immediately
resources: {
[this.locale.language]: {
translation: this.locale.translations,
},
},
})
// mixin translate functions to AdminBro instance so users will be able to
// call adminBro.translateMessage(...)
this.translateFunctions = createFunctions(i18n)
Object.getOwnPropertyNames(this.translateFunctions).forEach((translateFunctionName) => {
this[translateFunctionName] = this.translateFunctions[translateFunctionName]
})
}
/**
* Registers various database adapters written for AdminBro.
*
* @example
* const AdminBro = require('admin-bro')
* const MongooseAdapter = require('admin-bro-mongoose')
* AdminBro.registerAdapter(MongooseAdapter)
*
* @param {Object} options
* @param {typeof BaseDatabase} options.Database subclass of {@link BaseDatabase}
* @param {typeof BaseResource} options.Resource subclass of {@link BaseResource}
*/
static registerAdapter({ Database, Resource }: {
Database: typeof BaseDatabase;
Resource: typeof BaseResource;
}): void {
if (!Database || !Resource) {
throw new Error('Adapter has to have both Database and Resource')
}
// checking if both Database and Resource have at least isAdapterFor method
if (Database.isAdapterFor && Resource.isAdapterFor) {
AdminBro.registeredAdapters.push({ Database, Resource })
} else {
throw new Error('Adapter elements has to be a subclass of AdminBro.BaseResource and AdminBro.BaseDatabase')
}
}
/**
* Initializes AdminBro instance in production. This function should be called by
* all external plugins.
*/
async initialize(): Promise<void> {
if (process.env.NODE_ENV === 'production'
&& !(process.env.ADMIN_BRO_SKIP_BUNDLE === 'true')) {
// eslint-disable-next-line no-console
console.log('AdminBro: bundling user components...')
await userComponentsBundler(this, { write: true })
}
}
/**
* Watches for local changes in files imported via {@link AdminBro.bundle}.
* It doesn't work on production environment.
*
* @return {Promise<never>}
*/
async watch(): Promise<string | undefined> {
if (process.env.NODE_ENV !== 'production') {
return userComponentsBundler(this, { write: true, watch: true })
}
return undefined
}
/**
* Renders an entire login page with email and password fields
* using {@link Renderer}.
*
* Used by external plugins
*
* @param {Object} options
* @param {String} options.action Login form action url - it could be
* '/admin/login'
* @param {String} [options.errorMessage] Optional error message. When set,
* renderer will print this message in
* the form
* @return {Promise<string>} HTML of the rendered page
*/
async renderLogin({ action, errorMessage }): Promise<string> {
return loginTemplate(this, { action, errorMessage })
}
/**
* Returns resource base on its ID
*
* @example
* const User = admin.findResource('users')
* await User.findOne(userId)
*
* @param {String} resourceId ID of a resource defined under {@link BaseResource#id}
* @return {BaseResource} found resource
* @throws {Error} When resource with given id cannot be found
*/
findResource(resourceId): BaseResource |
/**
* Requires given .jsx/.tsx file, that it can be bundled to the frontend.
* It will be available under AdminBro.UserComponents[componentId].
*
* @param {String} src Path to a file containing react component.
*
* @return {String} componentId - uniq id of a component
*
* @example
* const adminBroOptions = {
* dashboard: {
* component: AdminBro.bundle('./path/to/component'),
* }
* }
*/
public static bundle(src: string): string {
const extensions = ['.jsx', '.js', '.ts', '.tsx']
let filePath = ''
const componentId = _.uniqueId('Component')
if (src[0] === '/') {
filePath = src
} else {
const stack = ((new Error()).stack || '').split('\n')
// Node = 8 shows stack like that: '(/path/to/file.ts:77:27)
const pathNode8 = stack[2].match(/\((.*):[0-9]+:[0-9]+\)/)
// Node >= 10 shows stack like that: 'at /path/to/file.ts:77:27
const pathNode10 = stack[2].match(/at (.*):[0-9]+:[0-9]+/)
if (!pathNode8 && !pathNode10) {
throw new Error('STACK does not have a file url. Check out if the node version >= 8')
}
const executionPath = (pathNode8 && pathNode8[1]) || (pathNode10 && pathNode10[1])
filePath = path.join(path.dirname(executionPath as string), src)
}
const { root, dir, name } = path.parse(filePath)
if (!extensions.find((ext) => {
const fileName = path.format({ root, dir, name, ext })
return fs.existsSync(fileName)
})) {
throw new ConfigurationError(`Given file "${src}", doesn't exist.`, 'AdminBro.html')
}
AdminBro.UserComponents[componentId] = path.format({ root, dir, name })
return componentId
}
}
AdminBro.UserComponents = {}
AdminBro.registeredAdapters = []
AdminBro.VERSION = VERSION
// eslint-disable-next-line @typescript-eslint/no-empty-interface
interface AdminBro extends TranslateFunctions {}
export const { registerAdapter } = AdminBro
export const { bundle } = AdminBro
export default AdminBro
| {
const resource = this.resources.find(m => m._decorated?.id() === resourceId)
if (!resource) {
throw new Error([
`There are no resources with given id: "${resourceId}"`,
'This is the list of all registered resources you can use:',
this.resources.map(r => r._decorated?.id() || r.id()).join(', '),
].join('\n'))
}
return resource
} | identifier_body |
admin-bro.ts | import * as _ from 'lodash'
import * as path from 'path'
import * as fs from 'fs'
import i18n, { i18n as I18n } from 'i18next'
import slash from 'slash'
import AdminBroOptions, { AdminBroOptionsWithDefault } from './admin-bro-options.interface'
import BaseResource from './backend/adapters/base-resource'
import BaseDatabase from './backend/adapters/base-database'
import BaseRecord from './backend/adapters/base-record'
import BaseProperty from './backend/adapters/base-property'
import Filter from './backend/utils/filter'
import ValidationError from './backend/utils/validation-error'
import ConfigurationError from './backend/utils/configuration-error'
import ResourcesFactory from './backend/utils/resources-factory'
import userComponentsBundler from './backend/bundler/user-components-bundler'
import { RouterType } from './backend/router'
import Action, { RecordActionResponse } from './backend/actions/action.interface'
import { DEFAULT_PATHS } from './constants'
import loginTemplate from './frontend/login-template'
import { ListActionResponse } from './backend/actions/list-action'
import { combineTranslations, Locale } from './locale/config'
import en from './locale/en'
import { TranslateFunctions, createFunctions } from './utils/translate-functions.factory'
const pkg = JSON.parse(fs.readFileSync(path.join(__dirname, '../package.json'), 'utf-8'))
export const VERSION = pkg.version
const defaults: AdminBroOptionsWithDefault = {
rootPath: DEFAULT_PATHS.rootPath,
logoutPath: DEFAULT_PATHS.logoutPath,
loginPath: DEFAULT_PATHS.loginPath,
databases: [],
resources: [],
branding: {
companyName: 'Company',
softwareBrothers: true,
},
dashboard: {},
assets: {
styles: [],
scripts: [],
},
pages: {},
}
type ActionsMap = {
show: Action<RecordActionResponse>;
edit: Action<RecordActionResponse>;
delete: Action<RecordActionResponse>;
new: Action<RecordActionResponse>;
list: Action<ListActionResponse>;
}
type UserComponentsMap = {[key: string]: string}
export type Adapter = { Database: typeof BaseDatabase; Resource: typeof BaseResource }
/**
* Main class for AdminBro extension. It takes {@link AdminBroOptions} as a
* parameter and creates an admin instance.
*
* Its main responsibility is to fetch all the resources and/or databases given by a
* user. Its instance is a currier - injected in all other classes.
*
* @example
* const AdminBro = require('admin-bro')
* const admin = new AdminBro(AdminBroOptions)
*/
class AdminBro {
public resources: Array<BaseResource>
public options: AdminBroOptionsWithDefault
public locale!: Locale
public i18n!: I18n
public translateFunctions!: TranslateFunctions
public static registeredAdapters: Array<Adapter>
/**
* Contains set of routes available within the application.
* It is used by external plugins.
*
* @example
* const { Router } = require('admin-bro')
* Router.routes.forEach(route => {
* // map your framework routes to admin-bro routes
* // see how `admin-bro-expressjs` plugin does it.
* })
*/
public static Router: RouterType
/**
* An abstract class for all Database Adapters.
* External adapters have to implement it.
*
* @example <caption>Creating Database Adapter for some ORM</caption>
* const { BaseDatabase } = require('admin-bro')
*
* class Database extends BaseDatabase {
* constructor(ormInstance) {
* this.ormInstance = ormInstance
* }
* resources() {
* // fetch resources from your orm and convert to BaseResource
* }
* }
*/
public static BaseDatabase: typeof BaseDatabase
/**
* Class representing all records. External adapters have to implement that or at least
* their {@link BaseResource} implementation should return records of this type.
*/
public static BaseRecord: typeof BaseRecord
/**
* An abstract class for all resources. External adapters have to implement that.
*/
public static BaseResource: typeof BaseResource
/**
* Class for all properties. External adapters have to implement that or at least
* their {@link BaseResource} implementation should return records of this type.
*/
public static BaseProperty: typeof BaseProperty
/**
* Filter object passed to find method of {@link BaseResource}.
* External adapters have to use it
*/
public static Filter: typeof Filter
/**
* Validation error which is thrown when record fails validation. External adapters have
* to use it, so AdminBro can print validation errors
*/
public static ValidationError: typeof ValidationError
/**
* List of all default actions. If you want to change the behavior for all actions like:
* _list_, _edit_, _show_, _delete_ and _bulkDelete_ you can do this here.
*
* @example <caption>Modifying accessibility rules for all show actions</caption>
* const { ACTIONS } = require('admin-bro')
* ACTIONS.show.isAccessible = () => {...}
*/
public static ACTIONS: ActionsMap
/**
* AdminBro version
*/
public static VERSION: string
/**
* List of all bundled components
*/
public static UserComponents: UserComponentsMap
/**
* @param {AdminBroOptions} options Options passed to AdminBro
*/
constructor(options: AdminBroOptions = {}) {
/**
* @type {BaseResource[]}
* @description List of all resources available for the AdminBro.
* They can be fetched with the {@link AdminBro#findResource} method
*/
this.resources = []
/**
* @type {AdminBroOptions}
* @description Options given by a user
*/
this.options = _.merge({}, defaults, options)
const defaultLogo = slash(path.join(this.options.rootPath, '/frontend/assets/logo-mini.svg'))
this.options.branding = this.options.branding || {}
this.options.branding.logo = this.options.branding.logo !== undefined
? this.options.branding.logo
: defaultLogo
this.initI18n()
const { databases, resources } = this.options
const resourcesFactory = new ResourcesFactory(this, AdminBro.registeredAdapters)
this.resources = resourcesFactory.buildResources({ databases, resources })
}
initI18n(): void {
this.locale = {
translations: combineTranslations(en.translations, this.options.locale?.translations),
language: this.options.locale?.language || en.language,
}
i18n.init({
lng: this.locale.language,
initImmediate: false, // loads translations immediately
resources: {
[this.locale.language]: {
translation: this.locale.translations,
},
},
})
// mixin translate functions to AdminBro instance so users will be able to
// call adminBro.translateMessage(...)
this.translateFunctions = createFunctions(i18n)
Object.getOwnPropertyNames(this.translateFunctions).forEach((translateFunctionName) => {
this[translateFunctionName] = this.translateFunctions[translateFunctionName]
})
}
/**
* Registers various database adapters written for AdminBro.
*
* @example
* const AdminBro = require('admin-bro')
* const MongooseAdapter = require('admin-bro-mongoose')
* AdminBro.registerAdapter(MongooseAdapter)
*
* @param {Object} options
* @param {typeof BaseDatabase} options.Database subclass of {@link BaseDatabase}
* @param {typeof BaseResource} options.Resource subclass of {@link BaseResource}
*/
static registerAdapter({ Database, Resource }: {
Database: typeof BaseDatabase;
Resource: typeof BaseResource;
}): void {
if (!Database || !Resource) {
throw new Error('Adapter has to have both Database and Resource')
}
// checking if both Database and Resource have at least isAdapterFor method
if (Database.isAdapterFor && Resource.isAdapterFor) {
AdminBro.registeredAdapters.push({ Database, Resource })
} else {
throw new Error('Adapter elements has to be a subclass of AdminBro.BaseResource and AdminBro.BaseDatabase')
}
}
/**
* Initializes AdminBro instance in production. This function should be called by
* all external plugins.
*/
async initialize(): Promise<void> {
if (process.env.NODE_ENV === 'production'
&& !(process.env.ADMIN_BRO_SKIP_BUNDLE === 'true')) {
// eslint-disable-next-line no-console
console.log('AdminBro: bundling user components...')
await userComponentsBundler(this, { write: true })
}
}
/**
* Watches for local changes in files imported via {@link AdminBro.bundle}.
* It doesn't work on production environment.
*
* @return {Promise<never>}
*/
async watch(): Promise<string | undefined> {
if (process.env.NODE_ENV !== 'production') {
return userComponentsBundler(this, { write: true, watch: true })
}
return undefined
}
/**
* Renders an entire login page with email and password fields
* using {@link Renderer}.
*
* Used by external plugins
*
* @param {Object} options
* @param {String} options.action Login form action url - it could be
* '/admin/login'
* @param {String} [options.errorMessage] Optional error message. When set,
* renderer will print this message in
* the form
* @return {Promise<string>} HTML of the rendered page
*/
async renderLogin({ action, errorMessage }): Promise<string> {
return loginTemplate(this, { action, errorMessage })
}
/**
* Returns resource base on its ID
*
* @example
* const User = admin.findResource('users')
* await User.findOne(userId)
*
* @param {String} resourceId ID of a resource defined under {@link BaseResource#id}
* @return {BaseResource} found resource
* @throws {Error} When resource with given id cannot be found
*/
findResource(resourceId): BaseResource {
const resource = this.resources.find(m => m._decorated?.id() === resourceId)
if (!resource) {
throw new Error([
`There are no resources with given id: "${resourceId}"`,
'This is the list of all registered resources you can use:',
this.resources.map(r => r._decorated?.id() || r.id()).join(', '),
].join('\n'))
}
return resource
}
/**
* Requires given .jsx/.tsx file, that it can be bundled to the frontend.
* It will be available under AdminBro.UserComponents[componentId].
*
* @param {String} src Path to a file containing react component.
*
* @return {String} componentId - uniq id of a component
*
* @example
* const adminBroOptions = {
* dashboard: {
* component: AdminBro.bundle('./path/to/component'),
* }
* }
*/
public static bundle(src: string): string {
const extensions = ['.jsx', '.js', '.ts', '.tsx']
let filePath = ''
const componentId = _.uniqueId('Component')
if (src[0] === '/') {
filePath = src
} else |
const { root, dir, name } = path.parse(filePath)
if (!extensions.find((ext) => {
const fileName = path.format({ root, dir, name, ext })
return fs.existsSync(fileName)
})) {
throw new ConfigurationError(`Given file "${src}", doesn't exist.`, 'AdminBro.html')
}
AdminBro.UserComponents[componentId] = path.format({ root, dir, name })
return componentId
}
}
AdminBro.UserComponents = {}
AdminBro.registeredAdapters = []
AdminBro.VERSION = VERSION
// eslint-disable-next-line @typescript-eslint/no-empty-interface
interface AdminBro extends TranslateFunctions {}
export const { registerAdapter } = AdminBro
export const { bundle } = AdminBro
export default AdminBro
| {
const stack = ((new Error()).stack || '').split('\n')
// Node = 8 shows stack like that: '(/path/to/file.ts:77:27)
const pathNode8 = stack[2].match(/\((.*):[0-9]+:[0-9]+\)/)
// Node >= 10 shows stack like that: 'at /path/to/file.ts:77:27
const pathNode10 = stack[2].match(/at (.*):[0-9]+:[0-9]+/)
if (!pathNode8 && !pathNode10) {
throw new Error('STACK does not have a file url. Check out if the node version >= 8')
}
const executionPath = (pathNode8 && pathNode8[1]) || (pathNode10 && pathNode10[1])
filePath = path.join(path.dirname(executionPath as string), src)
} | conditional_block |
admin-bro.ts | import * as _ from 'lodash'
import * as path from 'path'
import * as fs from 'fs'
import i18n, { i18n as I18n } from 'i18next'
import slash from 'slash'
import AdminBroOptions, { AdminBroOptionsWithDefault } from './admin-bro-options.interface'
import BaseResource from './backend/adapters/base-resource'
import BaseDatabase from './backend/adapters/base-database'
import BaseRecord from './backend/adapters/base-record'
import BaseProperty from './backend/adapters/base-property'
import Filter from './backend/utils/filter'
import ValidationError from './backend/utils/validation-error'
import ConfigurationError from './backend/utils/configuration-error'
import ResourcesFactory from './backend/utils/resources-factory'
import userComponentsBundler from './backend/bundler/user-components-bundler'
import { RouterType } from './backend/router'
import Action, { RecordActionResponse } from './backend/actions/action.interface'
import { DEFAULT_PATHS } from './constants'
import loginTemplate from './frontend/login-template'
import { ListActionResponse } from './backend/actions/list-action'
import { combineTranslations, Locale } from './locale/config'
import en from './locale/en'
import { TranslateFunctions, createFunctions } from './utils/translate-functions.factory'
const pkg = JSON.parse(fs.readFileSync(path.join(__dirname, '../package.json'), 'utf-8'))
export const VERSION = pkg.version
const defaults: AdminBroOptionsWithDefault = {
rootPath: DEFAULT_PATHS.rootPath,
logoutPath: DEFAULT_PATHS.logoutPath,
loginPath: DEFAULT_PATHS.loginPath,
databases: [],
resources: [],
branding: {
companyName: 'Company',
softwareBrothers: true,
},
dashboard: {},
assets: {
styles: [],
scripts: [],
},
pages: {},
}
type ActionsMap = {
show: Action<RecordActionResponse>;
edit: Action<RecordActionResponse>;
delete: Action<RecordActionResponse>;
new: Action<RecordActionResponse>;
list: Action<ListActionResponse>;
}
type UserComponentsMap = {[key: string]: string}
export type Adapter = { Database: typeof BaseDatabase; Resource: typeof BaseResource }
/**
* Main class for AdminBro extension. It takes {@link AdminBroOptions} as a
* parameter and creates an admin instance.
*
* Its main responsibility is to fetch all the resources and/or databases given by a
* user. Its instance is a currier - injected in all other classes.
*
* @example
* const AdminBro = require('admin-bro')
* const admin = new AdminBro(AdminBroOptions)
*/
class AdminBro {
public resources: Array<BaseResource>
public options: AdminBroOptionsWithDefault
public locale!: Locale
public i18n!: I18n
public translateFunctions!: TranslateFunctions
public static registeredAdapters: Array<Adapter>
/**
* Contains set of routes available within the application.
* It is used by external plugins.
*
* @example
* const { Router } = require('admin-bro')
* Router.routes.forEach(route => {
* // map your framework routes to admin-bro routes
* // see how `admin-bro-expressjs` plugin does it.
* })
*/
public static Router: RouterType
/**
* An abstract class for all Database Adapters.
* External adapters have to implement it.
*
* @example <caption>Creating Database Adapter for some ORM</caption>
* const { BaseDatabase } = require('admin-bro')
*
* class Database extends BaseDatabase {
* constructor(ormInstance) {
* this.ormInstance = ormInstance
* }
* resources() {
* // fetch resources from your orm and convert to BaseResource
* }
* }
*/
public static BaseDatabase: typeof BaseDatabase
/**
* Class representing all records. External adapters have to implement that or at least
* their {@link BaseResource} implementation should return records of this type.
*/
public static BaseRecord: typeof BaseRecord
/**
* An abstract class for all resources. External adapters have to implement that.
*/
public static BaseResource: typeof BaseResource
/**
* Class for all properties. External adapters have to implement that or at least
* their {@link BaseResource} implementation should return records of this type.
*/
public static BaseProperty: typeof BaseProperty
/**
* Filter object passed to find method of {@link BaseResource}.
* External adapters have to use it
*/
public static Filter: typeof Filter
/**
* Validation error which is thrown when record fails validation. External adapters have
* to use it, so AdminBro can print validation errors
*/
public static ValidationError: typeof ValidationError
/**
* List of all default actions. If you want to change the behavior for all actions like:
* _list_, _edit_, _show_, _delete_ and _bulkDelete_ you can do this here.
*
* @example <caption>Modifying accessibility rules for all show actions</caption>
* const { ACTIONS } = require('admin-bro')
* ACTIONS.show.isAccessible = () => {...}
*/
public static ACTIONS: ActionsMap
/**
* AdminBro version
*/
public static VERSION: string
/**
* List of all bundled components
*/
public static UserComponents: UserComponentsMap
/**
* @param {AdminBroOptions} options Options passed to AdminBro
*/
constructor(options: AdminBroOptions = {}) {
/**
* @type {BaseResource[]}
* @description List of all resources available for the AdminBro.
* They can be fetched with the {@link AdminBro#findResource} method
*/
this.resources = []
/**
* @type {AdminBroOptions}
* @description Options given by a user
*/
this.options = _.merge({}, defaults, options)
const defaultLogo = slash(path.join(this.options.rootPath, '/frontend/assets/logo-mini.svg'))
this.options.branding = this.options.branding || {}
this.options.branding.logo = this.options.branding.logo !== undefined
? this.options.branding.logo
: defaultLogo
this.initI18n()
const { databases, resources } = this.options
const resourcesFactory = new ResourcesFactory(this, AdminBro.registeredAdapters)
this.resources = resourcesFactory.buildResources({ databases, resources })
}
initI18n(): void {
this.locale = {
translations: combineTranslations(en.translations, this.options.locale?.translations),
language: this.options.locale?.language || en.language,
}
i18n.init({
lng: this.locale.language,
initImmediate: false, // loads translations immediately
resources: {
[this.locale.language]: {
translation: this.locale.translations,
},
},
})
// mixin translate functions to AdminBro instance so users will be able to
// call adminBro.translateMessage(...)
this.translateFunctions = createFunctions(i18n)
Object.getOwnPropertyNames(this.translateFunctions).forEach((translateFunctionName) => {
this[translateFunctionName] = this.translateFunctions[translateFunctionName]
})
}
/**
* Registers various database adapters written for AdminBro.
*
* @example
* const AdminBro = require('admin-bro')
* const MongooseAdapter = require('admin-bro-mongoose')
* AdminBro.registerAdapter(MongooseAdapter)
*
* @param {Object} options
* @param {typeof BaseDatabase} options.Database subclass of {@link BaseDatabase}
* @param {typeof BaseResource} options.Resource subclass of {@link BaseResource}
*/
static registerAdapter({ Database, Resource }: {
Database: typeof BaseDatabase;
Resource: typeof BaseResource;
}): void {
if (!Database || !Resource) {
throw new Error('Adapter has to have both Database and Resource')
}
// checking if both Database and Resource have at least isAdapterFor method
if (Database.isAdapterFor && Resource.isAdapterFor) {
AdminBro.registeredAdapters.push({ Database, Resource })
} else {
throw new Error('Adapter elements has to be a subclass of AdminBro.BaseResource and AdminBro.BaseDatabase')
}
}
/**
* Initializes AdminBro instance in production. This function should be called by
* all external plugins.
*/
async initialize(): Promise<void> {
if (process.env.NODE_ENV === 'production'
&& !(process.env.ADMIN_BRO_SKIP_BUNDLE === 'true')) {
// eslint-disable-next-line no-console
console.log('AdminBro: bundling user components...')
await userComponentsBundler(this, { write: true })
}
}
/**
* Watches for local changes in files imported via {@link AdminBro.bundle}.
* It doesn't work on production environment.
*
* @return {Promise<never>}
*/
async watch(): Promise<string | undefined> {
if (process.env.NODE_ENV !== 'production') {
return userComponentsBundler(this, { write: true, watch: true })
}
return undefined
}
/**
* Renders an entire login page with email and password fields
* using {@link Renderer}.
*
* Used by external plugins
*
* @param {Object} options
* @param {String} options.action Login form action url - it could be
* '/admin/login'
* @param {String} [options.errorMessage] Optional error message. When set,
* renderer will print this message in
* the form
* @return {Promise<string>} HTML of the rendered page
*/
async | ({ action, errorMessage }): Promise<string> {
return loginTemplate(this, { action, errorMessage })
}
/**
* Returns resource base on its ID
*
* @example
* const User = admin.findResource('users')
* await User.findOne(userId)
*
* @param {String} resourceId ID of a resource defined under {@link BaseResource#id}
* @return {BaseResource} found resource
* @throws {Error} When resource with given id cannot be found
*/
findResource(resourceId): BaseResource {
const resource = this.resources.find(m => m._decorated?.id() === resourceId)
if (!resource) {
throw new Error([
`There are no resources with given id: "${resourceId}"`,
'This is the list of all registered resources you can use:',
this.resources.map(r => r._decorated?.id() || r.id()).join(', '),
].join('\n'))
}
return resource
}
/**
* Requires given .jsx/.tsx file, that it can be bundled to the frontend.
* It will be available under AdminBro.UserComponents[componentId].
*
* @param {String} src Path to a file containing react component.
*
* @return {String} componentId - uniq id of a component
*
* @example
* const adminBroOptions = {
* dashboard: {
* component: AdminBro.bundle('./path/to/component'),
* }
* }
*/
public static bundle(src: string): string {
const extensions = ['.jsx', '.js', '.ts', '.tsx']
let filePath = ''
const componentId = _.uniqueId('Component')
if (src[0] === '/') {
filePath = src
} else {
const stack = ((new Error()).stack || '').split('\n')
// Node = 8 shows stack like that: '(/path/to/file.ts:77:27)
const pathNode8 = stack[2].match(/\((.*):[0-9]+:[0-9]+\)/)
// Node >= 10 shows stack like that: 'at /path/to/file.ts:77:27
const pathNode10 = stack[2].match(/at (.*):[0-9]+:[0-9]+/)
if (!pathNode8 && !pathNode10) {
throw new Error('STACK does not have a file url. Check out if the node version >= 8')
}
const executionPath = (pathNode8 && pathNode8[1]) || (pathNode10 && pathNode10[1])
filePath = path.join(path.dirname(executionPath as string), src)
}
const { root, dir, name } = path.parse(filePath)
if (!extensions.find((ext) => {
const fileName = path.format({ root, dir, name, ext })
return fs.existsSync(fileName)
})) {
throw new ConfigurationError(`Given file "${src}", doesn't exist.`, 'AdminBro.html')
}
AdminBro.UserComponents[componentId] = path.format({ root, dir, name })
return componentId
}
}
AdminBro.UserComponents = {}
AdminBro.registeredAdapters = []
AdminBro.VERSION = VERSION
// eslint-disable-next-line @typescript-eslint/no-empty-interface
interface AdminBro extends TranslateFunctions {}
export const { registerAdapter } = AdminBro
export const { bundle } = AdminBro
export default AdminBro
| renderLogin | identifier_name |
movies.go | package data
import (
"context"
"database/sql"
"errors"
"fmt"
"time"
"github.com/ahojo/greenlight/internal/validator"
"github.com/lib/pq"
)
var (
ErrRecordNotFound = errors.New("no record matching request")
ErrEditConflict = errors.New("edit conflict")
)
// Models wraps all of our database models
type Models struct {
Movies MovieModel
Users UserModel
Token TokenModel
Permissions PermissionModel
}
// Creates a Models that holds all of our database models.
func NewModels(db *sql.DB) Models {
return Models{
Movies: MovieModel{DB: db},
Users: UserModel{DB: db},
Token: TokenModel{DB: db},
Permissions: PermissionModel{DB: db},
}
}
// Movie data that we will reutrn as JSON
// The props all need to be exported
type Movie struct {
ID int64 `json:"id"` // Unique int ID for the movie
CreatedAt time.Time `json:"-"` // Timestamp for when the movie is added to our db - not relevant so "-" means to never show it.
Title string `json:"title"`
Year int32 `json:"int32,omitempty"` // Release year
// The Runtime MarshalJSON() receiver will be called now.
Runtime Runtime `json:"runtime,omitempty"` // omitempty means to not show it if there is no data.
// If you want to use omitempty and not change the key name then you can leave it blank in the struct tag — like this: json:",omitempty". Notice that the leading comma is still required.
Genres []string `json:"genres,omitempty"`
Version int32 `json:"version"` // incremented everytime the movie info is updated
}
func ValidateMovie(v *validator.Validator, movie *Movie) {
// Check() method to execute the validation checks. Adds the provided key and error message to the errors map.
v.Check(movie.Title != "", "title", "must be provided")
v.Check(len(movie.Title) <= 500, "title", "must not be more than 500 bytes long")
v.Check(movie.Runtime != 0, "runtime", "must be provided")
v.Check(movie.Runtime > 0, "runtime", "must be a positive integer")
v.Check(movie.Genres != nil, "genres", "must be provided")
v.Check(len(movie.Genres) >= 1, "genres", "must contain at least 1 genre")
v.Check(len(movie.Genres) <= 5, "genres", "must not contain more than 5 genres")
v.Check(validator.Unique(movie.Genres), "genres", "must not contain duplicate values")
v.Check(movie.Year != 0, "year", "must be provided")
v.Check(movie.Year >= 1888, "year", "must be greater than 1888")
v.Check(movie.Year <= int32(time.Now().Year()), "year", "must not be in the future")
}
// MovieModel wraps our db connection
type MovieModel struct {
DB *sql.DB
}
/* DATABASE QUERIES */
/**
GET METHODS
*/
// Get gets a specific movie from our database
func (m *MovieModel) Get(id int64) (*Movie, error) {
// The postgresql bigserial type starts autoincrementing at 1.
// No movies will have a value below 1.
if id < 1 {
return nil, ErrRecordNotFound
}
// Sql query
// pq_sleep(10) to simulate a long running query
// stmt := `SELECT pg_sleep(10),id,created_at,title,year,runtime,genres,version
// FROM movies
// WHERE id = $1`
stmt := `SELECT id,created_at,title,year,runtime,genres,version
FROM movies
WHERE id = $1`
// declare a movie
var movie Movie
// ctx.WithTimeout() funciton to carry a 3 second timeout deadline.
// emtpy context.Background() is the parent context
ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
// IMPORTANT, use defer cancel() so we can cancel the context before Get() returns.
defer cancel()
// Execute the query NOTE: that we have to use pg.Array() here
// err := m.DB.QueryRow(stmt, id).Scan(
// &[]byte{}, // for the pg_sleep(10)
// &movie.ID,
// &movie.CreatedAt,
// &movie.Title,
// &movie.Year,
// &movie.Runtime,
// pq.Array(&movie.Genres),
// &movie.Version,
// )
err := m.DB.QueryRowContext(ctx, stmt, id).Scan(
&movie.ID,
&movie.CreatedAt,
&movie.Title,
&movie.Year,
&movie.Runtime,
pq.Array(&movie.Genres),
&movie.Version,
)
if err != nil {
switch {
case errors.Is(err, sql.ErrNoRows):
return nil, ErrRecordNotFound
default:
return nil, err
}
}
return &movie, err
}
// GetAll returns a slice of Movies.
func (m *MovieModel) GetAll(title string, gernres []string, filters Filters) ([]*Movie, Metadata, error) {
// Sql Query
// query := `
// SELECT id, created_at, title, year, runtime, genres, version
// FROM movies
// ORDER BY id
// `
/*
This SQL query is designed so that each of the filters behaves like it is ‘optional’. For
example, the condition (LOWER(title) = LOWER($1) OR $1 = '') will evaluate as true if
the placeholder parameter $1 is a case-insensitive match for the movie title or the
placeholder parameter equals ''. So this filter condition will essentially be ‘skipped’ when
movie title being searched for is the empty string "".
The (genres @> $2 OR $2 = '{}') condition works in the same way. The @> symbol is the
‘contains’ operator for PostgreSQL arrays, and this condition will return true if all values in
the placeholder parameter $2 are contained in the database genres field or the placeholder
parameter contains an empty array.
https://www.postgresql.org/docs/9.6/functions-array.html
*/
// query := `
// SELECT id, created_at, title, year, runtime, genres, version
// FROM movies
// WHERE (LOWER(title) = LOWER($1) OR $1 = '')
// AND (genres @> $2 OR $2 = '{}')
// ORDER BY id`
/* Add FULL TEXT SEARCH PostgreSQL feature
The to_tsvector('simple', title) function takes a movie title and splits it into lexemes.
We specify the simple configuration, which means that the lexemes are just lowercase
versions of the words in the title
The plainto_tsquery('simple', $1) function takes a search value and turns it into a
formatted query term.
It normalizes the search value (again using the simple configuration), strips any special characters, and
inserts the and operator & between the words.
The @@ operator is the matches operator. In our statement we are using it to check whether
the generated query term matches the lexemes.
*/
// query := `
// SELECT id, created_at, title, year, runtime, genres, version
// FROM movies
// WHERE (to_tsvector('simple', title) @@ plainto_tsquery('simple', $1) OR $1 = '')
// AND (genres @> $2 OR $2 = '{}')
// ORDER BY id
// `
/* could have also used ILIKE
SELECT id, created_at, title, year, runtime, genres, version
FROM movies
WHERE (title ILIKE $1 OR $1 = '')
AND (genres @> $2 OR $2 = '{}')
ORDER BY id
*/
// Add an ORDER BY clause and interpolate the sort column and direction. Importantly
// notice that we also include a secondary sort on the movie ID to ensure a
// consistent ordering.
// Added the window function to count the number of (filtered) records
query := fmt.Sprintf(`
SELECT COUNT(*) OVER(),id, created_at, title, year, runtime, genres, version
FROM movies
WHERE (to_tsvector('simple', title) @@ plainto_tsquery('simple', $1) OR $1 = '')
AND (genres @> $2 OR $2 = '{}')
ORDER BY %s %s, id ASC
LIMIT $3 OFFSET $4`, filters.sortColumn(), filters.sortDirection())
// 3 second context timeout
ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
defer cancel()
args := []interface{}{
title,
pq.Array(gernres),
filters.limit(),
filters.offset(),
}
// Get back the data from the database. Cancels if takes too long
// Title and genres have the default params.
rows, err := m.DB.QueryContext(ctx, query, args...)
if err != nil {
return nil, Metadata{}, err
}
// Make sure to close the rows stream return
defer rows.Close()
totalRecords := 0
// data structure to hold all of our movies
var movies = []*Movie{}
// Iterate through the rows returned
for rows.Next() {
var movie Movie
// Scan the values from the row into the Movie
// Note: pq.Array() again
err := rows.Scan(
&totalRecords, // Scan the count from the window function into total records
&movie.ID,
&movie.CreatedAt,
&movie.Title,
&movie.Year,
&movie.Runtime,
pq.Array(&movie.Genres),
&movie.Version,
)
if err != nil {
return nil, Metadata{}, err
}
movies = append(movies, &movie)
}
// When the rows.Next() finishes, if there is an error it's in rows.Err()
if err = rows.Err(); err != nil {
return nil, Metadata{}, err
}
// Generate a Metadata struct, passing in the total record count and pagination params from the client
metadata := calculateMetadata(totalRecords, filters.Page, filters.PageSize)
return movies, metadata, nil
}
// Insert inserts a new movie record
func (m *MovieModel) Insert(movie *Movie) error {
// Define the SQL query for inserting a new record in the movies table and returning the system generated data
query := `INSERT INTO movies (title, year, runtime, genres)
VALUES ($1, $2, $3, $4)
RETURNING id, created_at, version`
// Create an args slice containing the values for the placeholder parameters from the movie struct
args := []interface{}{movie.Title, movie.Year, movie.Runtime, pq.Array(movie.Genres)}
ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
defer cancel()
// Execute the query.
return m.DB.QueryRowContext(ctx, query, args...).Scan(&movie.ID, &movie.CreatedAt, &movie.Version)
}
// Update updates a specific movie from our database
func (m *MovieModel) Update(movie *Movie) error {
/* potential to use uuid here
UPDATE movies
SET title = $1, year = $2, runtime = $3, genres = $4, version = uuid_generate_v4()
WHERE id = $5 AND
**/
// Add version = $6, so we can stop race conditions
query := `
UPDATE movies
SET title = $1, year = $2, runtime = $3, genres = $4, version = version + 1
WHERE id = $5 AND version = $6
RETURNING version
`
// create the arg slice contaninig the values for the placeholder params.
args := []interface{}{
movie.Title,
movie.Year,
movie.Runtime,
pq.Array(movie.Genres),
movie.ID,
movie.Version, // Add the expected movie version
}
ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
defer cancel()
// If no matching row could be found (version has been changed)
err := m.DB.QueryRowContext(ctx, query, args...).Scan(&movie.Version)
if err != nil {
switch {
case errors.Is(err, sql.ErrNoRows):
return ErrEditConflict
default:
return err
}
}
return nil
}
// Delete
func (m *MovieModel) Delete(id int64) error {
// ids can't be less than 1
if id < 1 {
return ErrRecordNotFound
}
query := `
DELETE FROM movies
WHERE id = $1`
ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
defer cancel()
// Returns sql.Result for how many rows affected
result, err := m.DB.ExecContext(ctx, query, id)
if err != nil {
return err
}
// Call the RowsAffected() to get the # of rows
rowsAffected, err := result.RowsAffected()
if err != nil {
return err
}
if rowsAffected == 0 {
return Err | }
| RecordNotFound
}
return nil
| conditional_block |
movies.go | package data
import (
"context"
"database/sql"
"errors"
"fmt"
"time"
"github.com/ahojo/greenlight/internal/validator"
"github.com/lib/pq"
)
var (
ErrRecordNotFound = errors.New("no record matching request")
ErrEditConflict = errors.New("edit conflict")
)
// Models wraps all of our database models
type Models struct {
Movies MovieModel
Users UserModel
Token TokenModel
Permissions PermissionModel
}
// Creates a Models that holds all of our database models.
func NewModels(db *sql.DB) Models {
return Models{
Movies: MovieModel{DB: db},
Users: UserModel{DB: db},
Token: TokenModel{DB: db},
Permissions: PermissionModel{DB: db},
}
}
// Movie data that we will reutrn as JSON
// The props all need to be exported
type Movie struct {
ID int64 `json:"id"` // Unique int ID for the movie
CreatedAt time.Time `json:"-"` // Timestamp for when the movie is added to our db - not relevant so "-" means to never show it.
Title string `json:"title"`
Year int32 `json:"int32,omitempty"` // Release year
// The Runtime MarshalJSON() receiver will be called now.
Runtime Runtime `json:"runtime,omitempty"` // omitempty means to not show it if there is no data.
// If you want to use omitempty and not change the key name then you can leave it blank in the struct tag — like this: json:",omitempty". Notice that the leading comma is still required.
Genres []string `json:"genres,omitempty"`
Version int32 `json:"version"` // incremented everytime the movie info is updated
}
func ValidateMovie(v *validator.Validator, movie *Movie) {
// Check() method to execute the validation checks. Adds the provided key and error message to the errors map.
v.Check(movie.Title != "", "title", "must be provided")
v.Check(len(movie.Title) <= 500, "title", "must not be more than 500 bytes long")
v.Check(movie.Runtime != 0, "runtime", "must be provided")
v.Check(movie.Runtime > 0, "runtime", "must be a positive integer")
v.Check(movie.Genres != nil, "genres", "must be provided")
v.Check(len(movie.Genres) >= 1, "genres", "must contain at least 1 genre")
v.Check(len(movie.Genres) <= 5, "genres", "must not contain more than 5 genres")
v.Check(validator.Unique(movie.Genres), "genres", "must not contain duplicate values")
v.Check(movie.Year != 0, "year", "must be provided")
v.Check(movie.Year >= 1888, "year", "must be greater than 1888")
v.Check(movie.Year <= int32(time.Now().Year()), "year", "must not be in the future")
}
// MovieModel wraps our db connection
type MovieModel struct {
DB *sql.DB
}
/* DATABASE QUERIES */
/**
GET METHODS
*/
// Get gets a specific movie from our database
func (m *MovieModel) Get(id int64) (*Movie, error) {
// The postgresql bigserial type starts autoincrementing at 1.
// No movies will have a value below 1.
if id < 1 {
return nil, ErrRecordNotFound
}
// Sql query
// pq_sleep(10) to simulate a long running query
// stmt := `SELECT pg_sleep(10),id,created_at,title,year,runtime,genres,version
// FROM movies
// WHERE id = $1`
stmt := `SELECT id,created_at,title,year,runtime,genres,version
FROM movies
WHERE id = $1`
// declare a movie
var movie Movie
// ctx.WithTimeout() funciton to carry a 3 second timeout deadline.
// emtpy context.Background() is the parent context
ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
// IMPORTANT, use defer cancel() so we can cancel the context before Get() returns.
defer cancel()
// Execute the query NOTE: that we have to use pg.Array() here
// err := m.DB.QueryRow(stmt, id).Scan(
// &[]byte{}, // for the pg_sleep(10)
// &movie.ID,
// &movie.CreatedAt,
// &movie.Title,
// &movie.Year,
// &movie.Runtime,
// pq.Array(&movie.Genres),
// &movie.Version,
// )
err := m.DB.QueryRowContext(ctx, stmt, id).Scan(
&movie.ID,
&movie.CreatedAt,
&movie.Title,
&movie.Year,
&movie.Runtime,
pq.Array(&movie.Genres),
&movie.Version,
)
if err != nil {
switch {
case errors.Is(err, sql.ErrNoRows):
return nil, ErrRecordNotFound
default:
return nil, err
}
}
return &movie, err
}
// GetAll returns a slice of Movies.
func (m *MovieModel) GetAll(title string, gernres []string, filters Filters) ([]*Movie, Metadata, error) {
// Sql Query
// query := `
// SELECT id, created_at, title, year, runtime, genres, version
// FROM movies
// ORDER BY id
// `
/*
This SQL query is designed so that each of the filters behaves like it is ‘optional’. For
example, the condition (LOWER(title) = LOWER($1) OR $1 = '') will evaluate as true if
the placeholder parameter $1 is a case-insensitive match for the movie title or the
placeholder parameter equals ''. So this filter condition will essentially be ‘skipped’ when
movie title being searched for is the empty string "".
The (genres @> $2 OR $2 = '{}') condition works in the same way. The @> symbol is the
‘contains’ operator for PostgreSQL arrays, and this condition will return true if all values in
the placeholder parameter $2 are contained in the database genres field or the placeholder
parameter contains an empty array.
https://www.postgresql.org/docs/9.6/functions-array.html
*/
// query := `
// SELECT id, created_at, title, year, runtime, genres, version
// FROM movies
// WHERE (LOWER(title) = LOWER($1) OR $1 = '')
// AND (genres @> $2 OR $2 = '{}')
// ORDER BY id`
/* Add FULL TEXT SEARCH PostgreSQL feature
The to_tsvector('simple', title) function takes a movie title and splits it into lexemes.
We specify the simple configuration, which means that the lexemes are just lowercase
versions of the words in the title
The plainto_tsquery('simple', $1) function takes a search value and turns it into a
formatted query term.
It normalizes the search value (again using the simple configuration), strips any special characters, and
inserts the and operator & between the words.
The @@ operator is the matches operator. In our statement we are using it to check whether
the generated query term matches the lexemes.
*/
// query := `
// SELECT id, created_at, title, year, runtime, genres, version
// FROM movies
// WHERE (to_tsvector('simple', title) @@ plainto_tsquery('simple', $1) OR $1 = '')
// AND (genres @> $2 OR $2 = '{}')
// ORDER BY id
// `
/* could have also used ILIKE
SELECT id, created_at, title, year, runtime, genres, version
FROM movies
WHERE (title ILIKE $1 OR $1 = '')
AND (genres @> $2 OR $2 = '{}')
ORDER BY id
*/
// Add an ORDER BY clause and interpolate the sort column and direction. Importantly
// notice that we also include a secondary sort on the movie ID to ensure a
// consistent ordering.
// Added the window function to count the number of (filtered) records
query := fmt.Sprintf(`
SELECT COUNT(*) OVER(),id, created_at, title, year, runtime, genres, version
FROM movies
WHERE (to_tsvector('simple', title) @@ plainto_tsquery('simple', $1) OR $1 = '')
AND (genres @> $2 OR $2 = '{}')
ORDER BY %s %s, id ASC
LIMIT $3 OFFSET $4`, filters.sortColumn(), filters.sortDirection())
// 3 second context timeout
ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
defer cancel()
args := []interface{}{
title,
pq.Array(gernres),
filters.limit(),
filters.offset(),
}
// Get back the data from the database. Cancels if takes too long
// Title and genres have the default params.
rows, err := m.DB.QueryContext(ctx, query, args...)
if err != nil {
return nil, Metadata{}, err
}
// Make sure to close the rows stream return
defer rows.Close()
totalRecords := 0
// data structure to hold all of our movies
var movies = []*Movie{}
// Iterate through the rows returned
for rows.Next() {
var movie Movie
// Scan the values from the row into the Movie
// Note: pq.Array() again
err := rows.Scan(
&totalRecords, // Scan the count from the window function into total records
&movie.ID,
&movie.CreatedAt,
&movie.Title,
&movie.Year,
&movie.Runtime,
pq.Array(&movie.Genres),
&movie.Version,
)
if err != nil {
return nil, Metadata{}, err
}
movies = append(movies, &movie)
}
// When the rows.Next() finishes, if there is an error it's in rows.Err()
if err = rows.Err(); err != nil {
return nil, Metadata{}, err
}
// Generate a Metadata struct, passing in the total record count and pagination params from the client
metadata := calculateMetadata(totalRecords, filters.Page, filters.PageSize)
return movies, metadata, nil
}
// Insert inserts a new movie record
func (m *MovieModel) Insert(movie *Movie) error {
// Define the SQL query for inserting a new record in the movies table and returning the system generated data
query := `INSERT INTO movies (title, year, runtime, genres)
VALUES ($1, $2, $3, $4)
RETURNING id, created_at, version`
// Create an args slice containing the values for the placeholder parameters from the movie struct
args := []interface{}{movie.Title, movie.Year, movie.Runtime, pq.Array(movie.Genres)}
ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
defer cancel()
// Execute the query.
return m.DB.QueryRowContext(ctx, query, args...).Scan(&movie.ID, &movie.CreatedAt, &movie.Version)
}
// Update updates a specific movie from our database
func (m *MovieModel) Update(movie *Movie) error {
/* potential to use uuid here
UPDATE movies
SET title = $1, year = $2, runtime = $3, genres = $4, version = uuid_generate_v4()
WHERE id = $5 AND
**/
// Add version = $6, so we can stop race conditions
query := `
UPDATE movies
SET title = $1, year = $2, runtime = $3, genres = $4, version = version + 1
WHERE id = $5 AND version = $6
RETURNING version
`
// create the arg slice contaninig the values for the placeholder params.
args := []interface{}{
movie.Title,
movie.Year,
movie.Runtime,
pq.Array(movie.Genres),
movie.ID,
movie.Version, // Add the expected movie version
}
ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
defer cancel()
// If no matching row could be found (version has been changed)
err := m.DB.QueryRowContext(ctx, query, args...).Scan(&movie.Version)
if err != nil {
switch {
case errors.Is(err, sql.ErrNoRows):
return ErrEditConflict
default:
return err
}
}
return nil
}
// Delete
func (m *MovieModel) Delete(id int6 | or {
// ids can't be less than 1
if id < 1 {
return ErrRecordNotFound
}
query := `
DELETE FROM movies
WHERE id = $1`
ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
defer cancel()
// Returns sql.Result for how many rows affected
result, err := m.DB.ExecContext(ctx, query, id)
if err != nil {
return err
}
// Call the RowsAffected() to get the # of rows
rowsAffected, err := result.RowsAffected()
if err != nil {
return err
}
if rowsAffected == 0 {
return ErrRecordNotFound
}
return nil
}
| 4) err | identifier_name |
movies.go | package data
import (
"context"
"database/sql"
"errors"
"fmt"
"time"
"github.com/ahojo/greenlight/internal/validator"
"github.com/lib/pq"
)
var (
ErrRecordNotFound = errors.New("no record matching request")
ErrEditConflict = errors.New("edit conflict")
)
// Models wraps all of our database models
type Models struct {
Movies MovieModel
Users UserModel
Token TokenModel
Permissions PermissionModel
}
// Creates a Models that holds all of our database models.
func NewModels(db *sql.DB) Models {
return Models{
Movies: MovieModel{DB: db},
Users: UserModel{DB: db},
Token: TokenModel{DB: db},
Permissions: PermissionModel{DB: db},
}
}
// Movie data that we will reutrn as JSON
// The props all need to be exported
type Movie struct {
ID int64 `json:"id"` // Unique int ID for the movie
CreatedAt time.Time `json:"-"` // Timestamp for when the movie is added to our db - not relevant so "-" means to never show it.
Title string `json:"title"`
Year int32 `json:"int32,omitempty"` // Release year
// The Runtime MarshalJSON() receiver will be called now.
Runtime Runtime `json:"runtime,omitempty"` // omitempty means to not show it if there is no data.
// If you want to use omitempty and not change the key name then you can leave it blank in the struct tag — like this: json:",omitempty". Notice that the leading comma is still required.
Genres []string `json:"genres,omitempty"`
Version int32 `json:"version"` // incremented everytime the movie info is updated
}
func ValidateMovie(v *validator.Validator, movie *Movie) {
// Check() method to execute the validation checks. Adds the provided key and error message to the errors map.
v.Check(movie.Title != "", "title", "must be provided")
v.Check(len(movie.Title) <= 500, "title", "must not be more than 500 bytes long")
v.Check(movie.Runtime != 0, "runtime", "must be provided")
v.Check(movie.Runtime > 0, "runtime", "must be a positive integer")
v.Check(movie.Genres != nil, "genres", "must be provided")
v.Check(len(movie.Genres) >= 1, "genres", "must contain at least 1 genre")
v.Check(len(movie.Genres) <= 5, "genres", "must not contain more than 5 genres")
v.Check(validator.Unique(movie.Genres), "genres", "must not contain duplicate values")
v.Check(movie.Year != 0, "year", "must be provided")
v.Check(movie.Year >= 1888, "year", "must be greater than 1888")
v.Check(movie.Year <= int32(time.Now().Year()), "year", "must not be in the future")
}
// MovieModel wraps our db connection
type MovieModel struct {
DB *sql.DB
}
/* DATABASE QUERIES */
/**
GET METHODS
*/
// Get gets a specific movie from our database
func (m *MovieModel) Get(id int64) (*Movie, error) {
// The postgresql bigserial type starts autoincrementing at 1.
// No movies will have a value below 1.
if id < 1 {
return nil, ErrRecordNotFound
}
// Sql query
// pq_sleep(10) to simulate a long running query
// stmt := `SELECT pg_sleep(10),id,created_at,title,year,runtime,genres,version
// FROM movies
// WHERE id = $1`
stmt := `SELECT id,created_at,title,year,runtime,genres,version
FROM movies
WHERE id = $1`
// declare a movie
var movie Movie
// ctx.WithTimeout() funciton to carry a 3 second timeout deadline.
// emtpy context.Background() is the parent context
ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
// IMPORTANT, use defer cancel() so we can cancel the context before Get() returns.
defer cancel()
// Execute the query NOTE: that we have to use pg.Array() here
// err := m.DB.QueryRow(stmt, id).Scan(
// &[]byte{}, // for the pg_sleep(10)
// &movie.ID,
// &movie.CreatedAt,
// &movie.Title,
// &movie.Year,
// &movie.Runtime,
// pq.Array(&movie.Genres),
// &movie.Version,
// )
err := m.DB.QueryRowContext(ctx, stmt, id).Scan(
&movie.ID,
&movie.CreatedAt,
&movie.Title,
&movie.Year,
&movie.Runtime,
pq.Array(&movie.Genres),
&movie.Version,
)
if err != nil {
switch {
case errors.Is(err, sql.ErrNoRows):
return nil, ErrRecordNotFound
default:
return nil, err
}
}
return &movie, err
}
// GetAll returns a slice of Movies.
func (m *MovieModel) GetAll(title string, gernres []string, filters Filters) ([]*Movie, Metadata, error) {
// Sql Query
// query := `
// SELECT id, created_at, title, year, runtime, genres, version
// FROM movies
// ORDER BY id
// `
/*
This SQL query is designed so that each of the filters behaves like it is ‘optional’. For
example, the condition (LOWER(title) = LOWER($1) OR $1 = '') will evaluate as true if
the placeholder parameter $1 is a case-insensitive match for the movie title or the
placeholder parameter equals ''. So this filter condition will essentially be ‘skipped’ when
movie title being searched for is the empty string "".
The (genres @> $2 OR $2 = '{}') condition works in the same way. The @> symbol is the
‘contains’ operator for PostgreSQL arrays, and this condition will return true if all values in
the placeholder parameter $2 are contained in the database genres field or the placeholder
parameter contains an empty array.
https://www.postgresql.org/docs/9.6/functions-array.html
*/
// query := `
// SELECT id, created_at, title, year, runtime, genres, version
// FROM movies
// WHERE (LOWER(title) = LOWER($1) OR $1 = '')
// AND (genres @> $2 OR $2 = '{}')
// ORDER BY id`
/* Add FULL TEXT SEARCH PostgreSQL feature
The to_tsvector('simple', title) function takes a movie title and splits it into lexemes.
We specify the simple configuration, which means that the lexemes are just lowercase
versions of the words in the title
The plainto_tsquery('simple', $1) function takes a search value and turns it into a
formatted query term.
It normalizes the search value (again using the simple configuration), strips any special characters, and
inserts the and operator & between the words.
The @@ operator is the matches operator. In our statement we are using it to check whether
the generated query term matches the lexemes.
*/
// query := `
// SELECT id, created_at, title, year, runtime, genres, version
// FROM movies
// WHERE (to_tsvector('simple', title) @@ plainto_tsquery('simple', $1) OR $1 = '')
// AND (genres @> $2 OR $2 = '{}')
// ORDER BY id
// `
/* could have also used ILIKE
SELECT id, created_at, title, year, runtime, genres, version
FROM movies
WHERE (title ILIKE $1 OR $1 = '')
AND (genres @> $2 OR $2 = '{}')
ORDER BY id
*/
// Add an ORDER BY clause and interpolate the sort column and direction. Importantly
// notice that we also include a secondary sort on the movie ID to ensure a
// consistent ordering.
// Added the window function to count the number of (filtered) records
query := fmt.Sprintf(`
SELECT COUNT(*) OVER(),id, created_at, title, year, runtime, genres, version
FROM movies
WHERE (to_tsvector('simple', title) @@ plainto_tsquery('simple', $1) OR $1 = '')
AND (genres @> $2 OR $2 = '{}')
ORDER BY %s %s, id ASC
LIMIT $3 OFFSET $4`, filters.sortColumn(), filters.sortDirection())
// 3 second context timeout
ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
defer cancel()
args := []interface{}{
title,
pq.Array(gernres),
filters.limit(),
filters.offset(),
}
// Get back the data from the database. Cancels if takes too long
// Title and genres have the default params.
rows, err := m.DB.QueryContext(ctx, query, args...)
if err != nil {
return nil, Metadata{}, err
}
// Make sure to close the rows stream return
defer rows.Close()
totalRecords := 0
// data structure to hold all of our movies
var movies = []*Movie{}
// Iterate through the rows returned
for rows.Next() {
var movie Movie
// Scan the values from the row into the Movie
// Note: pq.Array() again
err := rows.Scan(
&totalRecords, // Scan the count from the window function into total records
&movie.ID,
&movie.CreatedAt,
&movie.Title,
&movie.Year,
&movie.Runtime,
pq.Array(&movie.Genres),
&movie.Version,
)
if err != nil {
return nil, Metadata{}, err
}
movies = append(movies, &movie)
}
// When the rows.Next() finishes, if there is an error it's in rows.Err()
if err = rows.Err(); err != nil {
return nil, Metadata{}, err
}
// Generate a Metadata struct, passing in the total record count and pagination params from the client
metadata := calculateMetadata(totalRecords, filters.Page, filters.PageSize)
return movies, metadata, nil
}
// Insert inserts a new movie record
func (m *MovieModel) Insert(movie *Movie) error {
// Define t | dates a specific movie from our database
func (m *MovieModel) Update(movie *Movie) error {
/* potential to use uuid here
UPDATE movies
SET title = $1, year = $2, runtime = $3, genres = $4, version = uuid_generate_v4()
WHERE id = $5 AND
**/
// Add version = $6, so we can stop race conditions
query := `
UPDATE movies
SET title = $1, year = $2, runtime = $3, genres = $4, version = version + 1
WHERE id = $5 AND version = $6
RETURNING version
`
// create the arg slice contaninig the values for the placeholder params.
args := []interface{}{
movie.Title,
movie.Year,
movie.Runtime,
pq.Array(movie.Genres),
movie.ID,
movie.Version, // Add the expected movie version
}
ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
defer cancel()
// If no matching row could be found (version has been changed)
err := m.DB.QueryRowContext(ctx, query, args...).Scan(&movie.Version)
if err != nil {
switch {
case errors.Is(err, sql.ErrNoRows):
return ErrEditConflict
default:
return err
}
}
return nil
}
// Delete
func (m *MovieModel) Delete(id int64) error {
// ids can't be less than 1
if id < 1 {
return ErrRecordNotFound
}
query := `
DELETE FROM movies
WHERE id = $1`
ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
defer cancel()
// Returns sql.Result for how many rows affected
result, err := m.DB.ExecContext(ctx, query, id)
if err != nil {
return err
}
// Call the RowsAffected() to get the # of rows
rowsAffected, err := result.RowsAffected()
if err != nil {
return err
}
if rowsAffected == 0 {
return ErrRecordNotFound
}
return nil
}
| he SQL query for inserting a new record in the movies table and returning the system generated data
query := `INSERT INTO movies (title, year, runtime, genres)
VALUES ($1, $2, $3, $4)
RETURNING id, created_at, version`
// Create an args slice containing the values for the placeholder parameters from the movie struct
args := []interface{}{movie.Title, movie.Year, movie.Runtime, pq.Array(movie.Genres)}
ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
defer cancel()
// Execute the query.
return m.DB.QueryRowContext(ctx, query, args...).Scan(&movie.ID, &movie.CreatedAt, &movie.Version)
}
// Update up | identifier_body |
movies.go | package data
import (
"context"
"database/sql"
"errors"
"fmt"
"time"
"github.com/ahojo/greenlight/internal/validator"
"github.com/lib/pq"
)
var (
ErrRecordNotFound = errors.New("no record matching request")
ErrEditConflict = errors.New("edit conflict")
)
// Models wraps all of our database models
type Models struct {
Movies MovieModel
Users UserModel
Token TokenModel
Permissions PermissionModel
}
// Creates a Models that holds all of our database models.
func NewModels(db *sql.DB) Models {
return Models{
Movies: MovieModel{DB: db},
Users: UserModel{DB: db},
Token: TokenModel{DB: db},
Permissions: PermissionModel{DB: db},
}
}
// Movie data that we will reutrn as JSON
// The props all need to be exported
type Movie struct {
ID int64 `json:"id"` // Unique int ID for the movie
CreatedAt time.Time `json:"-"` // Timestamp for when the movie is added to our db - not relevant so "-" means to never show it.
Title string `json:"title"`
Year int32 `json:"int32,omitempty"` // Release year
// The Runtime MarshalJSON() receiver will be called now.
Runtime Runtime `json:"runtime,omitempty"` // omitempty means to not show it if there is no data.
// If you want to use omitempty and not change the key name then you can leave it blank in the struct tag — like this: json:",omitempty". Notice that the leading comma is still required.
Genres []string `json:"genres,omitempty"`
Version int32 `json:"version"` // incremented everytime the movie info is updated
}
func ValidateMovie(v *validator.Validator, movie *Movie) {
// Check() method to execute the validation checks. Adds the provided key and error message to the errors map.
v.Check(movie.Title != "", "title", "must be provided")
v.Check(len(movie.Title) <= 500, "title", "must not be more than 500 bytes long")
v.Check(movie.Runtime != 0, "runtime", "must be provided")
v.Check(movie.Runtime > 0, "runtime", "must be a positive integer")
v.Check(movie.Genres != nil, "genres", "must be provided")
v.Check(len(movie.Genres) >= 1, "genres", "must contain at least 1 genre")
v.Check(len(movie.Genres) <= 5, "genres", "must not contain more than 5 genres")
v.Check(validator.Unique(movie.Genres), "genres", "must not contain duplicate values")
v.Check(movie.Year != 0, "year", "must be provided")
v.Check(movie.Year >= 1888, "year", "must be greater than 1888")
v.Check(movie.Year <= int32(time.Now().Year()), "year", "must not be in the future")
}
// MovieModel wraps our db connection
type MovieModel struct {
DB *sql.DB
}
/* DATABASE QUERIES */
/**
GET METHODS
*/
// Get gets a specific movie from our database
func (m *MovieModel) Get(id int64) (*Movie, error) {
// The postgresql bigserial type starts autoincrementing at 1.
// No movies will have a value below 1.
if id < 1 {
return nil, ErrRecordNotFound
}
// Sql query
// pq_sleep(10) to simulate a long running query
// stmt := `SELECT pg_sleep(10),id,created_at,title,year,runtime,genres,version
// FROM movies
// WHERE id = $1`
stmt := `SELECT id,created_at,title,year,runtime,genres,version
FROM movies
WHERE id = $1`
// declare a movie
var movie Movie
// ctx.WithTimeout() funciton to carry a 3 second timeout deadline.
// emtpy context.Background() is the parent context
ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
// IMPORTANT, use defer cancel() so we can cancel the context before Get() returns.
defer cancel()
// Execute the query NOTE: that we have to use pg.Array() here
// err := m.DB.QueryRow(stmt, id).Scan(
// &[]byte{}, // for the pg_sleep(10)
// &movie.ID,
// &movie.CreatedAt,
// &movie.Title,
// &movie.Year,
// &movie.Runtime,
// pq.Array(&movie.Genres),
// &movie.Version,
// )
err := m.DB.QueryRowContext(ctx, stmt, id).Scan(
&movie.ID,
&movie.CreatedAt,
&movie.Title,
&movie.Year,
&movie.Runtime,
pq.Array(&movie.Genres),
&movie.Version,
)
if err != nil {
switch {
case errors.Is(err, sql.ErrNoRows):
return nil, ErrRecordNotFound
default:
return nil, err
}
}
return &movie, err
}
// GetAll returns a slice of Movies.
func (m *MovieModel) GetAll(title string, gernres []string, filters Filters) ([]*Movie, Metadata, error) {
// Sql Query
// query := `
// SELECT id, created_at, title, year, runtime, genres, version
// FROM movies
// ORDER BY id
// `
/*
This SQL query is designed so that each of the filters behaves like it is ‘optional’. For
example, the condition (LOWER(title) = LOWER($1) OR $1 = '') will evaluate as true if
the placeholder parameter $1 is a case-insensitive match for the movie title or the
placeholder parameter equals ''. So this filter condition will essentially be ‘skipped’ when
movie title being searched for is the empty string "".
The (genres @> $2 OR $2 = '{}') condition works in the same way. The @> symbol is the
‘contains’ operator for PostgreSQL arrays, and this condition will return true if all values in
the placeholder parameter $2 are contained in the database genres field or the placeholder
parameter contains an empty array.
https://www.postgresql.org/docs/9.6/functions-array.html
*/
// query := `
// SELECT id, created_at, title, year, runtime, genres, version
// FROM movies
// WHERE (LOWER(title) = LOWER($1) OR $1 = '')
// AND (genres @> $2 OR $2 = '{}')
// ORDER BY id`
/* Add FULL TEXT SEARCH PostgreSQL feature
The to_tsvector('simple', title) function takes a movie title and splits it into lexemes.
We specify the simple configuration, which means that the lexemes are just lowercase
versions of the words in the title
The plainto_tsquery('simple', $1) function takes a search value and turns it into a
formatted query term.
It normalizes the search value (again using the simple configuration), strips any special characters, and
inserts the and operator & between the words.
The @@ operator is the matches operator. In our statement we are using it to check whether
the generated query term matches the lexemes.
*/
// query := `
// SELECT id, created_at, title, year, runtime, genres, version
// FROM movies
// WHERE (to_tsvector('simple', title) @@ plainto_tsquery('simple', $1) OR $1 = '')
// AND (genres @> $2 OR $2 = '{}')
// ORDER BY id
// `
/* could have also used ILIKE
SELECT id, created_at, title, year, runtime, genres, version
FROM movies
WHERE (title ILIKE $1 OR $1 = '')
AND (genres @> $2 OR $2 = '{}')
ORDER BY id
*/
// Add an ORDER BY clause and interpolate the sort column and direction. Importantly
// notice that we also include a secondary sort on the movie ID to ensure a
// consistent ordering.
// Added the window function to count the number of (filtered) records
query := fmt.Sprintf(`
SELECT COUNT(*) OVER(),id, created_at, title, year, runtime, genres, version
FROM movies
WHERE (to_tsvector('simple', title) @@ plainto_tsquery('simple', $1) OR $1 = '')
AND (genres @> $2 OR $2 = '{}')
ORDER BY %s %s, id ASC
LIMIT $3 OFFSET $4`, filters.sortColumn(), filters.sortDirection())
// 3 second context timeout
ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
defer cancel()
args := []interface{}{
title,
pq.Array(gernres),
filters.limit(),
filters.offset(),
}
// Get back the data from the database. Cancels if takes too long
// Title and genres have the default params.
rows, err := m.DB.QueryContext(ctx, query, args...)
if err != nil {
return nil, Metadata{}, err
}
// Make sure to close the rows stream return
defer rows.Close()
totalRecords := 0
// data structure to hold all of our movies
var movies = []*Movie{}
// Iterate through the rows returned
for rows.Next() {
var movie Movie
// Scan the values from the row into the Movie
// Note: pq.Array() again
err := rows.Scan(
&totalRecords, // Scan the count from the window function into total records
&movie.ID,
&movie.CreatedAt,
&movie.Title,
&movie.Year,
&movie.Runtime,
pq.Array(&movie.Genres),
&movie.Version,
)
if err != nil {
return nil, Metadata{}, err
}
movies = append(movies, &movie)
}
// When the rows.Next() finishes, if there is an error it's in rows.Err()
if err = rows.Err(); err != nil {
return nil, Metadata{}, err
}
// Generate a Metadata struct, passing in the total record count and pagination params from the client
metadata := calculateMetadata(totalRecords, filters.Page, filters.PageSize)
return movies, metadata, nil
}
// Insert inserts a new movie record
func (m *MovieModel) Insert(movie *Movie) error {
// Define the SQL query for inserting a new record in the movies table and returning the system generated data
query := `INSERT INTO movies (title, year, runtime, genres)
VALUES ($1, $2, $3, $4)
RETURNING id, created_at, version`
// Create an args slice containing the values for the placeholder parameters from the movie struct
args := []interface{}{movie.Title, movie.Year, movie.Runtime, pq.Array(movie.Genres)}
ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
defer cancel()
// Execute the query.
return m.DB.QueryRowContext(ctx, query, args...).Scan(&movie.ID, &movie.CreatedAt, &movie.Version)
}
// Update updates a specific movie from our database
func (m *MovieModel) Update(movie *Movie) error {
/* potential to use uuid here
UPDATE movies
SET title = $1, year = $2, runtime = $3, genres = $4, version = uuid_generate_v4()
WHERE id = $5 AND
**/
// Add version = $6, so we can stop race conditions
query := `
UPDATE movies
SET title = $1, year = $2, runtime = $3, genres = $4, version = version + 1
WHERE id = $5 AND version = $6
RETURNING version
`
// create the arg slice contaninig the values for the placeholder params.
args := []interface{}{
movie.Title,
movie.Year,
movie.Runtime,
pq.Array(movie.Genres),
movie.ID,
movie.Version, // Add the expected movie version
}
ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
defer cancel()
// If no matching row could be found (version has been changed)
err := m.DB.QueryRowContext(ctx, query, args...).Scan(&movie.Version)
if err != nil {
switch {
case errors.Is(err, sql.ErrNoRows):
return ErrEditConflict
default:
return err
}
}
return nil
}
// Delete
func (m *MovieModel) Delete(id int64) error {
// ids can't be less than 1
if id < 1 {
return ErrRecordNotFound
}
query := `
DELETE FROM movies
WHERE id = $1`
ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
defer cancel()
// Returns sql.Result for how many rows affected
result, err := m.DB.ExecContext(ctx, query, id)
if err != nil {
return err
}
// Call the RowsAffected() to get the # of rows
rowsAffected, err := result.RowsAffected()
if err != nil {
return err
}
| if rowsAffected == 0 {
return ErrRecordNotFound
}
return nil
} | random_line_split | |
GetSolStats.py | #!/usr/bin/python
# GetSolStats.py
#
# ABOUT
# Issue SEMP Requesst to get stats form Solace Router/VMR
# Generate CSV file with the following info:
# TIMESTAMP
# INGRESS_MSG_RATE
# EGRESS_MSG_RATE
# INGRESS_BYTES_RATE
# EGRESS_BYTES_RATE
# INGRESS_DISCARDS
# EGRESS_DISCARDS
#
# LIMITATIONS
# Not much error checking done
# Tested for SolTR 7.1.1 & 7.2 only
#
# HISTORY
# - Nov 29, 2016: nram (Solace PSG)
# Initial version
import argparse
import xml.etree.ElementTree as ET
import httplib, base64
import string, re
import time
import os.path
Verbose = 0
#----------------------------------------------------------------------------
# some log helpers
# TODO: use logger instead
#
def vprint (s):
global Verbose
if Verbose > 0:
print s
def dprint (s):
global Verbose
if Verbose > 2:
print '---\n', s
#----------------------------------------------------------------------------
# HTTP utils
# TODO: move to a lib
#
def open_http( url, user, passwd):
|
def post_http (req, url = '/SEMP'):
global Hdrs
dprint ("request: %s" % req)
dprint ("Posting to URL %s" % url)
Conn.request("POST", url, req, Hdrs)
res = Conn.getresponse()
if not res:
raise Exception ("No SEMP response")
resp = res.read()
if resp is None:
raise Exception ("Null SEMP response")
return None
return resp
#------------------------------------------------------------------------
# SEMP helpers
#
def read_semp_req(fname):
global SEMP_VERSION
global SEMP_DIR
sempfile = "%s/%s/%s" % (SEMP_DIR, SEMP_VERSION, fname)
vprint (" Reading file: %s" % sempfile )
try:
f = open(sempfile , 'r')
if not f:
raise Exception('Unable to open file', sempfile )
req = f.read()
dprint ("SEMP req template = %s" % req)
f.close()
return req
except IOError as e:
print (e)
raise e
except:
print ('Unexpected exception %s' % sys.exc_info()[0])
raise
#--------------------------------------------------------------------------------------
# Main
#--------------------------------------------------------------------------------------
p = argparse.ArgumentParser ()
pr = p.add_argument_group("Required Arguments")
pr.add_argument('--id', action='store', required=True, help='Test ID')
pr.add_argument('--url', action='store', required=True, help='Router/VPN URL in IP:PORT')
pr.add_argument('--vpn', action='store', required=True, help='VPN Name')
p.add_argument('--user', action='store', default='admin', help='Admin CLI username (default: admin)')
p.add_argument('--passwd', action='store', default='admin', help='CLI password (default: admin)')
p.add_argument('--outdir', action='store', default='out', help='Dir for output (default: out)')
p.add_argument('--sempdir', action='store', default='SEMP/Templates', help='SEMP template dir (default: SEMP/Templates)')
p.add_argument('--sempver', action='store', default='7_2', help='SEMP Version (default: 7_2)')
p.add_argument('--interval', action='store', default='5', help='Sampling interval (default: 5 seconds)')
p.add_argument('--samples', action='store', default='10', help='Max samples (default: 10)')
p.add_argument('-v','--verbose', action='count', help='Verbose mode (-vvv for debug)')
r = p.parse_args()
Verbose = r.verbose
SEMP_DIR = r.sempdir
SEMP_VERSION = r.sempver
nap = float(r.interval)
#--------------------------------------------------------------------
# Parse SEMP response file
#
#print ('Processing SEMP output file %s' % r.file)
#with open(r.file, 'r') as fd_semp:
# xmlstr = fd_semp.read()
#fd_semp.close()
# Open HTTP connection to router and get the SEMP directly
Conn = open_http (r.url, r.user, r.passwd)
#print ("Sending 'show all client message-spool detail' for vpn: %s" % r.vpn)
#semp_req = '<rpc semp-version="soltr/%s"> <show> <client> <name>*</name> <vpn-name>%s</vpn-name> <message-spool></message-spool> <detail></detail> </client> </show> </rpc>' % (SEMP_VERSION, r.vpn)
#dprint ("SEMP Req: %s" % semp_req)
#semp_resp = post_http (semp_req)
#dprint ("SEMP RESPONSE (1) : %s" % semp_resp)
#semp_req = read_semp_req ('show/version.xml') % (SEMP_VERSION)
#dprint ("SEMP Req: %s" % semp_req)
#semp_resp = post_http (semp_req)
#dprint ("SEMP RESPONSE (2): %s" % semp_resp)
#semp_req = read_semp_req ('show/client_stats.xml') % (SEMP_VERSION)
#dprint ("SEMP REQUEST (client stats): %s" % semp_req)
#semp_resp = post_http (semp_req)
#dprint ("SEMP RESPONSE (client stats): %s" % semp_resp)
# open CSV output file
#csvfile = "%s/client_info_%s.csv" % (r.outdir, re.sub('[\.:]','_',r.url))
csvfile = "%s/sol_stats_%s.csv" % (r.outdir, r.id)
print ('Writing to CSV file %s' % csvfile)
stats = {}
# Write header to CSV file
if not os.path.exists(csvfile):
with open(csvfile, 'w') as fd_csv:
print >>fd_csv, "#ID:%s" % (r.id)
print >>fd_csv, "%s,%s,%s,%s,%s,%s,%s,%s" % ('TIMESTAMP',\
'INGRESS_MSG_RATE',\
'EGRESS_MSG_RATE',\
'INGRESS_BYTES_RATE',\
'EGRESS_BYTES_RATE',\
'INGRESS_DISCARDS',\
'EGRESS_DISCARDS',\
'MSGS_SPOOLED')
fd_csv.close()
# Gather and save stats
vprint ("Collecting %s stats every %s seconds" % (r.samples, nap))
n = 0
while (n < int(r.samples)):
stats['timestamp'] = time.strftime("%Y%m%d %H:%M:%S")
print ("%-3d/%-3s) %s" % (n+1, r.samples, stats['timestamp']))
# Post SEMP Request -- vpn_stats
print (' Processing SEMP Request %s' % 'show/vpn_stats.xml')
semp_req = read_semp_req ('show/vpn_stats.xml') % (SEMP_VERSION, r.vpn)
dprint ("SEMP REQUEST (VPN stats): %s" % semp_req)
semp_resp = post_http (semp_req)
# Save SEMP response
dprint ("SEMP RESPONSE (VPN stats): %s" % semp_resp)
respfile = "%s/vpn_stats_%s.xml" % ('out/semp', time.strftime("%Y%m%d_%H%M%S"))
vprint (' Writing SEMP Response to file %s' % respfile)
with open(respfile, 'w') as fd_semp:
print >>fd_semp, semp_resp
fd_semp.close()
# Process SEMP Reponse XML
xmlroot = ET.fromstring(semp_resp)
en_stats = './rpc/show/message-vpn/vpn/stats'
e_stats = xmlroot.find(en_stats)
for tag in ['current-ingress-rate-per-second', \
'current-egress-rate-per-second', \
'current-ingress-byte-rate-per-second', \
'current-egress-byte-rate-per-second', \
'ingress-discards/total-ingress-discards', \
'egress-discards/total-egress-discards' ]:
stats[tag] = e_stats.find(tag).text
vprint (" %-40s: %s" % (tag, stats[tag]))
# Post SEMP Request -- vpn spool stats
print (' Processing SEMP Request %s' % 'show/vpn_spool_stats.xml')
semp_req = read_semp_req ('show/vpn_spool_stats.xml') % (SEMP_VERSION, r.vpn)
dprint ("SEMP REQUEST (VPN spool stats): %s" % semp_req)
semp_resp = post_http (semp_req)
# Save SEMP response
dprint ("SEMP RESPONSE (VPN spool stats): %s" % semp_resp)
respfile = "%s/vpn_spool_stats_%s.xml" % ('out/semp', time.strftime("%Y%m%d_%H%M%S"))
vprint (' Writing SEMP Response to file %s' % respfile)
with open(respfile, 'w') as fd_semp:
print >>fd_semp, semp_resp
fd_semp.close()
# Process SEMP Reponse XML
xmlroot = ET.fromstring(semp_resp)
en_stats = './rpc/show/message-spool/message-spool-stats'
e_stats = xmlroot.find(en_stats)
for tag in ['spooled-to-adb' ]:
stats[tag] = e_stats.find(tag).text
vprint (" %-40s: %s" % (tag, stats[tag]))
# Post SEMP Request -- vpn spool detail
print (' Processing SEMP Request %s' % 'show/vpn_spool_detail.xml')
semp_req = read_semp_req ('show/vpn_spool_detail.xml') % (SEMP_VERSION, r.vpn)
dprint ("SEMP REQUEST (VPN spool stats): %s" % semp_req)
semp_resp = post_http (semp_req)
# Save SEMP response
dprint ("SEMP RESPONSE (VPN spool stats): %s" % semp_resp)
respfile = "%s/vpn_spool_detail_%s.xml" % ('out/semp', time.strftime("%Y%m%d_%H%M%S"))
vprint (' Writing SEMP Response to file %s' % respfile)
with open(respfile, 'w') as fd_semp:
print >>fd_semp, semp_resp
fd_semp.close()
# Process SEMP Reponse XML
xmlroot = ET.fromstring(semp_resp)
en_stats = './rpc/show/message-spool/message-vpn/vpn'
e_stats = xmlroot.find(en_stats)
for tag in ['current-messages-spooled']:
stats[tag] = e_stats.find(tag).text
vprint (" %-40s: %s" % (tag, stats[tag]))
# Append stats to CSV file
with open(csvfile, 'a') as fd_csv:
print >>fd_csv, "%s,%s,%s,%s,%s,%s,%s,%s" % (stats['timestamp'],
stats['current-ingress-rate-per-second'], \
stats['current-egress-rate-per-second'], \
stats['current-ingress-byte-rate-per-second'], \
stats['current-ingress-byte-rate-per-second'], \
stats['ingress-discards/total-ingress-discards'], \
stats['egress-discards/total-egress-discards'], \
stats['current-messages-spooled'], \
)
dprint (stats )
time.sleep(nap)
n = n+1
fd_csv.close()
| global Hdrs
auth = string.strip(base64.encodestring(user+":"+passwd))
Hdrs = {"Content-type": "application/x-www-form-urlencoded", "Accept": "text/plain"}
Hdrs["Authorization"] = "Basic %s" % auth
print ("Opening HTTP connection to [%s]" % url)
dprint ("Headers: %s" % Hdrs.items())
try:
conn = httplib.HTTPConnection(url)
except httplib.InvalidURL as e:
print (e)
raise
except:
print ("Unexpected exception: %s" % sys.exc_info()[0])
raise
return conn | identifier_body |
GetSolStats.py | #!/usr/bin/python
# GetSolStats.py
#
# ABOUT
# Issue SEMP Requesst to get stats form Solace Router/VMR
# Generate CSV file with the following info:
# TIMESTAMP
# INGRESS_MSG_RATE
# EGRESS_MSG_RATE
# INGRESS_BYTES_RATE
# EGRESS_BYTES_RATE
# INGRESS_DISCARDS
# EGRESS_DISCARDS
#
# LIMITATIONS
# Not much error checking done
# Tested for SolTR 7.1.1 & 7.2 only
#
# HISTORY
# - Nov 29, 2016: nram (Solace PSG)
# Initial version
import argparse
import xml.etree.ElementTree as ET
import httplib, base64
import string, re
import time
import os.path
Verbose = 0
#----------------------------------------------------------------------------
# some log helpers
# TODO: use logger instead
#
def | (s):
global Verbose
if Verbose > 0:
print s
def dprint (s):
global Verbose
if Verbose > 2:
print '---\n', s
#----------------------------------------------------------------------------
# HTTP utils
# TODO: move to a lib
#
def open_http( url, user, passwd):
global Hdrs
auth = string.strip(base64.encodestring(user+":"+passwd))
Hdrs = {"Content-type": "application/x-www-form-urlencoded", "Accept": "text/plain"}
Hdrs["Authorization"] = "Basic %s" % auth
print ("Opening HTTP connection to [%s]" % url)
dprint ("Headers: %s" % Hdrs.items())
try:
conn = httplib.HTTPConnection(url)
except httplib.InvalidURL as e:
print (e)
raise
except:
print ("Unexpected exception: %s" % sys.exc_info()[0])
raise
return conn
def post_http (req, url = '/SEMP'):
global Hdrs
dprint ("request: %s" % req)
dprint ("Posting to URL %s" % url)
Conn.request("POST", url, req, Hdrs)
res = Conn.getresponse()
if not res:
raise Exception ("No SEMP response")
resp = res.read()
if resp is None:
raise Exception ("Null SEMP response")
return None
return resp
#------------------------------------------------------------------------
# SEMP helpers
#
def read_semp_req(fname):
global SEMP_VERSION
global SEMP_DIR
sempfile = "%s/%s/%s" % (SEMP_DIR, SEMP_VERSION, fname)
vprint (" Reading file: %s" % sempfile )
try:
f = open(sempfile , 'r')
if not f:
raise Exception('Unable to open file', sempfile )
req = f.read()
dprint ("SEMP req template = %s" % req)
f.close()
return req
except IOError as e:
print (e)
raise e
except:
print ('Unexpected exception %s' % sys.exc_info()[0])
raise
#--------------------------------------------------------------------------------------
# Main
#--------------------------------------------------------------------------------------
p = argparse.ArgumentParser ()
pr = p.add_argument_group("Required Arguments")
pr.add_argument('--id', action='store', required=True, help='Test ID')
pr.add_argument('--url', action='store', required=True, help='Router/VPN URL in IP:PORT')
pr.add_argument('--vpn', action='store', required=True, help='VPN Name')
p.add_argument('--user', action='store', default='admin', help='Admin CLI username (default: admin)')
p.add_argument('--passwd', action='store', default='admin', help='CLI password (default: admin)')
p.add_argument('--outdir', action='store', default='out', help='Dir for output (default: out)')
p.add_argument('--sempdir', action='store', default='SEMP/Templates', help='SEMP template dir (default: SEMP/Templates)')
p.add_argument('--sempver', action='store', default='7_2', help='SEMP Version (default: 7_2)')
p.add_argument('--interval', action='store', default='5', help='Sampling interval (default: 5 seconds)')
p.add_argument('--samples', action='store', default='10', help='Max samples (default: 10)')
p.add_argument('-v','--verbose', action='count', help='Verbose mode (-vvv for debug)')
r = p.parse_args()
Verbose = r.verbose
SEMP_DIR = r.sempdir
SEMP_VERSION = r.sempver
nap = float(r.interval)
#--------------------------------------------------------------------
# Parse SEMP response file
#
#print ('Processing SEMP output file %s' % r.file)
#with open(r.file, 'r') as fd_semp:
# xmlstr = fd_semp.read()
#fd_semp.close()
# Open HTTP connection to router and get the SEMP directly
Conn = open_http (r.url, r.user, r.passwd)
#print ("Sending 'show all client message-spool detail' for vpn: %s" % r.vpn)
#semp_req = '<rpc semp-version="soltr/%s"> <show> <client> <name>*</name> <vpn-name>%s</vpn-name> <message-spool></message-spool> <detail></detail> </client> </show> </rpc>' % (SEMP_VERSION, r.vpn)
#dprint ("SEMP Req: %s" % semp_req)
#semp_resp = post_http (semp_req)
#dprint ("SEMP RESPONSE (1) : %s" % semp_resp)
#semp_req = read_semp_req ('show/version.xml') % (SEMP_VERSION)
#dprint ("SEMP Req: %s" % semp_req)
#semp_resp = post_http (semp_req)
#dprint ("SEMP RESPONSE (2): %s" % semp_resp)
#semp_req = read_semp_req ('show/client_stats.xml') % (SEMP_VERSION)
#dprint ("SEMP REQUEST (client stats): %s" % semp_req)
#semp_resp = post_http (semp_req)
#dprint ("SEMP RESPONSE (client stats): %s" % semp_resp)
# open CSV output file
#csvfile = "%s/client_info_%s.csv" % (r.outdir, re.sub('[\.:]','_',r.url))
csvfile = "%s/sol_stats_%s.csv" % (r.outdir, r.id)
print ('Writing to CSV file %s' % csvfile)
stats = {}
# Write header to CSV file
if not os.path.exists(csvfile):
with open(csvfile, 'w') as fd_csv:
print >>fd_csv, "#ID:%s" % (r.id)
print >>fd_csv, "%s,%s,%s,%s,%s,%s,%s,%s" % ('TIMESTAMP',\
'INGRESS_MSG_RATE',\
'EGRESS_MSG_RATE',\
'INGRESS_BYTES_RATE',\
'EGRESS_BYTES_RATE',\
'INGRESS_DISCARDS',\
'EGRESS_DISCARDS',\
'MSGS_SPOOLED')
fd_csv.close()
# Gather and save stats
vprint ("Collecting %s stats every %s seconds" % (r.samples, nap))
n = 0
while (n < int(r.samples)):
stats['timestamp'] = time.strftime("%Y%m%d %H:%M:%S")
print ("%-3d/%-3s) %s" % (n+1, r.samples, stats['timestamp']))
# Post SEMP Request -- vpn_stats
print (' Processing SEMP Request %s' % 'show/vpn_stats.xml')
semp_req = read_semp_req ('show/vpn_stats.xml') % (SEMP_VERSION, r.vpn)
dprint ("SEMP REQUEST (VPN stats): %s" % semp_req)
semp_resp = post_http (semp_req)
# Save SEMP response
dprint ("SEMP RESPONSE (VPN stats): %s" % semp_resp)
respfile = "%s/vpn_stats_%s.xml" % ('out/semp', time.strftime("%Y%m%d_%H%M%S"))
vprint (' Writing SEMP Response to file %s' % respfile)
with open(respfile, 'w') as fd_semp:
print >>fd_semp, semp_resp
fd_semp.close()
# Process SEMP Reponse XML
xmlroot = ET.fromstring(semp_resp)
en_stats = './rpc/show/message-vpn/vpn/stats'
e_stats = xmlroot.find(en_stats)
for tag in ['current-ingress-rate-per-second', \
'current-egress-rate-per-second', \
'current-ingress-byte-rate-per-second', \
'current-egress-byte-rate-per-second', \
'ingress-discards/total-ingress-discards', \
'egress-discards/total-egress-discards' ]:
stats[tag] = e_stats.find(tag).text
vprint (" %-40s: %s" % (tag, stats[tag]))
# Post SEMP Request -- vpn spool stats
print (' Processing SEMP Request %s' % 'show/vpn_spool_stats.xml')
semp_req = read_semp_req ('show/vpn_spool_stats.xml') % (SEMP_VERSION, r.vpn)
dprint ("SEMP REQUEST (VPN spool stats): %s" % semp_req)
semp_resp = post_http (semp_req)
# Save SEMP response
dprint ("SEMP RESPONSE (VPN spool stats): %s" % semp_resp)
respfile = "%s/vpn_spool_stats_%s.xml" % ('out/semp', time.strftime("%Y%m%d_%H%M%S"))
vprint (' Writing SEMP Response to file %s' % respfile)
with open(respfile, 'w') as fd_semp:
print >>fd_semp, semp_resp
fd_semp.close()
# Process SEMP Reponse XML
xmlroot = ET.fromstring(semp_resp)
en_stats = './rpc/show/message-spool/message-spool-stats'
e_stats = xmlroot.find(en_stats)
for tag in ['spooled-to-adb' ]:
stats[tag] = e_stats.find(tag).text
vprint (" %-40s: %s" % (tag, stats[tag]))
# Post SEMP Request -- vpn spool detail
print (' Processing SEMP Request %s' % 'show/vpn_spool_detail.xml')
semp_req = read_semp_req ('show/vpn_spool_detail.xml') % (SEMP_VERSION, r.vpn)
dprint ("SEMP REQUEST (VPN spool stats): %s" % semp_req)
semp_resp = post_http (semp_req)
# Save SEMP response
dprint ("SEMP RESPONSE (VPN spool stats): %s" % semp_resp)
respfile = "%s/vpn_spool_detail_%s.xml" % ('out/semp', time.strftime("%Y%m%d_%H%M%S"))
vprint (' Writing SEMP Response to file %s' % respfile)
with open(respfile, 'w') as fd_semp:
print >>fd_semp, semp_resp
fd_semp.close()
# Process SEMP Reponse XML
xmlroot = ET.fromstring(semp_resp)
en_stats = './rpc/show/message-spool/message-vpn/vpn'
e_stats = xmlroot.find(en_stats)
for tag in ['current-messages-spooled']:
stats[tag] = e_stats.find(tag).text
vprint (" %-40s: %s" % (tag, stats[tag]))
# Append stats to CSV file
with open(csvfile, 'a') as fd_csv:
print >>fd_csv, "%s,%s,%s,%s,%s,%s,%s,%s" % (stats['timestamp'],
stats['current-ingress-rate-per-second'], \
stats['current-egress-rate-per-second'], \
stats['current-ingress-byte-rate-per-second'], \
stats['current-ingress-byte-rate-per-second'], \
stats['ingress-discards/total-ingress-discards'], \
stats['egress-discards/total-egress-discards'], \
stats['current-messages-spooled'], \
)
dprint (stats )
time.sleep(nap)
n = n+1
fd_csv.close()
| vprint | identifier_name |
GetSolStats.py | #!/usr/bin/python
# GetSolStats.py
#
# ABOUT
# Issue SEMP Requesst to get stats form Solace Router/VMR
# Generate CSV file with the following info:
# TIMESTAMP
# INGRESS_MSG_RATE
# EGRESS_MSG_RATE
# INGRESS_BYTES_RATE
# EGRESS_BYTES_RATE
# INGRESS_DISCARDS
# EGRESS_DISCARDS
#
# LIMITATIONS
# Not much error checking done
# Tested for SolTR 7.1.1 & 7.2 only
#
# HISTORY
# - Nov 29, 2016: nram (Solace PSG)
# Initial version
import argparse
import xml.etree.ElementTree as ET
import httplib, base64
import string, re
import time
import os.path
Verbose = 0
#----------------------------------------------------------------------------
# some log helpers
# TODO: use logger instead
#
def vprint (s):
global Verbose
if Verbose > 0:
|
def dprint (s):
global Verbose
if Verbose > 2:
print '---\n', s
#----------------------------------------------------------------------------
# HTTP utils
# TODO: move to a lib
#
def open_http( url, user, passwd):
global Hdrs
auth = string.strip(base64.encodestring(user+":"+passwd))
Hdrs = {"Content-type": "application/x-www-form-urlencoded", "Accept": "text/plain"}
Hdrs["Authorization"] = "Basic %s" % auth
print ("Opening HTTP connection to [%s]" % url)
dprint ("Headers: %s" % Hdrs.items())
try:
conn = httplib.HTTPConnection(url)
except httplib.InvalidURL as e:
print (e)
raise
except:
print ("Unexpected exception: %s" % sys.exc_info()[0])
raise
return conn
def post_http (req, url = '/SEMP'):
global Hdrs
dprint ("request: %s" % req)
dprint ("Posting to URL %s" % url)
Conn.request("POST", url, req, Hdrs)
res = Conn.getresponse()
if not res:
raise Exception ("No SEMP response")
resp = res.read()
if resp is None:
raise Exception ("Null SEMP response")
return None
return resp
#------------------------------------------------------------------------
# SEMP helpers
#
def read_semp_req(fname):
global SEMP_VERSION
global SEMP_DIR
sempfile = "%s/%s/%s" % (SEMP_DIR, SEMP_VERSION, fname)
vprint (" Reading file: %s" % sempfile )
try:
f = open(sempfile , 'r')
if not f:
raise Exception('Unable to open file', sempfile )
req = f.read()
dprint ("SEMP req template = %s" % req)
f.close()
return req
except IOError as e:
print (e)
raise e
except:
print ('Unexpected exception %s' % sys.exc_info()[0])
raise
#--------------------------------------------------------------------------------------
# Main
#--------------------------------------------------------------------------------------
p = argparse.ArgumentParser ()
pr = p.add_argument_group("Required Arguments")
pr.add_argument('--id', action='store', required=True, help='Test ID')
pr.add_argument('--url', action='store', required=True, help='Router/VPN URL in IP:PORT')
pr.add_argument('--vpn', action='store', required=True, help='VPN Name')
p.add_argument('--user', action='store', default='admin', help='Admin CLI username (default: admin)')
p.add_argument('--passwd', action='store', default='admin', help='CLI password (default: admin)')
p.add_argument('--outdir', action='store', default='out', help='Dir for output (default: out)')
p.add_argument('--sempdir', action='store', default='SEMP/Templates', help='SEMP template dir (default: SEMP/Templates)')
p.add_argument('--sempver', action='store', default='7_2', help='SEMP Version (default: 7_2)')
p.add_argument('--interval', action='store', default='5', help='Sampling interval (default: 5 seconds)')
p.add_argument('--samples', action='store', default='10', help='Max samples (default: 10)')
p.add_argument('-v','--verbose', action='count', help='Verbose mode (-vvv for debug)')
r = p.parse_args()
Verbose = r.verbose
SEMP_DIR = r.sempdir
SEMP_VERSION = r.sempver
nap = float(r.interval)
#--------------------------------------------------------------------
# Parse SEMP response file
#
#print ('Processing SEMP output file %s' % r.file)
#with open(r.file, 'r') as fd_semp:
# xmlstr = fd_semp.read()
#fd_semp.close()
# Open HTTP connection to router and get the SEMP directly
Conn = open_http (r.url, r.user, r.passwd)
#print ("Sending 'show all client message-spool detail' for vpn: %s" % r.vpn)
#semp_req = '<rpc semp-version="soltr/%s"> <show> <client> <name>*</name> <vpn-name>%s</vpn-name> <message-spool></message-spool> <detail></detail> </client> </show> </rpc>' % (SEMP_VERSION, r.vpn)
#dprint ("SEMP Req: %s" % semp_req)
#semp_resp = post_http (semp_req)
#dprint ("SEMP RESPONSE (1) : %s" % semp_resp)
#semp_req = read_semp_req ('show/version.xml') % (SEMP_VERSION)
#dprint ("SEMP Req: %s" % semp_req)
#semp_resp = post_http (semp_req)
#dprint ("SEMP RESPONSE (2): %s" % semp_resp)
#semp_req = read_semp_req ('show/client_stats.xml') % (SEMP_VERSION)
#dprint ("SEMP REQUEST (client stats): %s" % semp_req)
#semp_resp = post_http (semp_req)
#dprint ("SEMP RESPONSE (client stats): %s" % semp_resp)
# open CSV output file
#csvfile = "%s/client_info_%s.csv" % (r.outdir, re.sub('[\.:]','_',r.url))
csvfile = "%s/sol_stats_%s.csv" % (r.outdir, r.id)
print ('Writing to CSV file %s' % csvfile)
stats = {}
# Write header to CSV file
if not os.path.exists(csvfile):
with open(csvfile, 'w') as fd_csv:
print >>fd_csv, "#ID:%s" % (r.id)
print >>fd_csv, "%s,%s,%s,%s,%s,%s,%s,%s" % ('TIMESTAMP',\
'INGRESS_MSG_RATE',\
'EGRESS_MSG_RATE',\
'INGRESS_BYTES_RATE',\
'EGRESS_BYTES_RATE',\
'INGRESS_DISCARDS',\
'EGRESS_DISCARDS',\
'MSGS_SPOOLED')
fd_csv.close()
# Gather and save stats
vprint ("Collecting %s stats every %s seconds" % (r.samples, nap))
n = 0
while (n < int(r.samples)):
stats['timestamp'] = time.strftime("%Y%m%d %H:%M:%S")
print ("%-3d/%-3s) %s" % (n+1, r.samples, stats['timestamp']))
# Post SEMP Request -- vpn_stats
print (' Processing SEMP Request %s' % 'show/vpn_stats.xml')
semp_req = read_semp_req ('show/vpn_stats.xml') % (SEMP_VERSION, r.vpn)
dprint ("SEMP REQUEST (VPN stats): %s" % semp_req)
semp_resp = post_http (semp_req)
# Save SEMP response
dprint ("SEMP RESPONSE (VPN stats): %s" % semp_resp)
respfile = "%s/vpn_stats_%s.xml" % ('out/semp', time.strftime("%Y%m%d_%H%M%S"))
vprint (' Writing SEMP Response to file %s' % respfile)
with open(respfile, 'w') as fd_semp:
print >>fd_semp, semp_resp
fd_semp.close()
# Process SEMP Reponse XML
xmlroot = ET.fromstring(semp_resp)
en_stats = './rpc/show/message-vpn/vpn/stats'
e_stats = xmlroot.find(en_stats)
for tag in ['current-ingress-rate-per-second', \
'current-egress-rate-per-second', \
'current-ingress-byte-rate-per-second', \
'current-egress-byte-rate-per-second', \
'ingress-discards/total-ingress-discards', \
'egress-discards/total-egress-discards' ]:
stats[tag] = e_stats.find(tag).text
vprint (" %-40s: %s" % (tag, stats[tag]))
# Post SEMP Request -- vpn spool stats
print (' Processing SEMP Request %s' % 'show/vpn_spool_stats.xml')
semp_req = read_semp_req ('show/vpn_spool_stats.xml') % (SEMP_VERSION, r.vpn)
dprint ("SEMP REQUEST (VPN spool stats): %s" % semp_req)
semp_resp = post_http (semp_req)
# Save SEMP response
dprint ("SEMP RESPONSE (VPN spool stats): %s" % semp_resp)
respfile = "%s/vpn_spool_stats_%s.xml" % ('out/semp', time.strftime("%Y%m%d_%H%M%S"))
vprint (' Writing SEMP Response to file %s' % respfile)
with open(respfile, 'w') as fd_semp:
print >>fd_semp, semp_resp
fd_semp.close()
# Process SEMP Reponse XML
xmlroot = ET.fromstring(semp_resp)
en_stats = './rpc/show/message-spool/message-spool-stats'
e_stats = xmlroot.find(en_stats)
for tag in ['spooled-to-adb' ]:
stats[tag] = e_stats.find(tag).text
vprint (" %-40s: %s" % (tag, stats[tag]))
# Post SEMP Request -- vpn spool detail
print (' Processing SEMP Request %s' % 'show/vpn_spool_detail.xml')
semp_req = read_semp_req ('show/vpn_spool_detail.xml') % (SEMP_VERSION, r.vpn)
dprint ("SEMP REQUEST (VPN spool stats): %s" % semp_req)
semp_resp = post_http (semp_req)
# Save SEMP response
dprint ("SEMP RESPONSE (VPN spool stats): %s" % semp_resp)
respfile = "%s/vpn_spool_detail_%s.xml" % ('out/semp', time.strftime("%Y%m%d_%H%M%S"))
vprint (' Writing SEMP Response to file %s' % respfile)
with open(respfile, 'w') as fd_semp:
print >>fd_semp, semp_resp
fd_semp.close()
# Process SEMP Reponse XML
xmlroot = ET.fromstring(semp_resp)
en_stats = './rpc/show/message-spool/message-vpn/vpn'
e_stats = xmlroot.find(en_stats)
for tag in ['current-messages-spooled']:
stats[tag] = e_stats.find(tag).text
vprint (" %-40s: %s" % (tag, stats[tag]))
# Append stats to CSV file
with open(csvfile, 'a') as fd_csv:
print >>fd_csv, "%s,%s,%s,%s,%s,%s,%s,%s" % (stats['timestamp'],
stats['current-ingress-rate-per-second'], \
stats['current-egress-rate-per-second'], \
stats['current-ingress-byte-rate-per-second'], \
stats['current-ingress-byte-rate-per-second'], \
stats['ingress-discards/total-ingress-discards'], \
stats['egress-discards/total-egress-discards'], \
stats['current-messages-spooled'], \
)
dprint (stats )
time.sleep(nap)
n = n+1
fd_csv.close()
| print s | conditional_block |
GetSolStats.py | #!/usr/bin/python
# GetSolStats.py
#
# ABOUT
# Issue SEMP Requesst to get stats form Solace Router/VMR
# Generate CSV file with the following info:
# TIMESTAMP
# INGRESS_MSG_RATE
# EGRESS_MSG_RATE
# INGRESS_BYTES_RATE
# EGRESS_BYTES_RATE
# INGRESS_DISCARDS
# EGRESS_DISCARDS
#
# LIMITATIONS
# Not much error checking done
# Tested for SolTR 7.1.1 & 7.2 only
#
# HISTORY
# - Nov 29, 2016: nram (Solace PSG)
# Initial version
import argparse
import xml.etree.ElementTree as ET
import httplib, base64
import string, re
import time
import os.path
Verbose = 0
#----------------------------------------------------------------------------
# some log helpers
# TODO: use logger instead
#
def vprint (s):
global Verbose
if Verbose > 0:
print s
def dprint (s):
global Verbose
if Verbose > 2:
print '---\n', s
#----------------------------------------------------------------------------
# HTTP utils
# TODO: move to a lib
#
def open_http( url, user, passwd):
global Hdrs
auth = string.strip(base64.encodestring(user+":"+passwd))
Hdrs = {"Content-type": "application/x-www-form-urlencoded", "Accept": "text/plain"}
Hdrs["Authorization"] = "Basic %s" % auth
print ("Opening HTTP connection to [%s]" % url)
dprint ("Headers: %s" % Hdrs.items())
try:
conn = httplib.HTTPConnection(url)
except httplib.InvalidURL as e:
print (e)
raise
except:
print ("Unexpected exception: %s" % sys.exc_info()[0])
raise
return conn
def post_http (req, url = '/SEMP'):
global Hdrs
dprint ("request: %s" % req)
dprint ("Posting to URL %s" % url)
Conn.request("POST", url, req, Hdrs)
res = Conn.getresponse()
if not res:
raise Exception ("No SEMP response")
resp = res.read()
if resp is None:
raise Exception ("Null SEMP response")
return None
return resp
#------------------------------------------------------------------------
# SEMP helpers
#
def read_semp_req(fname):
global SEMP_VERSION
global SEMP_DIR
sempfile = "%s/%s/%s" % (SEMP_DIR, SEMP_VERSION, fname)
vprint (" Reading file: %s" % sempfile )
try:
f = open(sempfile , 'r')
if not f:
raise Exception('Unable to open file', sempfile )
req = f.read()
dprint ("SEMP req template = %s" % req)
f.close()
return req
except IOError as e:
print (e)
raise e
except:
print ('Unexpected exception %s' % sys.exc_info()[0])
raise
#--------------------------------------------------------------------------------------
# Main
#--------------------------------------------------------------------------------------
p = argparse.ArgumentParser ()
pr = p.add_argument_group("Required Arguments")
pr.add_argument('--id', action='store', required=True, help='Test ID')
pr.add_argument('--url', action='store', required=True, help='Router/VPN URL in IP:PORT')
pr.add_argument('--vpn', action='store', required=True, help='VPN Name')
p.add_argument('--user', action='store', default='admin', help='Admin CLI username (default: admin)')
p.add_argument('--passwd', action='store', default='admin', help='CLI password (default: admin)')
p.add_argument('--outdir', action='store', default='out', help='Dir for output (default: out)')
p.add_argument('--sempdir', action='store', default='SEMP/Templates', help='SEMP template dir (default: SEMP/Templates)')
p.add_argument('--sempver', action='store', default='7_2', help='SEMP Version (default: 7_2)')
p.add_argument('--interval', action='store', default='5', help='Sampling interval (default: 5 seconds)')
p.add_argument('--samples', action='store', default='10', help='Max samples (default: 10)')
p.add_argument('-v','--verbose', action='count', help='Verbose mode (-vvv for debug)')
r = p.parse_args()
Verbose = r.verbose
SEMP_DIR = r.sempdir
SEMP_VERSION = r.sempver
nap = float(r.interval)
#--------------------------------------------------------------------
# Parse SEMP response file
#
#print ('Processing SEMP output file %s' % r.file)
#with open(r.file, 'r') as fd_semp:
# xmlstr = fd_semp.read()
#fd_semp.close()
# Open HTTP connection to router and get the SEMP directly
Conn = open_http (r.url, r.user, r.passwd)
#print ("Sending 'show all client message-spool detail' for vpn: %s" % r.vpn)
#semp_req = '<rpc semp-version="soltr/%s"> <show> <client> <name>*</name> <vpn-name>%s</vpn-name> <message-spool></message-spool> <detail></detail> </client> </show> </rpc>' % (SEMP_VERSION, r.vpn)
#dprint ("SEMP Req: %s" % semp_req)
#semp_resp = post_http (semp_req)
#dprint ("SEMP RESPONSE (1) : %s" % semp_resp)
#semp_req = read_semp_req ('show/version.xml') % (SEMP_VERSION)
#dprint ("SEMP Req: %s" % semp_req)
#semp_resp = post_http (semp_req)
#dprint ("SEMP RESPONSE (2): %s" % semp_resp)
#semp_req = read_semp_req ('show/client_stats.xml') % (SEMP_VERSION)
#dprint ("SEMP REQUEST (client stats): %s" % semp_req)
#semp_resp = post_http (semp_req)
#dprint ("SEMP RESPONSE (client stats): %s" % semp_resp)
# open CSV output file
#csvfile = "%s/client_info_%s.csv" % (r.outdir, re.sub('[\.:]','_',r.url))
csvfile = "%s/sol_stats_%s.csv" % (r.outdir, r.id)
print ('Writing to CSV file %s' % csvfile)
stats = {}
# Write header to CSV file
if not os.path.exists(csvfile):
with open(csvfile, 'w') as fd_csv:
print >>fd_csv, "#ID:%s" % (r.id)
print >>fd_csv, "%s,%s,%s,%s,%s,%s,%s,%s" % ('TIMESTAMP',\
'INGRESS_MSG_RATE',\
'EGRESS_MSG_RATE',\
'INGRESS_BYTES_RATE',\
'EGRESS_BYTES_RATE',\
'INGRESS_DISCARDS',\
'EGRESS_DISCARDS',\
'MSGS_SPOOLED')
fd_csv.close() | stats['timestamp'] = time.strftime("%Y%m%d %H:%M:%S")
print ("%-3d/%-3s) %s" % (n+1, r.samples, stats['timestamp']))
# Post SEMP Request -- vpn_stats
print (' Processing SEMP Request %s' % 'show/vpn_stats.xml')
semp_req = read_semp_req ('show/vpn_stats.xml') % (SEMP_VERSION, r.vpn)
dprint ("SEMP REQUEST (VPN stats): %s" % semp_req)
semp_resp = post_http (semp_req)
# Save SEMP response
dprint ("SEMP RESPONSE (VPN stats): %s" % semp_resp)
respfile = "%s/vpn_stats_%s.xml" % ('out/semp', time.strftime("%Y%m%d_%H%M%S"))
vprint (' Writing SEMP Response to file %s' % respfile)
with open(respfile, 'w') as fd_semp:
print >>fd_semp, semp_resp
fd_semp.close()
# Process SEMP Reponse XML
xmlroot = ET.fromstring(semp_resp)
en_stats = './rpc/show/message-vpn/vpn/stats'
e_stats = xmlroot.find(en_stats)
for tag in ['current-ingress-rate-per-second', \
'current-egress-rate-per-second', \
'current-ingress-byte-rate-per-second', \
'current-egress-byte-rate-per-second', \
'ingress-discards/total-ingress-discards', \
'egress-discards/total-egress-discards' ]:
stats[tag] = e_stats.find(tag).text
vprint (" %-40s: %s" % (tag, stats[tag]))
# Post SEMP Request -- vpn spool stats
print (' Processing SEMP Request %s' % 'show/vpn_spool_stats.xml')
semp_req = read_semp_req ('show/vpn_spool_stats.xml') % (SEMP_VERSION, r.vpn)
dprint ("SEMP REQUEST (VPN spool stats): %s" % semp_req)
semp_resp = post_http (semp_req)
# Save SEMP response
dprint ("SEMP RESPONSE (VPN spool stats): %s" % semp_resp)
respfile = "%s/vpn_spool_stats_%s.xml" % ('out/semp', time.strftime("%Y%m%d_%H%M%S"))
vprint (' Writing SEMP Response to file %s' % respfile)
with open(respfile, 'w') as fd_semp:
print >>fd_semp, semp_resp
fd_semp.close()
# Process SEMP Reponse XML
xmlroot = ET.fromstring(semp_resp)
en_stats = './rpc/show/message-spool/message-spool-stats'
e_stats = xmlroot.find(en_stats)
for tag in ['spooled-to-adb' ]:
stats[tag] = e_stats.find(tag).text
vprint (" %-40s: %s" % (tag, stats[tag]))
# Post SEMP Request -- vpn spool detail
print (' Processing SEMP Request %s' % 'show/vpn_spool_detail.xml')
semp_req = read_semp_req ('show/vpn_spool_detail.xml') % (SEMP_VERSION, r.vpn)
dprint ("SEMP REQUEST (VPN spool stats): %s" % semp_req)
semp_resp = post_http (semp_req)
# Save SEMP response
dprint ("SEMP RESPONSE (VPN spool stats): %s" % semp_resp)
respfile = "%s/vpn_spool_detail_%s.xml" % ('out/semp', time.strftime("%Y%m%d_%H%M%S"))
vprint (' Writing SEMP Response to file %s' % respfile)
with open(respfile, 'w') as fd_semp:
print >>fd_semp, semp_resp
fd_semp.close()
# Process SEMP Reponse XML
xmlroot = ET.fromstring(semp_resp)
en_stats = './rpc/show/message-spool/message-vpn/vpn'
e_stats = xmlroot.find(en_stats)
for tag in ['current-messages-spooled']:
stats[tag] = e_stats.find(tag).text
vprint (" %-40s: %s" % (tag, stats[tag]))
# Append stats to CSV file
with open(csvfile, 'a') as fd_csv:
print >>fd_csv, "%s,%s,%s,%s,%s,%s,%s,%s" % (stats['timestamp'],
stats['current-ingress-rate-per-second'], \
stats['current-egress-rate-per-second'], \
stats['current-ingress-byte-rate-per-second'], \
stats['current-ingress-byte-rate-per-second'], \
stats['ingress-discards/total-ingress-discards'], \
stats['egress-discards/total-egress-discards'], \
stats['current-messages-spooled'], \
)
dprint (stats )
time.sleep(nap)
n = n+1
fd_csv.close() |
# Gather and save stats
vprint ("Collecting %s stats every %s seconds" % (r.samples, nap))
n = 0
while (n < int(r.samples)): | random_line_split |
minwinbase.rs | /
/
Licensed
under
the
Apache
License
Version
2
.
0
/
/
<
LICENSE
-
APACHE
or
http
:
/
/
www
.
apache
.
org
/
licenses
/
LICENSE
-
2
.
0
>
or
the
MIT
license
/
/
<
LICENSE
-
MIT
or
http
:
/
/
opensource
.
org
/
licenses
/
MIT
>
at
your
option
.
/
/
All
files
in
the
project
carrying
such
notice
may
not
be
copied
modified
or
distributed
/
/
except
according
to
those
terms
/
/
!
This
module
defines
the
32
-
Bit
Windows
Base
APIs
use
shared
:
:
basetsd
:
:
ULONG_PTR
;
use
shared
:
:
minwindef
:
:
{
BOOL
BYTE
DWORD
FILETIME
HMODULE
LPVOID
MAX_PATH
UINT
ULONG
WORD
}
;
use
shared
:
:
ntstatus
:
:
{
STATUS_ACCESS_VIOLATION
STATUS_ARRAY_BOUNDS_EXCEEDED
STATUS_BREAKPOINT
STATUS_CONTROL_C_EXIT
STATUS_DATATYPE_MISALIGNMENT
STATUS_FLOAT_DENORMAL_OPERAND
STATUS_FLOAT_DIVIDE_BY_ZERO
STATUS_FLOAT_INEXACT_RESULT
STATUS_FLOAT_INVALID_OPERATION
STATUS_FLOAT_OVERFLOW
STATUS_FLOAT_STACK_CHECK
STATUS_FLOAT_UNDERFLOW
STATUS_GUARD_PAGE_VIOLATION
STATUS_ILLEGAL_INSTRUCTION
STATUS_INTEGER_DIVIDE_BY_ZERO
STATUS_INTEGER_OVERFLOW
STATUS_INVALID_DISPOSITION
STATUS_INVALID_HANDLE
STATUS_IN_PAGE_ERROR
STATUS_NONCONTINUABLE_EXCEPTION
STATUS_PENDING
STATUS_POSSIBLE_DEADLOCK
STATUS_PRIVILEGED_INSTRUCTION
STATUS_SINGLE_STEP
STATUS_STACK_OVERFLOW
}
;
use
um
:
:
winnt
:
:
{
CHAR
EXCEPTION_RECORD
HANDLE
LPSTR
LPWSTR
PCONTEXT
PRTL_CRITICAL_SECTION
PRTL_CRITICAL_SECTION_DEBUG
PVOID
RTL_CRITICAL_SECTION
RTL_CRITICAL_SECTION_DEBUG
WCHAR
}
;
/
/
MoveMemory
/
/
CopyMemory
/
/
FillMemory
/
/
ZeroMemory
STRUCT
!
{
struct
SECURITY_ATTRIBUTES
{
nLength
:
DWORD
lpSecurityDescriptor
:
LPVOID
bInheritHandle
:
BOOL
}
}
pub
type
PSECURITY_ATTRIBUTES
=
*
mut
SECURITY_ATTRIBUTES
;
pub
type
LPSECURITY_ATTRIBUTES
=
*
mut
SECURITY_ATTRIBUTES
;
STRUCT
!
{
struct
OVERLAPPED_u_s
{
Offset
:
DWORD
OffsetHigh
:
DWORD
}
}
UNION
!
{
union
OVERLAPPED_u
{
[
u32
;
2
]
[
u64
;
1
]
s
s_mut
:
OVERLAPPED_u_s
Pointer
Pointer_mut
:
PVOID
}
}
STRUCT
!
{
struct
OVERLAPPED
{
Internal
:
ULONG_PTR
InternalHigh
:
ULONG_PTR
u
:
OVERLAPPED_u
hEvent
:
HANDLE
}
}
pub
type
LPOVERLAPPED
=
*
mut
OVERLAPPED
;
STRUCT
!
{
struct
OVERLAPPED_ENTRY
{
lpCompletionKey
:
ULONG_PTR
lpOverlapped
:
LPOVERLAPPED
Internal
:
ULONG_PTR
dwNumberOfBytesTransferred
:
DWORD
}
}
pub
type
LPOVERLAPPED_ENTRY
=
*
mut
OVERLAPPED_ENTRY
;
STRUCT
!
{
struct
SYSTEMTIME
{
wYear
:
WORD
wMonth
:
WORD
wDayOfWeek
:
WORD
wDay
:
WORD
wHour
:
WORD
wMinute
:
WORD
wSecond
:
WORD
wMilliseconds
:
WORD
}
}
pub
type
PSYSTEMTIME
=
*
mut
SYSTEMTIME
;
pub
type
LPSYSTEMTIME
=
*
mut
SYSTEMTIME
;
STRUCT
!
{
struct
WIN32_FIND_DATAA
{
dwFileAttributes
:
DWORD
ftCreationTime
:
FILETIME
ftLastAccessTime
:
FILETIME
ftLastWriteTime
:
FILETIME
nFileSizeHigh
:
DWORD
nFileSizeLow
:
DWORD
dwReserved0
:
DWORD
dwReserved1
:
DWORD
cFileName
:
[
CHAR
;
MAX_PATH
]
cAlternateFileName
:
[
CHAR
;
14
]
}
}
pub
type
PWIN32_FIND_DATAA
=
*
mut
WIN32_FIND_DATAA
;
pub
type
LPWIN32_FIND_DATAA
=
*
mut
WIN32_FIND_DATAA
;
STRUCT
!
{
struct
WIN32_FIND_DATAW
{
dwFileAttributes
:
DWORD
ftCreationTime
:
FILETIME
ftLastAccessTime
:
FILETIME
ftLastWriteTime
:
FILETIME
nFileSizeHigh
:
DWORD
nFileSizeLow
:
DWORD
dwReserved0
:
DWORD
dwReserved1
:
DWORD
cFileName
:
[
WCHAR
;
MAX_PATH
]
cAlternateFileName
:
[
WCHAR
;
14
]
}
}
pub
type
PWIN32_FIND_DATAW
=
*
mut
WIN32_FIND_DATAW
;
pub
type
LPWIN32_FIND_DATAW
=
*
mut
WIN32_FIND_DATAW
;
ENUM
!
{
enum
FINDEX_INFO_LEVELS
{
FindExInfoStandard
FindExInfoBasic
FindExInfoMaxInfoLevel
}
}
pub
const
FIND_FIRST_EX_CASE_SENSITIVE
:
DWORD
=
0x00000001
;
pub
const
FIND_FIRST_EX_LARGE_FETCH
:
DWORD
=
0x00000002
;
ENUM
!
{
enum
FINDEX_SEARCH_OPS
{
FindExSearchNameMatch
FindExSearchLimitToDirectories
FindExSearchLimitToDevices
FindExSearchMaxSearchOp
}
}
ENUM
!
{
enum
GET_FILEEX_INFO_LEVELS
{
GetFileExInfoStandard
GetFileExMaxInfoLevel
}
}
ENUM
!
{
enum
FILE_INFO_BY_HANDLE_CLASS
{
FileBasicInfo
FileStandardInfo
FileNameInfo
FileRenameInfo
FileDispositionInfo
FileAllocationInfo
FileEndOfFileInfo
FileStreamInfo
FileCompressionInfo
FileAttributeTagInfo
FileIdBothDirectoryInfo
FileIdBothDirectoryRestartInfo
FileIoPriorityHintInfo
FileRemoteProtocolInfo
FileFullDirectoryInfo
FileFullDirectoryRestartInfo
FileStorageInfo
FileAlignmentInfo
FileIdInfo
FileIdExtdDirectoryInfo
FileIdExtdDirectoryRestartInfo
FileDispositionInfoEx
FileRenameInfoEx
MaximumFileInfoByHandleClass
}
}
pub
type
PFILE_INFO_BY_HANDLE_CLASS
=
*
mut
FILE_INFO_BY_HANDLE_CLASS
;
pub
type
CRITICAL_SECTION
=
RTL_CRITICAL_SECTION
;
pub
type
PCRITICAL_SECTION
=
PRTL_CRITICAL_SECTION
;
pub
type
LPCRITICAL_SECTION
=
PRTL_CRITICAL_SECTION
;
pub
type
CRITICAL_SECTION_DEBUG
=
RTL_CRITICAL_SECTION_DEBUG
;
pub
type
PCRITICAL_SECTION_DEBUG
=
PRTL_CRITICAL_SECTION_DEBUG
;
pub
type
LPCRITICAL_SECTION_DEBUG
=
PRTL_CRITICAL_SECTION_DEBUG
;
FN
!
{
stdcall
LPOVERLAPPED_COMPLETION_ROUTINE
(
dwErrorCode
:
DWORD
dwNumberOfBytesTransfered
:
DWORD
lpOverlapped
:
LPOVERLAPPED
)
-
>
(
)
}
pub
const
LOCKFILE_FAIL_IMMEDIATELY
:
DWORD
=
0x00000001
;
pub
const
LOCKFILE_EXCLUSIVE_LOCK
:
DWORD
=
0x00000002
;
STRUCT
!
{
struct
PROCESS_HEAP_ENTRY_Block
{
hMem
:
HANDLE
dwReserved
:
[
DWORD
;
3
]
}
}
STRUCT
!
{
struct
PROCESS_HEAP_ENTRY_Region
{
dwCommittedSize
:
DWORD
dwUnCommittedSize
:
DWORD
lpFirstBlock
:
LPVOID
lpLastBlock
:
LPVOID
}
}
UNION
!
{
union
PROCESS_HEAP_ENTRY_u
{
[
u32
;
4
]
[
u64
;
3
]
Block
Block_mut
:
PROCESS_HEAP_ENTRY_Block
Region
Region_mut
:
PROCESS_HEAP_ENTRY_Region
}
}
STRUCT
!
{
struct
PROCESS_HEAP_ENTRY
{
lpData
:
PVOID
cbData
:
DWORD
cbOverhead
:
BYTE
iRegionIndex
:
BYTE
wFlags
:
WORD
u
:
PROCESS_HEAP_ENTRY_u
}
}
pub
type
LPPROCESS_HEAP_ENTRY
=
*
mut
PROCESS_HEAP_ENTRY
;
pub
type
PPROCESS_HEAP_ENTRY
=
*
mut
PROCESS_HEAP_ENTRY
;
pub
const
PROCESS_HEAP_REGION
:
WORD
=
0x0001
;
pub
const
PROCESS_HEAP_UNCOMMITTED_RANGE
:
WORD
=
0x0002
;
pub
const
PROCESS_HEAP_ENTRY_BUSY
:
WORD
=
0x0004
;
pub
const
PROCESS_HEAP_SEG_ALLOC
:
WORD
=
0x0008
;
pub
const
PROCESS_HEAP_ENTRY_MOVEABLE
:
WORD
=
0x0010
;
pub
const
PROCESS_HEAP_ENTRY_DDESHARE
:
WORD
=
0x0020
;
STRUCT
!
{
struct
REASON_CONTEXT_Detailed
{
LocalizedReasonModule
:
HMODULE
LocalizedReasonId
:
ULONG
ReasonStringCount
:
ULONG
ReasonStrings
:
*
mut
LPWSTR
}
} | REASON_CONTEXT_Reason
{
[
u32
;
4
]
[
u64
;
3
]
Detailed
Detailed_mut
:
REASON_CONTEXT_Detailed
SimpleReasonString
SimpleReasonString_mut
:
LPWSTR
}
}
STRUCT
!
{
struct
REASON_CONTEXT
{
Version
:
ULONG
Flags
:
DWORD
Reason
:
REASON_CONTEXT_Reason
}
}
pub
type
PREASON_CONTEXT
=
*
mut
REASON_CONTEXT
;
pub
const
EXCEPTION_DEBUG_EVENT
:
DWORD
=
1
;
pub
const
CREATE_THREAD_DEBUG_EVENT
:
DWORD
=
2
;
pub
const
CREATE_PROCESS_DEBUG_EVENT
:
DWORD
=
3
;
pub
const
EXIT_THREAD_DEBUG_EVENT
:
DWORD
=
4
;
pub
const
EXIT_PROCESS_DEBUG_EVENT
:
DWORD
=
5
;
pub
const
LOAD_DLL_DEBUG_EVENT
:
DWORD
=
6
;
pub
const
UNLOAD_DLL_DEBUG_EVENT
:
DWORD
=
7
;
pub
const
OUTPUT_DEBUG_STRING_EVENT
:
DWORD
=
8
;
pub
const
RIP_EVENT
:
DWORD
=
9
;
FN
!
{
stdcall
PTHREAD_START_ROUTINE
(
lpThreadParameter
:
LPVOID
)
-
>
DWORD
}
pub
type
LPTHREAD_START_ROUTINE
=
PTHREAD_START_ROUTINE
;
FN
!
{
stdcall
PENCLAVE_ROUTINE
(
lpThreadParameter
:
LPVOID
)
-
>
DWORD
}
pub
type
LPENCLAVE_ROUTINE
=
PENCLAVE_ROUTINE
;
STRUCT
!
{
struct
EXCEPTION_DEBUG_INFO
{
ExceptionRecord
:
EXCEPTION_RECORD
dwFirstChance
:
DWORD
}
}
pub
type
LPEXCEPTION_DEBUG_INFO
=
*
mut
EXCEPTION_DEBUG_INFO
;
STRUCT
!
{
struct
CREATE_THREAD_DEBUG_INFO
{
hThread
:
HANDLE
lpThreadLocalBase
:
LPVOID
lpStartAddress
:
LPTHREAD_START_ROUTINE
}
}
pub
type
LPCREATE_THREAD_DEBUG_INFO
=
*
mut
CREATE_THREAD_DEBUG_INFO
;
STRUCT
!
{
struct
CREATE_PROCESS_DEBUG_INFO
{
hFile
:
HANDLE
hProcess
:
HANDLE
hThread
:
HANDLE
lpBaseOfImage
:
LPVOID
dwDebugInfoFileOffset
:
DWORD
nDebugInfoSize
:
DWORD
lpThreadLocalBase
:
LPVOID
lpStartAddress
:
LPTHREAD_START_ROUTINE
lpImageName
:
LPVOID
fUnicode
:
WORD
}
}
pub
type
LPCREATE_PROCESS_DEBUG_INFO
=
*
mut
CREATE_PROCESS_DEBUG_INFO
;
STRUCT
!
{
struct
EXIT_THREAD_DEBUG_INFO
{
dwExitCode
:
DWORD
}
}
pub
type
LPEXIT_THREAD_DEBUG_INFO
=
*
mut
EXIT_THREAD_DEBUG_INFO
;
STRUCT
!
{
struct
EXIT_PROCESS_DEBUG_INFO
{
dwExitCode
:
DWORD
}
}
pub
type
LPEXIT_PROCESS_DEBUG_INFO
=
*
mut
EXIT_PROCESS_DEBUG_INFO
;
STRUCT
!
{
struct
LOAD_DLL_DEBUG_INFO
{
hFile
:
HANDLE
lpBaseOfDll
:
LPVOID
dwDebugInfoFileOffset
:
DWORD
nDebugInfoSize
:
DWORD
lpImageName
:
LPVOID
fUnicode
:
WORD
}
}
pub
type
LPLOAD_DLL_DEBUG_INFO
=
*
mut
LOAD_DLL_DEBUG_INFO
;
STRUCT
!
{
struct
UNLOAD_DLL_DEBUG_INFO
{
lpBaseOfDll
:
LPVOID
}
}
pub
type
LPUNLOAD_DLL_DEBUG_INFO
=
*
mut
UNLOAD_DLL_DEBUG_INFO
;
STRUCT
!
{
struct
OUTPUT_DEBUG_STRING_INFO
{
lpDebugStringData
:
LPSTR
fUnicode
:
WORD
nDebugStringLength
:
WORD
}
}
pub
type
LPOUTPUT_DEBUG_STRING_INFO
=
*
mut
OUTPUT_DEBUG_STRING_INFO
;
STRUCT
!
{
struct
RIP_INFO
{
dwError
:
DWORD
dwType
:
DWORD
}
}
pub
type
LPRIP_INFO
=
*
mut
RIP_INFO
;
UNION
!
{
union
DEBUG_EVENT_u
{
[
u32
;
21
]
[
u64
;
20
]
Exception
Exception_mut
:
EXCEPTION_DEBUG_INFO
CreateThread
CreateThread_mut
:
CREATE_THREAD_DEBUG_INFO
CreateProcessInfo
CreateProcessInfo_mut
:
CREATE_PROCESS_DEBUG_INFO
ExitThread
ExitThread_mut
:
EXIT_THREAD_DEBUG_INFO
ExitProcess
ExitProcess_mut
:
EXIT_PROCESS_DEBUG_INFO
LoadDll
LoadDll_mut
:
LOAD_DLL_DEBUG_INFO
UnloadDll
UnloadDll_mut
:
UNLOAD_DLL_DEBUG_INFO
DebugString
DebugString_mut
:
OUTPUT_DEBUG_STRING_INFO
RipInfo
RipInfo_mut
:
RIP_INFO
}
}
STRUCT
!
{
struct
DEBUG_EVENT
{
dwDebugEventCode
:
DWORD
dwProcessId
:
DWORD
dwThreadId
:
DWORD
u
:
DEBUG_EVENT_u
}
}
pub
type
LPDEBUG_EVENT
=
*
mut
DEBUG_EVENT
;
pub
type
LPCONTEXT
=
PCONTEXT
;
pub
const
STILL_ACTIVE
:
DWORD
=
STATUS_PENDING
as
u32
;
pub
const
EXCEPTION_ACCESS_VIOLATION
:
DWORD
=
STATUS_ACCESS_VIOLATION
as
u32
;
pub
const
EXCEPTION_DATATYPE_MISALIGNMENT
:
DWORD
=
STATUS_DATATYPE_MISALIGNMENT
as
u32
;
pub
const
EXCEPTION_BREAKPOINT
:
DWORD
=
STATUS_BREAKPOINT
as
u32
;
pub
const
EXCEPTION_SINGLE_STEP
:
DWORD
=
STATUS_SINGLE_STEP
as
u32
;
pub
const
EXCEPTION_ARRAY_BOUNDS_EXCEEDED
:
DWORD
=
STATUS_ARRAY_BOUNDS_EXCEEDED
as
u32
;
pub
const
EXCEPTION_FLT_DENORMAL_OPERAND
:
DWORD
=
STATUS_FLOAT_DENORMAL_OPERAND
as
u32
;
pub
const
EXCEPTION_FLT_DIVIDE_BY_ZERO
:
DWORD
=
STATUS_FLOAT_DIVIDE_BY_ZERO
as
u32
;
pub
const
EXCEPTION_FLT_INEXACT_RESULT
:
DWORD
=
STATUS_FLOAT_INEXACT_RESULT
as
u32
;
pub
const
EXCEPTION_FLT_INVALID_OPERATION
:
DWORD
=
STATUS_FLOAT_INVALID_OPERATION
as
u32
;
pub
const
EXCEPTION_FLT_OVERFLOW
:
DWORD
=
STATUS_FLOAT_OVERFLOW
as
u32
;
pub
const
EXCEPTION_FLT_STACK_CHECK
:
DWORD
=
STATUS_FLOAT_STACK_CHECK
as
u32
;
pub
const
EXCEPTION_FLT_UNDERFLOW
:
DWORD
=
STATUS_FLOAT_UNDERFLOW
as
u32
;
pub
const
EXCEPTION_INT_DIVIDE_BY_ZERO
:
DWORD
=
STATUS_INTEGER_DIVIDE_BY_ZERO
as
u32
;
pub
const
EXCEPTION_INT_OVERFLOW
:
DWORD
=
STATUS_INTEGER_OVERFLOW
as
u32
;
pub
const
EXCEPTION_PRIV_INSTRUCTION
:
DWORD
=
STATUS_PRIVILEGED_INSTRUCTION
as
u32
;
pub
const
EXCEPTION_IN_PAGE_ERROR
:
DWORD
=
STATUS_IN_PAGE_ERROR
as
u32
;
pub
const
EXCEPTION_ILLEGAL_INSTRUCTION
:
DWORD
=
STATUS_ILLEGAL_INSTRUCTION
as
u32
;
pub
const
EXCEPTION_NONCONTINUABLE_EXCEPTION
:
DWORD
=
STATUS_NONCONTINUABLE_EXCEPTION
as
u32
;
pub
const
EXCEPTION_STACK_OVERFLOW
:
DWORD
=
STATUS_STACK_OVERFLOW
as
u32
;
pub
const
EXCEPTION_INVALID_DISPOSITION
:
DWORD
=
STATUS_INVALID_DISPOSITION
as
u32
;
pub
const
EXCEPTION_GUARD_PAGE
:
DWORD
=
STATUS_GUARD_PAGE_VIOLATION
as
u32
;
pub
const
EXCEPTION_INVALID_HANDLE
:
DWORD
=
STATUS_INVALID_HANDLE
as
u32
;
pub
const
EXCEPTION_POSSIBLE_DEADLOCK
:
DWORD
=
STATUS_POSSIBLE_DEADLOCK
as
u32
;
pub
const
CONTROL_C_EXIT
:
DWORD
=
STATUS_CONTROL_C_EXIT
as
u32
;
pub
const
LMEM_FIXED
:
UINT
=
0x0000
;
pub
const
LMEM_MOVEABLE
:
UINT
=
0x0002
;
pub
const
LMEM_NOCOMPACT
:
UINT
=
0x0010
;
pub
const
LMEM_NODISCARD
:
UINT
=
0x0020
;
pub
const
LMEM_ZEROINIT
:
UINT
=
0x0040
;
pub
const
LMEM_MODIFY
:
UINT
=
0x0080
;
pub
const
LMEM_DISCARDABLE
:
UINT
=
0x0F00
;
pub
const
LMEM_VALID_FLAGS
:
UINT
=
0x0F72
;
pub
const
LMEM_INVALID_HANDLE
:
UINT
=
0x8000
;
pub
const
LHND
:
UINT
=
LMEM_MOVEABLE
|
LMEM_ZEROINIT
;
pub
const
LPTR
:
UINT
=
LMEM_FIXED
|
LMEM_ZEROINIT
;
pub
const
NONZEROLHND
:
UINT
=
LMEM_MOVEABLE
;
pub
const
NONZEROLPTR
:
UINT
=
LMEM_FIXED
;
/
/
LocalDiscard
pub
const
LMEM_DISCARDED
:
UINT
=
0x4000
;
pub
const
LMEM_LOCKCOUNT
:
UINT
=
0x00FF
;
pub
const
NUMA_NO_PREFERRED_NODE
:
DWORD
=
-
1i32
as
u32
; | UNION
!
{
union | random_line_split |
server.py | #!/usr/bin/python
import sys,os
import re
import pprint
import time
sys.path.append('lib')
import simplejson
import jsonpickle
import transcript
import joiner
import indexer
#from pdfs import create_pdf
from flask import Flask, request, session, g, redirect, url_for, abort, render_template, flash, make_response, jsonify
from contextlib import closing
if not os.path.exists(joiner.setupfile):
print "count not find setup file %s" % joiner.setupfile
sys.exit(1)
for k,v in jsonpickle.decode(open(joiner.setupfile, 'r').read())['server'].items():
globals()[k] = v
#CONSTANTS
#DEBUG = True
#PER_PAGE = 20
#MAX_QUERY_BUFFER = 100
SECRET_KEY = 'development key'
#VARIABLES
db = None
headers = None
queries = {}
app = Flask(__name__)
app.config.from_object(__name__)
app.jinja_env.globals['trim_blocks' ] = True
#APPLICATION CODE :: SETUP
@app.before_request
def | ():
g.db, g.headers, g.queries = getDb()
h = g.headers.keys()
h.sort()
h.reverse()
i = {}
for k in h:
i[k] = g.headers[k].keys()
i[k].sort()
g.headersNames = h
g.colsNames = i
@app.after_request
def after_request(response):
return response
@app.teardown_request
def teardown_request(exception):
pass
#APPLICATION CODE :: ROUTE
@app.route('/', methods=['GET'])
def initial():
sessionId = time.time()
if session.get('id'):
sessionId = session['id']
print "getting stored ID %s" % (sessionId)
else:
session['id'] = sessionId
print "storing new ID %s" % (sessionId)
resTemp = render_template('display_normal.html')
return resTemp
@app.route('/query/full/pdf/<int:page>', methods=['POST', 'PUT'])
@app.route('/query/full/<int:page>', methods=['POST', ])
@app.route('/query/<int:page>', methods=['POST', 'PUT'])
def query(page):
sessionId = time.time()
if session.get('id'):
sessionId = session['id']
print "getting stored ID %s" % (sessionId)
else:
session['id'] = sessionId
print "storing new ID %s" % (sessionId)
qry = {}
qrystr = request.form.keys()[0]
print "query string %s" % qrystr
qry = jsonpickle.decode(qrystr)
print "qry structure", qry
res = queryBuffer(sessionId, qry)
count = len(res)
flash(formatQuery(qry))
if count > 0:
if request.method == 'POST':
maxPage = count/PER_PAGE
if float(count)/PER_PAGE > maxPage:
maxPage += 1
if page > maxPage:
page = maxPage
perc = int((float(page) / maxPage) * 100)
beginPos = (page - 1) * PER_PAGE
print " count %d page %d max page %d perc %d%% begin pos %d" % (count, page, maxPage, perc, beginPos)
resPage = res
templateFile = 'response.html'
full = False
if str(request.path).startswith("/query/full"):
print " template display full"
templateFile = 'display_full.html'
full = True
if page > 0:
resPage = getResultForPage(res, page, PER_PAGE, count)
resKeys = resPage.keys()
resKeys.sort(key=transcript.sortNode)
resTemp = render_template(templateFile, page=page, count=count, maxPage=maxPage, perc=perc, beginPos=beginPos, resPage=resPage, resKeys=resKeys, full=full);
if str(request.path).startswith("/query/full/pdf"):
print " template display full PDF"
pdf = create_pdf(resTemp)
return pdf
else:
return resTemp
elif request.method == 'PUT':
return make_response(jsonpickle.encode(res))
else:
if request.method == 'POST':
if str(request.path).startswith("/query/full"):
resTemp = make_response("No match for query")
return resTemp
else:
resTemp = make_response("No match for query")
return resTemp
elif request.method == 'PUT':
return make_response(jsonpickle.encode(res))
#APPLICATION CODE :: ACESSORY FUNCTIONS
def queryBuffer(sessionId, qry):
if sessionId not in g.queries:
print g.queries.keys()
g.queries[sessionId] = []
res = None
if len(g.queries[sessionId]) == 0:
res = None
print "no queries to session %s" % (str(sessionId))
else:
if qry in [x[0] for x in g.queries[sessionId]]:
print "session %s has qry in store (%d)" % (str(sessionId), len(g.queries[sessionId]))
for i in range(len(g.queries[sessionId])):
lqry, lres = g.queries[sessionId][i]
print "%d: %s vs %s" % (i, lqry, qry)
if lqry == qry:
res = lres
g.queries[sessionId].pop(i)
g.queries[sessionId].append([lqry, lres])
print " session %s has qry at position %d" % (str(sessionId), i)
break
else:
print "session %s does not have qry in store" % (str(sessionId))
res = None
if res is None:
print "querying"
res = getResult(g.db, g.headers, qry)
g.queries[sessionId].append([qry, res])
if len(g.queries[sessionId]) > MAX_QUERY_BUFFER:
g.queries.pop(0)
return res
def getResult(db, headers, qry):
print "length qry %d" % len(qry)
if len(qry) > 0:
print "filtering"
dbN = {}
lists = []
for filetype in qry:
print ' analyzing filetype %s' % filetype
for fieldname in qry[filetype]:
print ' analyzing field %s' % fieldname
if ((filetype in headers) and (fieldname in headers[filetype])):
valN, qryType, index = headers[filetype][fieldname]
indexRes = index.res
qryValue = qry[filetype][fieldname]
if qryType == 'selectmultiple':
#TODO: check if variables are from the correct type
# and if the keys exists
lLists = []
for qrySelected in qryValue:
for i in range(len(indexRes)):
indexKey = indexRes[i][0]
indexVal = indexRes[i][1]
if indexKey == qrySelected:
qryRes = indexVal
lLists.append([filetype, fieldname, qrySelected, set(qryRes)])
break
lists.extend(lLists)
elif qryType == 'rangeminmax' or qryType == 'rangeminmaxlog':
minVal, maxVal = qryValue.split(" - ")
minVal = int(minVal)
maxVal = int(maxVal)
lLists = []
#print "MINMAX %d - %d" % (minVal, maxVal)
for i in range(len(indexRes)):
indexKey = indexRes[i][0]
indexVal = indexRes[i][1]
#print " VAL %d" % indexKey
if indexKey >= minVal:
#print " KEY %d <= VAL %d" % (indexKey, minVal)
if indexKey <= maxVal:
#print " KEY %d >= VAL %d" % (indexKey, maxVal)
qryRes = indexVal
lLists.extend(qryRes)
else:
break
lists.append([filetype, fieldname, qryValue, set(lLists)])
elif qryType == 'rangemin' or qryType == 'rangeminlog':
minVal = qryValue
minVal = int(minVal)
lLists = []
for i in range(len(indexRes)):
indexKey = indexRes[i][0]
indexVal = indexRes[i][1]
if indexKey >= minVal:
qryRes = indexVal
lLists.extend(qryRes)
lists.append([filetype, fieldname, qryValue, set(lLists)])
elif qryType == 'input':
lLists = []
for i in range(len(indexRes)):
indexKey = indexRes[i][0]
indexVal = indexRes[i][1]
if indexKey.find(qryValue) != -1:
qryRes = indexVal
lLists.extend(qryRes)
lists.append([filetype, fieldname, qryValue, set(lLists)])
else:
print " !! no such qry field type: %s !!" % qryType
else:
print " !! no such field %s !!" % fieldname
print headers[filetype].keys()
resIds = None
llist = [x[3] for x in lists]
resIds = set.intersection(*llist)
#for llist in lists:
# lFileType = llist[0]
# lFieldName = llist[1]
# lQryValue = llist[2]
# lResIds = llist[3]
#
# if len(lResIds) == 0:
# print " file type %s field %s qry %s yield no result" % (lFileType, lFieldName, lQryValue)
# resIds = set()
# break
#
# if resIds is None:
# print " file type %s field %s qry %s len %d is first result" % (lFileType, lFieldName, lQryValue, len(lResIds))
# resIds = set(lResIds)
# else:
# print " file type %s field %s qry %s len %d . intersecting (%d)" % (lFileType, lFieldName, lQryValue, len(lResIds), len(resIds))
# resIds = set(resIds).intersection(set(lResIds))
# print " file type %s field %s qry %s len %d . intersected (%d)" % (lFileType, lFieldName, lQryValue, len(lResIds), len(resIds))
#
# if len(resIds) == 0:
# print " colapsed to length 0"
# break
print " final list has %d entries" % (len(resIds))
for resId in resIds:
dbN[resId] = db[resId]
return dbN
else:
print "returning all"
return db
def getResultForPage(res, page, num_per_page, count):
resKeys = res.keys()
resKeys.sort(key=transcript.sortNode)
print " getting page %d" % page
begin = (page - 1) * num_per_page
end = (page * num_per_page)
lenRes = len(res)
if end > lenRes:
end = lenRes
if begin > end:
begin = end - num_per_page
print " len %d begin %d end %d" % (lenRes, begin, end)
resp = resKeys[begin: end]
outvals = {}
for k in resp:
outvals[k] = res[k]
return outvals
def formatQuery(qry):
res = None
for filetype in qry:
if res is not None:
res += " <strong>AND</strong> "
else:
res = ""
res += "<span class=\"label label-success\"><strong>%s</strong> :</span>" % filetype
for fieldname in qry[filetype]:
qryValue = qry[filetype][fieldname]
res += " <span class=\"label label-warning\">%s = '<em>%s</em>'</span>" % (fieldname, qryValue)
if res is None:
res = "<span class=\"label label-success\"><strong>All</strong></span>"
return res
#DATABASE
def init_db(dbfile, indexfile):
with app.app_context():
print "initializing db"
if not os.path.exists(dbfile):
print "NO DATABASE FILE %s" % dbfile
sys.exit(1)
if not os.path.exists(indexfile):
print "NO INDEX FILE %s" % indexfile
sys.exit(1)
global db
global headers
global queries
jsonpickle.set_preferred_backend('simplejson')
jsonpickle.set_encoder_options('simplejson', sort_keys=True, indent=1)
dataDb = open(dbfile, 'r').read()
dataIn = open(indexfile, 'r').read()
db, lHeadersPos, lKeys = jsonpickle.decode(dataDb)
headers = jsonpickle.decode(dataIn)
transcript.transcriptdata.headersPos, transcript.transcriptdata.keys = lHeadersPos, lKeys
if db is None:
print "no data in database"
sys.exit(1)
if len(db) == 0:
print "database is empty"
sys.exit(1)
if headers is None:
print "no data in index"
sys.exit(1)
if len(headers) == 0:
print "index is empty"
sys.exit(1)
print "db loaded. %d entries" % len(db)
def getDb():
if db is None:
init_db(joiner.dbfile, joiner.indexfile)
return [db, headers, queries]
if __name__ == '__main__':
if not os.path.exists(joiner.dbfile):
joiner.main()
if not os.path.exists(joiner.indexfile):
indexer.main()
app.run(port=PORT)
| before_request | identifier_name |
server.py | #!/usr/bin/python
import sys,os
import re
import pprint
import time
sys.path.append('lib')
import simplejson
import jsonpickle
import transcript
import joiner
import indexer
#from pdfs import create_pdf
from flask import Flask, request, session, g, redirect, url_for, abort, render_template, flash, make_response, jsonify
from contextlib import closing
if not os.path.exists(joiner.setupfile):
print "count not find setup file %s" % joiner.setupfile
sys.exit(1)
for k,v in jsonpickle.decode(open(joiner.setupfile, 'r').read())['server'].items():
globals()[k] = v
#CONSTANTS
#DEBUG = True
#PER_PAGE = 20
#MAX_QUERY_BUFFER = 100
SECRET_KEY = 'development key'
#VARIABLES
db = None
headers = None
queries = {}
app = Flask(__name__)
app.config.from_object(__name__)
app.jinja_env.globals['trim_blocks' ] = True
#APPLICATION CODE :: SETUP
@app.before_request
def before_request():
g.db, g.headers, g.queries = getDb()
h = g.headers.keys()
h.sort()
h.reverse()
i = {}
for k in h:
i[k] = g.headers[k].keys()
i[k].sort()
g.headersNames = h
g.colsNames = i
@app.after_request
def after_request(response):
return response
@app.teardown_request
def teardown_request(exception):
pass
#APPLICATION CODE :: ROUTE
@app.route('/', methods=['GET'])
def initial():
sessionId = time.time()
if session.get('id'):
sessionId = session['id']
print "getting stored ID %s" % (sessionId)
else:
session['id'] = sessionId
print "storing new ID %s" % (sessionId)
resTemp = render_template('display_normal.html')
return resTemp
@app.route('/query/full/pdf/<int:page>', methods=['POST', 'PUT'])
@app.route('/query/full/<int:page>', methods=['POST', ])
@app.route('/query/<int:page>', methods=['POST', 'PUT'])
def query(page):
sessionId = time.time()
if session.get('id'):
sessionId = session['id']
print "getting stored ID %s" % (sessionId)
else:
session['id'] = sessionId
print "storing new ID %s" % (sessionId)
qry = {}
qrystr = request.form.keys()[0]
print "query string %s" % qrystr
qry = jsonpickle.decode(qrystr)
print "qry structure", qry
res = queryBuffer(sessionId, qry)
count = len(res)
flash(formatQuery(qry))
if count > 0:
if request.method == 'POST':
maxPage = count/PER_PAGE
if float(count)/PER_PAGE > maxPage:
maxPage += 1
if page > maxPage:
page = maxPage
perc = int((float(page) / maxPage) * 100)
beginPos = (page - 1) * PER_PAGE
print " count %d page %d max page %d perc %d%% begin pos %d" % (count, page, maxPage, perc, beginPos)
resPage = res
templateFile = 'response.html'
full = False
if str(request.path).startswith("/query/full"):
print " template display full"
templateFile = 'display_full.html'
full = True
if page > 0:
resPage = getResultForPage(res, page, PER_PAGE, count)
resKeys = resPage.keys()
resKeys.sort(key=transcript.sortNode)
resTemp = render_template(templateFile, page=page, count=count, maxPage=maxPage, perc=perc, beginPos=beginPos, resPage=resPage, resKeys=resKeys, full=full);
if str(request.path).startswith("/query/full/pdf"):
print " template display full PDF"
pdf = create_pdf(resTemp)
return pdf
else:
return resTemp
elif request.method == 'PUT':
return make_response(jsonpickle.encode(res))
else:
if request.method == 'POST':
if str(request.path).startswith("/query/full"):
resTemp = make_response("No match for query")
return resTemp
else:
resTemp = make_response("No match for query")
return resTemp
elif request.method == 'PUT':
return make_response(jsonpickle.encode(res))
#APPLICATION CODE :: ACESSORY FUNCTIONS
def queryBuffer(sessionId, qry):
if sessionId not in g.queries:
print g.queries.keys()
g.queries[sessionId] = []
res = None
if len(g.queries[sessionId]) == 0:
res = None
print "no queries to session %s" % (str(sessionId))
else:
if qry in [x[0] for x in g.queries[sessionId]]:
print "session %s has qry in store (%d)" % (str(sessionId), len(g.queries[sessionId]))
for i in range(len(g.queries[sessionId])):
lqry, lres = g.queries[sessionId][i]
print "%d: %s vs %s" % (i, lqry, qry)
if lqry == qry:
res = lres
g.queries[sessionId].pop(i)
g.queries[sessionId].append([lqry, lres])
print " session %s has qry at position %d" % (str(sessionId), i)
break
else:
print "session %s does not have qry in store" % (str(sessionId))
res = None
if res is None:
print "querying"
res = getResult(g.db, g.headers, qry)
g.queries[sessionId].append([qry, res])
if len(g.queries[sessionId]) > MAX_QUERY_BUFFER:
g.queries.pop(0)
return res
def getResult(db, headers, qry):
print "length qry %d" % len(qry)
if len(qry) > 0:
print "filtering"
dbN = {}
lists = []
for filetype in qry:
print ' analyzing filetype %s' % filetype
for fieldname in qry[filetype]:
print ' analyzing field %s' % fieldname
if ((filetype in headers) and (fieldname in headers[filetype])):
valN, qryType, index = headers[filetype][fieldname]
indexRes = index.res
qryValue = qry[filetype][fieldname]
if qryType == 'selectmultiple':
#TODO: check if variables are from the correct type
# and if the keys exists
lLists = []
for qrySelected in qryValue:
for i in range(len(indexRes)):
indexKey = indexRes[i][0]
indexVal = indexRes[i][1]
if indexKey == qrySelected:
qryRes = indexVal
lLists.append([filetype, fieldname, qrySelected, set(qryRes)])
break
lists.extend(lLists)
elif qryType == 'rangeminmax' or qryType == 'rangeminmaxlog':
minVal, maxVal = qryValue.split(" - ")
minVal = int(minVal)
maxVal = int(maxVal)
lLists = []
#print "MINMAX %d - %d" % (minVal, maxVal)
for i in range(len(indexRes)):
indexKey = indexRes[i][0]
indexVal = indexRes[i][1]
#print " VAL %d" % indexKey
if indexKey >= minVal:
#print " KEY %d <= VAL %d" % (indexKey, minVal)
if indexKey <= maxVal:
#print " KEY %d >= VAL %d" % (indexKey, maxVal)
qryRes = indexVal
lLists.extend(qryRes)
else:
break
lists.append([filetype, fieldname, qryValue, set(lLists)])
elif qryType == 'rangemin' or qryType == 'rangeminlog':
minVal = qryValue
minVal = int(minVal)
lLists = []
for i in range(len(indexRes)):
indexKey = indexRes[i][0]
indexVal = indexRes[i][1]
if indexKey >= minVal:
qryRes = indexVal
lLists.extend(qryRes)
lists.append([filetype, fieldname, qryValue, set(lLists)])
elif qryType == 'input':
lLists = []
for i in range(len(indexRes)):
indexKey = indexRes[i][0]
indexVal = indexRes[i][1]
if indexKey.find(qryValue) != -1:
qryRes = indexVal
lLists.extend(qryRes)
lists.append([filetype, fieldname, qryValue, set(lLists)])
else:
print " !! no such qry field type: %s !!" % qryType
else:
print " !! no such field %s !!" % fieldname
print headers[filetype].keys()
resIds = None
llist = [x[3] for x in lists]
resIds = set.intersection(*llist)
#for llist in lists:
# lFileType = llist[0]
# lFieldName = llist[1]
# lQryValue = llist[2]
# lResIds = llist[3]
#
# if len(lResIds) == 0:
# print " file type %s field %s qry %s yield no result" % (lFileType, lFieldName, lQryValue)
# resIds = set()
# break
#
# if resIds is None:
# print " file type %s field %s qry %s len %d is first result" % (lFileType, lFieldName, lQryValue, len(lResIds))
# resIds = set(lResIds)
# else:
# print " file type %s field %s qry %s len %d . intersecting (%d)" % (lFileType, lFieldName, lQryValue, len(lResIds), len(resIds))
# resIds = set(resIds).intersection(set(lResIds))
# print " file type %s field %s qry %s len %d . intersected (%d)" % (lFileType, lFieldName, lQryValue, len(lResIds), len(resIds))
#
# if len(resIds) == 0:
# print " colapsed to length 0"
# break
print " final list has %d entries" % (len(resIds))
for resId in resIds:
dbN[resId] = db[resId]
return dbN
else:
print "returning all"
return db
def getResultForPage(res, page, num_per_page, count):
resKeys = res.keys()
resKeys.sort(key=transcript.sortNode)
print " getting page %d" % page
begin = (page - 1) * num_per_page
end = (page * num_per_page)
lenRes = len(res)
if end > lenRes:
end = lenRes
if begin > end:
begin = end - num_per_page
print " len %d begin %d end %d" % (lenRes, begin, end)
resp = resKeys[begin: end]
outvals = {}
for k in resp:
outvals[k] = res[k]
return outvals
def formatQuery(qry):
res = None
for filetype in qry:
if res is not None:
res += " <strong>AND</strong> "
else:
res = ""
res += "<span class=\"label label-success\"><strong>%s</strong> :</span>" % filetype
for fieldname in qry[filetype]:
qryValue = qry[filetype][fieldname]
res += " <span class=\"label label-warning\">%s = '<em>%s</em>'</span>" % (fieldname, qryValue)
if res is None:
res = "<span class=\"label label-success\"><strong>All</strong></span>"
return res
#DATABASE
def init_db(dbfile, indexfile):
with app.app_context():
print "initializing db"
if not os.path.exists(dbfile):
print "NO DATABASE FILE %s" % dbfile
sys.exit(1)
if not os.path.exists(indexfile):
print "NO INDEX FILE %s" % indexfile
sys.exit(1)
global db
global headers
global queries
jsonpickle.set_preferred_backend('simplejson')
jsonpickle.set_encoder_options('simplejson', sort_keys=True, indent=1)
dataDb = open(dbfile, 'r').read()
dataIn = open(indexfile, 'r').read()
db, lHeadersPos, lKeys = jsonpickle.decode(dataDb)
headers = jsonpickle.decode(dataIn)
transcript.transcriptdata.headersPos, transcript.transcriptdata.keys = lHeadersPos, lKeys
if db is None:
print "no data in database"
sys.exit(1)
if len(db) == 0:
print "database is empty"
sys.exit(1)
if headers is None:
print "no data in index"
sys.exit(1)
if len(headers) == 0:
print "index is empty"
sys.exit(1)
print "db loaded. %d entries" % len(db)
def getDb():
|
if __name__ == '__main__':
if not os.path.exists(joiner.dbfile):
joiner.main()
if not os.path.exists(joiner.indexfile):
indexer.main()
app.run(port=PORT)
| if db is None:
init_db(joiner.dbfile, joiner.indexfile)
return [db, headers, queries] | identifier_body |
server.py | #!/usr/bin/python
import sys,os
import re
import pprint
import time
sys.path.append('lib')
import simplejson
import jsonpickle
import transcript
import joiner
import indexer
#from pdfs import create_pdf
from flask import Flask, request, session, g, redirect, url_for, abort, render_template, flash, make_response, jsonify
from contextlib import closing
if not os.path.exists(joiner.setupfile):
print "count not find setup file %s" % joiner.setupfile
sys.exit(1)
for k,v in jsonpickle.decode(open(joiner.setupfile, 'r').read())['server'].items():
globals()[k] = v
#CONSTANTS
#DEBUG = True
#PER_PAGE = 20
#MAX_QUERY_BUFFER = 100
SECRET_KEY = 'development key'
#VARIABLES
db = None
headers = None
queries = {}
app = Flask(__name__)
app.config.from_object(__name__)
app.jinja_env.globals['trim_blocks' ] = True
#APPLICATION CODE :: SETUP
@app.before_request
def before_request():
g.db, g.headers, g.queries = getDb()
h = g.headers.keys()
h.sort()
h.reverse()
i = {}
for k in h:
i[k] = g.headers[k].keys()
i[k].sort()
g.headersNames = h
g.colsNames = i
@app.after_request
def after_request(response):
return response
@app.teardown_request
def teardown_request(exception):
pass
#APPLICATION CODE :: ROUTE
@app.route('/', methods=['GET'])
def initial():
sessionId = time.time()
if session.get('id'):
sessionId = session['id']
print "getting stored ID %s" % (sessionId)
else:
session['id'] = sessionId
print "storing new ID %s" % (sessionId)
resTemp = render_template('display_normal.html')
return resTemp
@app.route('/query/full/pdf/<int:page>', methods=['POST', 'PUT'])
@app.route('/query/full/<int:page>', methods=['POST', ])
@app.route('/query/<int:page>', methods=['POST', 'PUT'])
def query(page):
sessionId = time.time()
if session.get('id'):
sessionId = session['id']
print "getting stored ID %s" % (sessionId)
else:
session['id'] = sessionId
print "storing new ID %s" % (sessionId)
qry = {}
qrystr = request.form.keys()[0]
print "query string %s" % qrystr
qry = jsonpickle.decode(qrystr)
print "qry structure", qry
res = queryBuffer(sessionId, qry)
count = len(res)
flash(formatQuery(qry))
if count > 0:
if request.method == 'POST':
maxPage = count/PER_PAGE
if float(count)/PER_PAGE > maxPage:
|
if page > maxPage:
page = maxPage
perc = int((float(page) / maxPage) * 100)
beginPos = (page - 1) * PER_PAGE
print " count %d page %d max page %d perc %d%% begin pos %d" % (count, page, maxPage, perc, beginPos)
resPage = res
templateFile = 'response.html'
full = False
if str(request.path).startswith("/query/full"):
print " template display full"
templateFile = 'display_full.html'
full = True
if page > 0:
resPage = getResultForPage(res, page, PER_PAGE, count)
resKeys = resPage.keys()
resKeys.sort(key=transcript.sortNode)
resTemp = render_template(templateFile, page=page, count=count, maxPage=maxPage, perc=perc, beginPos=beginPos, resPage=resPage, resKeys=resKeys, full=full);
if str(request.path).startswith("/query/full/pdf"):
print " template display full PDF"
pdf = create_pdf(resTemp)
return pdf
else:
return resTemp
elif request.method == 'PUT':
return make_response(jsonpickle.encode(res))
else:
if request.method == 'POST':
if str(request.path).startswith("/query/full"):
resTemp = make_response("No match for query")
return resTemp
else:
resTemp = make_response("No match for query")
return resTemp
elif request.method == 'PUT':
return make_response(jsonpickle.encode(res))
#APPLICATION CODE :: ACESSORY FUNCTIONS
def queryBuffer(sessionId, qry):
if sessionId not in g.queries:
print g.queries.keys()
g.queries[sessionId] = []
res = None
if len(g.queries[sessionId]) == 0:
res = None
print "no queries to session %s" % (str(sessionId))
else:
if qry in [x[0] for x in g.queries[sessionId]]:
print "session %s has qry in store (%d)" % (str(sessionId), len(g.queries[sessionId]))
for i in range(len(g.queries[sessionId])):
lqry, lres = g.queries[sessionId][i]
print "%d: %s vs %s" % (i, lqry, qry)
if lqry == qry:
res = lres
g.queries[sessionId].pop(i)
g.queries[sessionId].append([lqry, lres])
print " session %s has qry at position %d" % (str(sessionId), i)
break
else:
print "session %s does not have qry in store" % (str(sessionId))
res = None
if res is None:
print "querying"
res = getResult(g.db, g.headers, qry)
g.queries[sessionId].append([qry, res])
if len(g.queries[sessionId]) > MAX_QUERY_BUFFER:
g.queries.pop(0)
return res
def getResult(db, headers, qry):
print "length qry %d" % len(qry)
if len(qry) > 0:
print "filtering"
dbN = {}
lists = []
for filetype in qry:
print ' analyzing filetype %s' % filetype
for fieldname in qry[filetype]:
print ' analyzing field %s' % fieldname
if ((filetype in headers) and (fieldname in headers[filetype])):
valN, qryType, index = headers[filetype][fieldname]
indexRes = index.res
qryValue = qry[filetype][fieldname]
if qryType == 'selectmultiple':
#TODO: check if variables are from the correct type
# and if the keys exists
lLists = []
for qrySelected in qryValue:
for i in range(len(indexRes)):
indexKey = indexRes[i][0]
indexVal = indexRes[i][1]
if indexKey == qrySelected:
qryRes = indexVal
lLists.append([filetype, fieldname, qrySelected, set(qryRes)])
break
lists.extend(lLists)
elif qryType == 'rangeminmax' or qryType == 'rangeminmaxlog':
minVal, maxVal = qryValue.split(" - ")
minVal = int(minVal)
maxVal = int(maxVal)
lLists = []
#print "MINMAX %d - %d" % (minVal, maxVal)
for i in range(len(indexRes)):
indexKey = indexRes[i][0]
indexVal = indexRes[i][1]
#print " VAL %d" % indexKey
if indexKey >= minVal:
#print " KEY %d <= VAL %d" % (indexKey, minVal)
if indexKey <= maxVal:
#print " KEY %d >= VAL %d" % (indexKey, maxVal)
qryRes = indexVal
lLists.extend(qryRes)
else:
break
lists.append([filetype, fieldname, qryValue, set(lLists)])
elif qryType == 'rangemin' or qryType == 'rangeminlog':
minVal = qryValue
minVal = int(minVal)
lLists = []
for i in range(len(indexRes)):
indexKey = indexRes[i][0]
indexVal = indexRes[i][1]
if indexKey >= minVal:
qryRes = indexVal
lLists.extend(qryRes)
lists.append([filetype, fieldname, qryValue, set(lLists)])
elif qryType == 'input':
lLists = []
for i in range(len(indexRes)):
indexKey = indexRes[i][0]
indexVal = indexRes[i][1]
if indexKey.find(qryValue) != -1:
qryRes = indexVal
lLists.extend(qryRes)
lists.append([filetype, fieldname, qryValue, set(lLists)])
else:
print " !! no such qry field type: %s !!" % qryType
else:
print " !! no such field %s !!" % fieldname
print headers[filetype].keys()
resIds = None
llist = [x[3] for x in lists]
resIds = set.intersection(*llist)
#for llist in lists:
# lFileType = llist[0]
# lFieldName = llist[1]
# lQryValue = llist[2]
# lResIds = llist[3]
#
# if len(lResIds) == 0:
# print " file type %s field %s qry %s yield no result" % (lFileType, lFieldName, lQryValue)
# resIds = set()
# break
#
# if resIds is None:
# print " file type %s field %s qry %s len %d is first result" % (lFileType, lFieldName, lQryValue, len(lResIds))
# resIds = set(lResIds)
# else:
# print " file type %s field %s qry %s len %d . intersecting (%d)" % (lFileType, lFieldName, lQryValue, len(lResIds), len(resIds))
# resIds = set(resIds).intersection(set(lResIds))
# print " file type %s field %s qry %s len %d . intersected (%d)" % (lFileType, lFieldName, lQryValue, len(lResIds), len(resIds))
#
# if len(resIds) == 0:
# print " colapsed to length 0"
# break
print " final list has %d entries" % (len(resIds))
for resId in resIds:
dbN[resId] = db[resId]
return dbN
else:
print "returning all"
return db
def getResultForPage(res, page, num_per_page, count):
resKeys = res.keys()
resKeys.sort(key=transcript.sortNode)
print " getting page %d" % page
begin = (page - 1) * num_per_page
end = (page * num_per_page)
lenRes = len(res)
if end > lenRes:
end = lenRes
if begin > end:
begin = end - num_per_page
print " len %d begin %d end %d" % (lenRes, begin, end)
resp = resKeys[begin: end]
outvals = {}
for k in resp:
outvals[k] = res[k]
return outvals
def formatQuery(qry):
res = None
for filetype in qry:
if res is not None:
res += " <strong>AND</strong> "
else:
res = ""
res += "<span class=\"label label-success\"><strong>%s</strong> :</span>" % filetype
for fieldname in qry[filetype]:
qryValue = qry[filetype][fieldname]
res += " <span class=\"label label-warning\">%s = '<em>%s</em>'</span>" % (fieldname, qryValue)
if res is None:
res = "<span class=\"label label-success\"><strong>All</strong></span>"
return res
#DATABASE
def init_db(dbfile, indexfile):
with app.app_context():
print "initializing db"
if not os.path.exists(dbfile):
print "NO DATABASE FILE %s" % dbfile
sys.exit(1)
if not os.path.exists(indexfile):
print "NO INDEX FILE %s" % indexfile
sys.exit(1)
global db
global headers
global queries
jsonpickle.set_preferred_backend('simplejson')
jsonpickle.set_encoder_options('simplejson', sort_keys=True, indent=1)
dataDb = open(dbfile, 'r').read()
dataIn = open(indexfile, 'r').read()
db, lHeadersPos, lKeys = jsonpickle.decode(dataDb)
headers = jsonpickle.decode(dataIn)
transcript.transcriptdata.headersPos, transcript.transcriptdata.keys = lHeadersPos, lKeys
if db is None:
print "no data in database"
sys.exit(1)
if len(db) == 0:
print "database is empty"
sys.exit(1)
if headers is None:
print "no data in index"
sys.exit(1)
if len(headers) == 0:
print "index is empty"
sys.exit(1)
print "db loaded. %d entries" % len(db)
def getDb():
if db is None:
init_db(joiner.dbfile, joiner.indexfile)
return [db, headers, queries]
if __name__ == '__main__':
if not os.path.exists(joiner.dbfile):
joiner.main()
if not os.path.exists(joiner.indexfile):
indexer.main()
app.run(port=PORT)
| maxPage += 1 | conditional_block |
server.py | #!/usr/bin/python
import sys,os
import re
import pprint
import time
sys.path.append('lib')
import simplejson
import jsonpickle
import transcript
import joiner
import indexer
#from pdfs import create_pdf
from flask import Flask, request, session, g, redirect, url_for, abort, render_template, flash, make_response, jsonify
from contextlib import closing
if not os.path.exists(joiner.setupfile):
print "count not find setup file %s" % joiner.setupfile
sys.exit(1)
for k,v in jsonpickle.decode(open(joiner.setupfile, 'r').read())['server'].items():
globals()[k] = v
#CONSTANTS
#DEBUG = True
#PER_PAGE = 20
#MAX_QUERY_BUFFER = 100
SECRET_KEY = 'development key'
#VARIABLES
db = None
headers = None
queries = {}
app = Flask(__name__)
app.config.from_object(__name__)
app.jinja_env.globals['trim_blocks' ] = True
#APPLICATION CODE :: SETUP
@app.before_request
def before_request():
g.db, g.headers, g.queries = getDb()
h = g.headers.keys()
h.sort()
h.reverse()
i = {}
for k in h:
i[k] = g.headers[k].keys()
i[k].sort()
g.headersNames = h
g.colsNames = i
@app.after_request
def after_request(response):
return response
@app.teardown_request
def teardown_request(exception):
pass
#APPLICATION CODE :: ROUTE
@app.route('/', methods=['GET'])
def initial():
sessionId = time.time()
if session.get('id'):
sessionId = session['id']
print "getting stored ID %s" % (sessionId)
else:
session['id'] = sessionId
print "storing new ID %s" % (sessionId)
resTemp = render_template('display_normal.html')
return resTemp
@app.route('/query/full/pdf/<int:page>', methods=['POST', 'PUT'])
@app.route('/query/full/<int:page>', methods=['POST', ])
@app.route('/query/<int:page>', methods=['POST', 'PUT'])
def query(page):
sessionId = time.time()
if session.get('id'):
sessionId = session['id']
print "getting stored ID %s" % (sessionId)
else:
session['id'] = sessionId
print "storing new ID %s" % (sessionId)
qry = {}
qrystr = request.form.keys()[0]
print "query string %s" % qrystr
qry = jsonpickle.decode(qrystr)
print "qry structure", qry
res = queryBuffer(sessionId, qry)
count = len(res)
flash(formatQuery(qry))
if count > 0:
if request.method == 'POST':
maxPage = count/PER_PAGE
if float(count)/PER_PAGE > maxPage:
maxPage += 1
if page > maxPage:
page = maxPage
perc = int((float(page) / maxPage) * 100)
beginPos = (page - 1) * PER_PAGE
print " count %d page %d max page %d perc %d%% begin pos %d" % (count, page, maxPage, perc, beginPos)
resPage = res
templateFile = 'response.html'
full = False
if str(request.path).startswith("/query/full"):
print " template display full"
templateFile = 'display_full.html'
full = True
if page > 0:
resPage = getResultForPage(res, page, PER_PAGE, count)
resKeys = resPage.keys()
resKeys.sort(key=transcript.sortNode)
resTemp = render_template(templateFile, page=page, count=count, maxPage=maxPage, perc=perc, beginPos=beginPos, resPage=resPage, resKeys=resKeys, full=full);
if str(request.path).startswith("/query/full/pdf"):
print " template display full PDF"
pdf = create_pdf(resTemp)
return pdf
else:
return resTemp
elif request.method == 'PUT':
return make_response(jsonpickle.encode(res))
else:
if request.method == 'POST':
if str(request.path).startswith("/query/full"):
resTemp = make_response("No match for query")
return resTemp
else:
resTemp = make_response("No match for query")
return resTemp
elif request.method == 'PUT':
return make_response(jsonpickle.encode(res))
| g.queries[sessionId] = []
res = None
if len(g.queries[sessionId]) == 0:
res = None
print "no queries to session %s" % (str(sessionId))
else:
if qry in [x[0] for x in g.queries[sessionId]]:
print "session %s has qry in store (%d)" % (str(sessionId), len(g.queries[sessionId]))
for i in range(len(g.queries[sessionId])):
lqry, lres = g.queries[sessionId][i]
print "%d: %s vs %s" % (i, lqry, qry)
if lqry == qry:
res = lres
g.queries[sessionId].pop(i)
g.queries[sessionId].append([lqry, lres])
print " session %s has qry at position %d" % (str(sessionId), i)
break
else:
print "session %s does not have qry in store" % (str(sessionId))
res = None
if res is None:
print "querying"
res = getResult(g.db, g.headers, qry)
g.queries[sessionId].append([qry, res])
if len(g.queries[sessionId]) > MAX_QUERY_BUFFER:
g.queries.pop(0)
return res
def getResult(db, headers, qry):
print "length qry %d" % len(qry)
if len(qry) > 0:
print "filtering"
dbN = {}
lists = []
for filetype in qry:
print ' analyzing filetype %s' % filetype
for fieldname in qry[filetype]:
print ' analyzing field %s' % fieldname
if ((filetype in headers) and (fieldname in headers[filetype])):
valN, qryType, index = headers[filetype][fieldname]
indexRes = index.res
qryValue = qry[filetype][fieldname]
if qryType == 'selectmultiple':
#TODO: check if variables are from the correct type
# and if the keys exists
lLists = []
for qrySelected in qryValue:
for i in range(len(indexRes)):
indexKey = indexRes[i][0]
indexVal = indexRes[i][1]
if indexKey == qrySelected:
qryRes = indexVal
lLists.append([filetype, fieldname, qrySelected, set(qryRes)])
break
lists.extend(lLists)
elif qryType == 'rangeminmax' or qryType == 'rangeminmaxlog':
minVal, maxVal = qryValue.split(" - ")
minVal = int(minVal)
maxVal = int(maxVal)
lLists = []
#print "MINMAX %d - %d" % (minVal, maxVal)
for i in range(len(indexRes)):
indexKey = indexRes[i][0]
indexVal = indexRes[i][1]
#print " VAL %d" % indexKey
if indexKey >= minVal:
#print " KEY %d <= VAL %d" % (indexKey, minVal)
if indexKey <= maxVal:
#print " KEY %d >= VAL %d" % (indexKey, maxVal)
qryRes = indexVal
lLists.extend(qryRes)
else:
break
lists.append([filetype, fieldname, qryValue, set(lLists)])
elif qryType == 'rangemin' or qryType == 'rangeminlog':
minVal = qryValue
minVal = int(minVal)
lLists = []
for i in range(len(indexRes)):
indexKey = indexRes[i][0]
indexVal = indexRes[i][1]
if indexKey >= minVal:
qryRes = indexVal
lLists.extend(qryRes)
lists.append([filetype, fieldname, qryValue, set(lLists)])
elif qryType == 'input':
lLists = []
for i in range(len(indexRes)):
indexKey = indexRes[i][0]
indexVal = indexRes[i][1]
if indexKey.find(qryValue) != -1:
qryRes = indexVal
lLists.extend(qryRes)
lists.append([filetype, fieldname, qryValue, set(lLists)])
else:
print " !! no such qry field type: %s !!" % qryType
else:
print " !! no such field %s !!" % fieldname
print headers[filetype].keys()
resIds = None
llist = [x[3] for x in lists]
resIds = set.intersection(*llist)
#for llist in lists:
# lFileType = llist[0]
# lFieldName = llist[1]
# lQryValue = llist[2]
# lResIds = llist[3]
#
# if len(lResIds) == 0:
# print " file type %s field %s qry %s yield no result" % (lFileType, lFieldName, lQryValue)
# resIds = set()
# break
#
# if resIds is None:
# print " file type %s field %s qry %s len %d is first result" % (lFileType, lFieldName, lQryValue, len(lResIds))
# resIds = set(lResIds)
# else:
# print " file type %s field %s qry %s len %d . intersecting (%d)" % (lFileType, lFieldName, lQryValue, len(lResIds), len(resIds))
# resIds = set(resIds).intersection(set(lResIds))
# print " file type %s field %s qry %s len %d . intersected (%d)" % (lFileType, lFieldName, lQryValue, len(lResIds), len(resIds))
#
# if len(resIds) == 0:
# print " colapsed to length 0"
# break
print " final list has %d entries" % (len(resIds))
for resId in resIds:
dbN[resId] = db[resId]
return dbN
else:
print "returning all"
return db
def getResultForPage(res, page, num_per_page, count):
resKeys = res.keys()
resKeys.sort(key=transcript.sortNode)
print " getting page %d" % page
begin = (page - 1) * num_per_page
end = (page * num_per_page)
lenRes = len(res)
if end > lenRes:
end = lenRes
if begin > end:
begin = end - num_per_page
print " len %d begin %d end %d" % (lenRes, begin, end)
resp = resKeys[begin: end]
outvals = {}
for k in resp:
outvals[k] = res[k]
return outvals
def formatQuery(qry):
res = None
for filetype in qry:
if res is not None:
res += " <strong>AND</strong> "
else:
res = ""
res += "<span class=\"label label-success\"><strong>%s</strong> :</span>" % filetype
for fieldname in qry[filetype]:
qryValue = qry[filetype][fieldname]
res += " <span class=\"label label-warning\">%s = '<em>%s</em>'</span>" % (fieldname, qryValue)
if res is None:
res = "<span class=\"label label-success\"><strong>All</strong></span>"
return res
#DATABASE
def init_db(dbfile, indexfile):
with app.app_context():
print "initializing db"
if not os.path.exists(dbfile):
print "NO DATABASE FILE %s" % dbfile
sys.exit(1)
if not os.path.exists(indexfile):
print "NO INDEX FILE %s" % indexfile
sys.exit(1)
global db
global headers
global queries
jsonpickle.set_preferred_backend('simplejson')
jsonpickle.set_encoder_options('simplejson', sort_keys=True, indent=1)
dataDb = open(dbfile, 'r').read()
dataIn = open(indexfile, 'r').read()
db, lHeadersPos, lKeys = jsonpickle.decode(dataDb)
headers = jsonpickle.decode(dataIn)
transcript.transcriptdata.headersPos, transcript.transcriptdata.keys = lHeadersPos, lKeys
if db is None:
print "no data in database"
sys.exit(1)
if len(db) == 0:
print "database is empty"
sys.exit(1)
if headers is None:
print "no data in index"
sys.exit(1)
if len(headers) == 0:
print "index is empty"
sys.exit(1)
print "db loaded. %d entries" % len(db)
def getDb():
if db is None:
init_db(joiner.dbfile, joiner.indexfile)
return [db, headers, queries]
if __name__ == '__main__':
if not os.path.exists(joiner.dbfile):
joiner.main()
if not os.path.exists(joiner.indexfile):
indexer.main()
app.run(port=PORT) |
#APPLICATION CODE :: ACESSORY FUNCTIONS
def queryBuffer(sessionId, qry):
if sessionId not in g.queries:
print g.queries.keys() | random_line_split |
metar.py | '''
X-plane NOAA GFS weather plugin.
Copyright (C) 2012-2015 Joan Perez i Cauhe
---
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or any later version.
'''
import re
import os
import sqlite3
import math
import sys
import shutil
from datetime import datetime, timedelta
import time
from util import util
from c import c
from asyncdownload import AsyncDownload
class Metar:
'''
Metar download and interpretation class
'''
# Metar parse regex
RE_CLOUD = re.compile(r'\b(FEW|BKN|SCT|OVC|VV)([0-9]+)([A-Z][A-Z][A-Z]?)?\b')
RE_WIND = re.compile(r'\b(VRB|[0-9]{3})([0-9]{2,3})(G[0-9]{2,3})?(MPH|KT?|MPS|KMH)\b')
RE_VARIABLE_WIND = re.compile(r'\b([0-9]{3})V([0-9]{3})\b')
RE_VISIBILITY = re.compile(r'\b(CAVOK|[PM]?([0-9]{4})|([0-9] )?([0-9]{1,2})(/[0-9])?(SM|KM))\b')
RE_PRESSURE = re.compile(r'\b(Q|QNH|SLP|A)[ ]?([0-9]{3,4})\b')
RE_TEMPERATURE = re.compile(r'\b(M|-)?([0-9]{1,2})/(M|-)?([0-9]{1,2})\b')
RE_TEMPERATURE2 = re.compile(r'\bT(0|1)([0-9]{3})(0|1)([0-9]{3})\b')
RE_PRECIPITATION = re.compile('(-|\+)?(RE)?(DZ|SG|IC|PL|SH)?(DZ|RA|SN|TS)(NO|E)?')
METAR_STATIONS_URL = 'https://www.aviationweather.gov/docs/metar/stations.txt'
NOAA_METAR_URL = 'https://aviationweather.gov/adds/dataserver_current/current/metars.cache.csv.gz'
VATSIM_METAR_URL = 'https://metar.vatsim.net/metar.php?id=all'
IVAO_METAR_URL = 'https://wx.ivao.aero/metar.php'
STATION_UPDATE_RATE = 30 # In days
def __init__(self, conf):
self.conf = conf
self.cachepath = os.sep.join([conf.cachepath, 'metar'])
if not os.path.exists(self.cachepath):
os.makedirs(self.cachepath)
self.database = os.sep.join([self.cachepath, 'metar.db'])
self.th_db = False
# Weather variables
self.weather = None
self.reparse = True
# Download flags
self.ms_download = False
self.downloading = False
self.next_metarRWX = time.time() + 30
# Main db connection, create db if doens't exist
createdb = True
if os.path.exists(self.database):
createdb = False
self.connection = self.dbConnect(self.database)
self.cursor = self.connection.cursor()
if createdb:
self.conf.ms_update = 0
self.dbCreate(self.connection)
# Metar stations update
if (time.time() - self.conf.ms_update) > self.STATION_UPDATE_RATE * 86400:
self.ms_download = AsyncDownload(self.conf, self.METAR_STATIONS_URL, os.sep.join(['metar', 'stations.txt']))
self.last_latlon, self.last_station, self.last_timestamp = [False]*3
def dbConnect(self, path):
return sqlite3.connect(path, check_same_thread=False)
def dbCreate(self, db):
cursor = db.cursor()
cursor.execute('''CREATE TABLE airports (icao text KEY UNIQUE, lat real, lon real, elevation int,
timestamp int KEY, metar text)''')
db.commit()
def updateStations(self, db, path):
''' Updates aiports db from metar stations file'''
self.conf.ms_update = time.time()
cursor = db.cursor()
f = open(path, 'r')
n = 0
for line in f.readlines():
if line[0] != '!' and len(line) > 80:
icao = line[20:24]
lat = float(line[39:41]) + round(float(line[42:44])/60, 4)
if line[44] == 'S':
lat *= -1
lon = float(line[47:50]) + round(float(line[51:53])/60, 4)
if line[53] == 'W':
lon *= -1
elevation = int(line[55:59])
if line[20] != ' ' and line[51] != '9':
cursor.execute('INSERT OR REPLACE INTO airports (icao, lat, lon, elevation, timestamp) VALUES (?,?,?,?,0)',
(icao.strip('"'), lat, lon, elevation))
n += 1
f.close()
db.commit()
return n
def updateMetar(self, db, path):
''' Updates metar table from Metar file'''
f = open(path, 'r')
nupdated = 0
nparsed = 0
timestamp = 0
cursor = db.cursor()
i = 0
inserts = []
INSBUF = cursor.arraysize
today_prefix = datetime.utcnow().strftime('%Y%m')
yesterday_prefix = (datetime.utcnow() + timedelta(days=-1)).strftime('%Y%m')
today = datetime.utcnow().strftime('%d')
for line in f.readlines():
if line[0].isalpha() and len(line) > 11 and line[11] == 'Z':
i += 1
icao, mtime, metar = line[0:4], line[5:11] , re.sub(r'[^\x00-\x7F]+',' ', line[5:-1])
metar = metar.split(',')[0]
if mtime[-1] == 'Z':
mtime = '0' + mtime[:-1]
if not mtime.isdigit():
mtime = '000000'
# Prepend year and month to the timestamp
if mtime[:2] == today:
timestamp = today_prefix + mtime
else:
timestamp = yesterday_prefix + mtime
inserts.append((timestamp, metar, icao, timestamp))
nparsed += 1
timestamp = 0
if (i % INSBUF) == 0:
cursor.executemany('UPDATE airports SET timestamp = ?, metar = ? WHERE icao = ? AND timestamp < ?', inserts)
inserts = []
nupdated += cursor.rowcount
if len(inserts):
cursor.executemany('UPDATE airports SET timestamp = ?, metar = ? WHERE icao = ? AND timestamp < ?', inserts)
nupdated += cursor.rowcount
db.commit()
f.close()
if not self.conf.keepOldFiles:
util.remove(path)
return nupdated, nparsed
def clearMetarReports(self, db):
'''Clears all metar reports from the db'''
cursor = db.cursor()
cursor.execute('UPDATE airports SET metar = NULL, timestamp = 0')
db.commit()
def getClosestStation(self, db, lat, lon, limit = 1):
''' Return closest airport with a metar report'''
cursor = db.cursor()
fudge = math.pow(math.cos(math.radians(lat)),2)
if self.conf.ignore_metar_stations:
q = '''SELECT * FROM airports
WHERE metar NOT NULL AND icao NOT in (%s)
ORDER BY ((? - lat) * (? - lat) + (? - lon) * (? - lon) * ?)
LIMIT ?''' % (','.join(['?'] * len(self.conf.ignore_metar_stations)))
res = cursor.execute(q , tuple(self.conf.ignore_metar_stations) + (lat, lat, lon, lon, fudge, limit))
else:
res = cursor.execute('''SELECT * FROM airports
WHERE metar NOT NULL
ORDER BY ((? - lat) * (? - lat) + (? - lon) * (? - lon) * ?)
LIMIT ?''', (lat, lat, lon, lon, fudge, limit))
ret = res.fetchall()
if limit == 1 and len(ret) > 0:
return ret[0]
return ret
def getMetar(self, db, icao):
''' Get metar from icao name '''
cursor = db.cursor()
res = cursor.execute('''SELECT * FROM airports
WHERE icao = ? AND metar NOT NULL LIMIT 1''', (icao.upper(), ))
ret = res.fetchall()
if len(ret) > 0:
return ret[0]
return ret
def getCycle(self):
now = datetime.utcnow()
# Cycle is updated until the houre has arrived (ex: 01 cycle updates until 1am)
cnow = now + timedelta(hours=0, minutes=5)
timestamp = int(time.time())
return ('%02d' % cnow.hour, timestamp)
def parseMetar(self, icao, metar, airport_msl = 0):
''' Parse metar'''
weather = {
'icao': icao,
'metar': metar,
'elevation': airport_msl,
'wind': [0, 0, 0], # Heading, speed, shear
'variable_wind': False,
'clouds': [0, 0, False] * 3, # Alt, coverage type
'temperature': [False, False], # Temperature, dewpoint
'pressure': False, # space c.pa2inhg(10.1325),
'visibility': 9998,
'precipitation': {},
}
metar = metar.split('TEMPO')[0]
clouds = []
for cloud in self.RE_CLOUD.findall(metar):
coverage, alt, type = cloud
alt = float(alt) * 30.48 + airport_msl
clouds.append([alt, coverage, type])
weather['clouds'] = clouds
m = self.RE_PRESSURE.search(metar)
if m:
unit, press = m.groups()
press = float(press)
if unit:
if unit == 'A':
press = press/100
elif unit == 'SLP':
if press > 500:
press = c.pa2inhg((press / 10 + 900) * 100)
else:
press = c.pa2inhg((press / 10 + 1000) * 100)
elif unit == 'Q':
press = c.pa2inhg(press * 100)
if 25 < press < 35:
weather['pressure'] = press
m = self.RE_TEMPERATURE2.search(metar)
if m:
tp, temp, dp, dew = m.groups()
temp = float(temp) * 0.1
dew = float(dew) * 0.1
if tp == '1': temp *= -1
if dp == '1': dew *= -1
weather['temperature'] = [temp, dew]
else:
m = self.RE_TEMPERATURE.search(metar)
if m:
temps, temp, dews, dew = m.groups()
temp = int(temp)
dew = int(dew)
if dews: dew *= -1
if temps: temp *= -1
weather['temperature'] = [temp, dew]
metar = metar.split('RMK')[0]
m = self.RE_VISIBILITY.search(metar)
if m:
if m.group(0) == 'CAVOK' or (m.group(0)[0] == 'P' and int(m.group(2)) > 7999):
visibility = 9999
else:
visibility = 0
vis0, vis1, vis2, vis3, div, unit = m.groups()
if vis1: visibility += int(vis1)
if vis2: visibility += int(vis2)
if vis3:
vis3 = int(vis3)
if div:
vis3 /= float(div[1:])
visibility += vis3
if unit == 'SM': visibility *= 1609.34
if unit == 'KM': visibility *= 1000
weather['visibility'] = visibility
m = self.RE_WIND.search(metar)
if m:
heading, speed, gust, unit = m.groups()
if heading == 'VRB':
heading = 0
weather['variable_wind'] = [0, 360]
else:
heading = int(heading)
speed = int(speed)
if not gust:
gust = 0
else:
gust = int(gust[1:]) - speed
if unit in ('MPS', 'MPH'):
speed = c.ms2knots(speed)
gust = c.ms2knots(gust)
if unit == 'MPH':
speed /= 60
gust /= 60
if unit == 'KMH':
speed = c.m2kn(speed / 1000.0)
gust = c.m2kn(gust / 1000.0)
weather['wind'] = [heading, speed, gust]
m = self.RE_VARIABLE_WIND.search(metar)
if m:
h1, h2 = m.groups()
weather['variable_wind'] = [int(h1), int(h2)]
precipitation = {}
for precp in self.RE_PRECIPITATION.findall(metar):
intensity, recent, mod, kind, neg = precp
if neg == 'E':
recent = 'RE'
if neg != 'NO':
precipitation[kind] = {'int': intensity ,'mod': mod, 'recent': recent}
weather['precipitation'] = precipitation
# Extended visibility
if weather['visibility'] > 9998:
weather['mt_visibility'] = weather['visibility']
ext_vis = c.rh2visibility(c.dewpoint2rh(weather['temperature'][0], weather['temperature'][1]))
if ext_vis > weather['visibility']:
weather['visibility'] = int(ext_vis)
return weather
def | (self, lat, lon, rate):
# Worker thread requires it's own db connection and cursor
if not self.th_db:
self.th_db = self.dbConnect(self.database)
# Check for new metar dowloaded data
if self.downloading == True:
if not self.download.q.empty():
self.downloading = False
metarfile = self.download.q.get()
if metarfile:
print 'Parsing METAR download.'
updated, parsed = self.updateMetar(self.th_db, os.sep.join([self.conf.cachepath, metarfile]))
self.reparse = True
print "METAR updated/parsed: %d/%d" % (updated, parsed)
else:
pass
elif self.conf.download:
# Download new data if required
cycle, timestamp = self.getCycle()
if (timestamp - self.last_timestamp) > self.conf.metar_updaterate * 60:
self.last_timestamp = timestamp
self.downloadCycle(cycle, timestamp)
# Update stations table if required
if self.ms_download and not self.ms_download.q.empty():
print 'Updating metar stations.'
nstations = self.updateStations(self.th_db, os.sep.join([self.conf.cachepath, self.ms_download.q.get()]))
self.ms_download = False
print '%d metar stations updated.' % nstations
# Update METAR.rwx
if self.conf.updateMetarRWX and self.next_metarRWX < time.time():
if self.updateMetarRWX(self.th_db):
self.next_metarRWX = time.time() + 300
print 'Updated METAR.rwx file.'
else:
# Retry in 10 sec
self.next_metarRWX = time.time() + 10
def downloadCycle(self, cycle, timestamp):
self.downloading = True
cachepath = os.sep.join([self.conf.cachepath, 'metar'])
if not os.path.exists(cachepath):
os.makedirs(cachepath)
prefix = self.conf.metar_source
if self.conf.metar_source == 'NOAA':
url = self.NOAA_METAR_URL
elif self.conf.metar_source == 'VATSIM':
url = self.VATSIM_METAR_URL
elif self.conf.metar_source == 'IVAO':
url = self.IVAO_METAR_URL
cachefile = os.sep.join(['metar', '%s_%d_%sZ.txt' % (prefix, timestamp, cycle)])
self.download = AsyncDownload(self.conf, url, cachefile)
def updateMetarRWX(self, db):
# Updates metar RWX file.
cursor = db.cursor()
try:
f = open(os.sep.join([self.conf.syspath, 'METAR.rwx']), 'w')
except:
print "ERROR updating METAR.rwx file: %s %s" % (sys.exc_info()[0], sys.exc_info()[1])
return False
res = cursor.execute('SELECT icao, metar FROM airports WHERE metar NOT NULL')
while True:
rows = res.fetchmany()
if rows:
for row in rows:
f.write('%s %s\n' % (row[0], row[1]))
else:
break
f.close()
return True
def die(self):
self.connection.commit()
self.connection.close()
| run | identifier_name |
metar.py | '''
X-plane NOAA GFS weather plugin.
Copyright (C) 2012-2015 Joan Perez i Cauhe
---
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or any later version.
'''
import re
import os
import sqlite3
import math
import sys
import shutil
from datetime import datetime, timedelta
import time
from util import util
from c import c
from asyncdownload import AsyncDownload
class Metar:
'''
Metar download and interpretation class
'''
# Metar parse regex
RE_CLOUD = re.compile(r'\b(FEW|BKN|SCT|OVC|VV)([0-9]+)([A-Z][A-Z][A-Z]?)?\b')
RE_WIND = re.compile(r'\b(VRB|[0-9]{3})([0-9]{2,3})(G[0-9]{2,3})?(MPH|KT?|MPS|KMH)\b')
RE_VARIABLE_WIND = re.compile(r'\b([0-9]{3})V([0-9]{3})\b')
RE_VISIBILITY = re.compile(r'\b(CAVOK|[PM]?([0-9]{4})|([0-9] )?([0-9]{1,2})(/[0-9])?(SM|KM))\b')
RE_PRESSURE = re.compile(r'\b(Q|QNH|SLP|A)[ ]?([0-9]{3,4})\b')
RE_TEMPERATURE = re.compile(r'\b(M|-)?([0-9]{1,2})/(M|-)?([0-9]{1,2})\b')
RE_TEMPERATURE2 = re.compile(r'\bT(0|1)([0-9]{3})(0|1)([0-9]{3})\b')
RE_PRECIPITATION = re.compile('(-|\+)?(RE)?(DZ|SG|IC|PL|SH)?(DZ|RA|SN|TS)(NO|E)?')
METAR_STATIONS_URL = 'https://www.aviationweather.gov/docs/metar/stations.txt'
NOAA_METAR_URL = 'https://aviationweather.gov/adds/dataserver_current/current/metars.cache.csv.gz'
VATSIM_METAR_URL = 'https://metar.vatsim.net/metar.php?id=all'
IVAO_METAR_URL = 'https://wx.ivao.aero/metar.php'
STATION_UPDATE_RATE = 30 # In days
def __init__(self, conf):
self.conf = conf
self.cachepath = os.sep.join([conf.cachepath, 'metar'])
if not os.path.exists(self.cachepath):
os.makedirs(self.cachepath)
self.database = os.sep.join([self.cachepath, 'metar.db'])
self.th_db = False
# Weather variables
self.weather = None
self.reparse = True
# Download flags
self.ms_download = False
self.downloading = False
self.next_metarRWX = time.time() + 30
# Main db connection, create db if doens't exist
createdb = True
if os.path.exists(self.database):
createdb = False
self.connection = self.dbConnect(self.database)
self.cursor = self.connection.cursor()
if createdb:
self.conf.ms_update = 0
self.dbCreate(self.connection)
# Metar stations update
if (time.time() - self.conf.ms_update) > self.STATION_UPDATE_RATE * 86400:
self.ms_download = AsyncDownload(self.conf, self.METAR_STATIONS_URL, os.sep.join(['metar', 'stations.txt']))
self.last_latlon, self.last_station, self.last_timestamp = [False]*3
def dbConnect(self, path):
return sqlite3.connect(path, check_same_thread=False)
def dbCreate(self, db):
cursor = db.cursor()
cursor.execute('''CREATE TABLE airports (icao text KEY UNIQUE, lat real, lon real, elevation int,
timestamp int KEY, metar text)''')
db.commit()
def updateStations(self, db, path):
''' Updates aiports db from metar stations file'''
self.conf.ms_update = time.time()
cursor = db.cursor()
f = open(path, 'r')
n = 0
for line in f.readlines():
if line[0] != '!' and len(line) > 80:
icao = line[20:24]
lat = float(line[39:41]) + round(float(line[42:44])/60, 4)
if line[44] == 'S':
lat *= -1
lon = float(line[47:50]) + round(float(line[51:53])/60, 4)
if line[53] == 'W':
lon *= -1
elevation = int(line[55:59])
if line[20] != ' ' and line[51] != '9':
cursor.execute('INSERT OR REPLACE INTO airports (icao, lat, lon, elevation, timestamp) VALUES (?,?,?,?,0)',
(icao.strip('"'), lat, lon, elevation))
n += 1
f.close()
db.commit()
return n
def updateMetar(self, db, path):
''' Updates metar table from Metar file'''
f = open(path, 'r')
nupdated = 0
nparsed = 0
timestamp = 0
cursor = db.cursor()
i = 0
inserts = []
INSBUF = cursor.arraysize
today_prefix = datetime.utcnow().strftime('%Y%m')
yesterday_prefix = (datetime.utcnow() + timedelta(days=-1)).strftime('%Y%m')
today = datetime.utcnow().strftime('%d')
for line in f.readlines():
if line[0].isalpha() and len(line) > 11 and line[11] == 'Z':
i += 1
icao, mtime, metar = line[0:4], line[5:11] , re.sub(r'[^\x00-\x7F]+',' ', line[5:-1])
metar = metar.split(',')[0]
if mtime[-1] == 'Z':
mtime = '0' + mtime[:-1]
if not mtime.isdigit():
mtime = '000000'
# Prepend year and month to the timestamp
if mtime[:2] == today:
timestamp = today_prefix + mtime
else:
timestamp = yesterday_prefix + mtime
inserts.append((timestamp, metar, icao, timestamp))
nparsed += 1
timestamp = 0
if (i % INSBUF) == 0:
cursor.executemany('UPDATE airports SET timestamp = ?, metar = ? WHERE icao = ? AND timestamp < ?', inserts)
inserts = []
nupdated += cursor.rowcount
if len(inserts):
cursor.executemany('UPDATE airports SET timestamp = ?, metar = ? WHERE icao = ? AND timestamp < ?', inserts)
nupdated += cursor.rowcount
db.commit()
f.close()
if not self.conf.keepOldFiles:
util.remove(path)
return nupdated, nparsed
def clearMetarReports(self, db):
'''Clears all metar reports from the db'''
cursor = db.cursor()
cursor.execute('UPDATE airports SET metar = NULL, timestamp = 0')
db.commit()
def getClosestStation(self, db, lat, lon, limit = 1):
''' Return closest airport with a metar report'''
cursor = db.cursor()
fudge = math.pow(math.cos(math.radians(lat)),2)
if self.conf.ignore_metar_stations:
q = '''SELECT * FROM airports
WHERE metar NOT NULL AND icao NOT in (%s)
ORDER BY ((? - lat) * (? - lat) + (? - lon) * (? - lon) * ?)
LIMIT ?''' % (','.join(['?'] * len(self.conf.ignore_metar_stations)))
res = cursor.execute(q , tuple(self.conf.ignore_metar_stations) + (lat, lat, lon, lon, fudge, limit))
else:
res = cursor.execute('''SELECT * FROM airports
WHERE metar NOT NULL
ORDER BY ((? - lat) * (? - lat) + (? - lon) * (? - lon) * ?)
LIMIT ?''', (lat, lat, lon, lon, fudge, limit))
ret = res.fetchall()
if limit == 1 and len(ret) > 0:
return ret[0]
return ret
def getMetar(self, db, icao):
''' Get metar from icao name '''
cursor = db.cursor()
res = cursor.execute('''SELECT * FROM airports
WHERE icao = ? AND metar NOT NULL LIMIT 1''', (icao.upper(), ))
ret = res.fetchall()
if len(ret) > 0:
return ret[0]
return ret
def getCycle(self):
now = datetime.utcnow()
# Cycle is updated until the houre has arrived (ex: 01 cycle updates until 1am)
cnow = now + timedelta(hours=0, minutes=5)
timestamp = int(time.time())
return ('%02d' % cnow.hour, timestamp)
def parseMetar(self, icao, metar, airport_msl = 0):
''' Parse metar'''
weather = {
'icao': icao,
'metar': metar,
'elevation': airport_msl,
'wind': [0, 0, 0], # Heading, speed, shear
'variable_wind': False,
'clouds': [0, 0, False] * 3, # Alt, coverage type
'temperature': [False, False], # Temperature, dewpoint
'pressure': False, # space c.pa2inhg(10.1325),
'visibility': 9998,
'precipitation': {},
}
metar = metar.split('TEMPO')[0]
clouds = []
for cloud in self.RE_CLOUD.findall(metar):
coverage, alt, type = cloud
alt = float(alt) * 30.48 + airport_msl
clouds.append([alt, coverage, type])
weather['clouds'] = clouds
m = self.RE_PRESSURE.search(metar)
if m:
unit, press = m.groups()
press = float(press)
if unit:
if unit == 'A':
press = press/100
elif unit == 'SLP':
if press > 500:
press = c.pa2inhg((press / 10 + 900) * 100)
else:
press = c.pa2inhg((press / 10 + 1000) * 100)
elif unit == 'Q':
press = c.pa2inhg(press * 100)
if 25 < press < 35:
weather['pressure'] = press
m = self.RE_TEMPERATURE2.search(metar)
if m:
tp, temp, dp, dew = m.groups()
temp = float(temp) * 0.1
dew = float(dew) * 0.1
if tp == '1': temp *= -1
if dp == '1': dew *= -1
weather['temperature'] = [temp, dew]
else:
m = self.RE_TEMPERATURE.search(metar)
if m:
temps, temp, dews, dew = m.groups()
temp = int(temp)
dew = int(dew)
if dews: dew *= -1
if temps: temp *= -1
weather['temperature'] = [temp, dew]
metar = metar.split('RMK')[0]
m = self.RE_VISIBILITY.search(metar)
if m:
if m.group(0) == 'CAVOK' or (m.group(0)[0] == 'P' and int(m.group(2)) > 7999):
visibility = 9999
else:
visibility = 0
vis0, vis1, vis2, vis3, div, unit = m.groups()
if vis1: visibility += int(vis1)
if vis2: visibility += int(vis2)
if vis3:
vis3 = int(vis3)
if div:
vis3 /= float(div[1:])
visibility += vis3
if unit == 'SM': visibility *= 1609.34
if unit == 'KM': visibility *= 1000
weather['visibility'] = visibility
m = self.RE_WIND.search(metar)
if m:
heading, speed, gust, unit = m.groups()
if heading == 'VRB':
heading = 0
weather['variable_wind'] = [0, 360]
else:
heading = int(heading)
speed = int(speed)
if not gust:
gust = 0
else:
gust = int(gust[1:]) - speed
if unit in ('MPS', 'MPH'):
speed = c.ms2knots(speed)
gust = c.ms2knots(gust)
if unit == 'MPH':
speed /= 60
gust /= 60
if unit == 'KMH':
speed = c.m2kn(speed / 1000.0)
gust = c.m2kn(gust / 1000.0)
weather['wind'] = [heading, speed, gust]
m = self.RE_VARIABLE_WIND.search(metar)
if m:
h1, h2 = m.groups()
weather['variable_wind'] = [int(h1), int(h2)]
precipitation = {}
for precp in self.RE_PRECIPITATION.findall(metar):
intensity, recent, mod, kind, neg = precp
if neg == 'E':
recent = 'RE'
if neg != 'NO':
precipitation[kind] = {'int': intensity ,'mod': mod, 'recent': recent}
weather['precipitation'] = precipitation
# Extended visibility
if weather['visibility'] > 9998:
weather['mt_visibility'] = weather['visibility']
ext_vis = c.rh2visibility(c.dewpoint2rh(weather['temperature'][0], weather['temperature'][1]))
if ext_vis > weather['visibility']:
weather['visibility'] = int(ext_vis)
return weather
def run(self, lat, lon, rate):
# Worker thread requires it's own db connection and cursor
if not self.th_db:
self.th_db = self.dbConnect(self.database)
# Check for new metar dowloaded data
if self.downloading == True:
if not self.download.q.empty():
self.downloading = False
metarfile = self.download.q.get()
if metarfile:
print 'Parsing METAR download.'
updated, parsed = self.updateMetar(self.th_db, os.sep.join([self.conf.cachepath, metarfile]))
self.reparse = True
print "METAR updated/parsed: %d/%d" % (updated, parsed)
else:
pass
elif self.conf.download:
# Download new data if required
cycle, timestamp = self.getCycle()
if (timestamp - self.last_timestamp) > self.conf.metar_updaterate * 60:
self.last_timestamp = timestamp
self.downloadCycle(cycle, timestamp)
# Update stations table if required
if self.ms_download and not self.ms_download.q.empty():
print 'Updating metar stations.'
nstations = self.updateStations(self.th_db, os.sep.join([self.conf.cachepath, self.ms_download.q.get()]))
self.ms_download = False
print '%d metar stations updated.' % nstations
# Update METAR.rwx
if self.conf.updateMetarRWX and self.next_metarRWX < time.time():
if self.updateMetarRWX(self.th_db):
self.next_metarRWX = time.time() + 300
print 'Updated METAR.rwx file.'
else:
# Retry in 10 sec
self.next_metarRWX = time.time() + 10
def downloadCycle(self, cycle, timestamp):
self.downloading = True
cachepath = os.sep.join([self.conf.cachepath, 'metar'])
if not os.path.exists(cachepath):
os.makedirs(cachepath)
prefix = self.conf.metar_source
if self.conf.metar_source == 'NOAA':
url = self.NOAA_METAR_URL
elif self.conf.metar_source == 'VATSIM':
url = self.VATSIM_METAR_URL
elif self.conf.metar_source == 'IVAO':
url = self.IVAO_METAR_URL
cachefile = os.sep.join(['metar', '%s_%d_%sZ.txt' % (prefix, timestamp, cycle)])
self.download = AsyncDownload(self.conf, url, cachefile)
def updateMetarRWX(self, db):
# Updates metar RWX file.
|
def die(self):
self.connection.commit()
self.connection.close()
| cursor = db.cursor()
try:
f = open(os.sep.join([self.conf.syspath, 'METAR.rwx']), 'w')
except:
print "ERROR updating METAR.rwx file: %s %s" % (sys.exc_info()[0], sys.exc_info()[1])
return False
res = cursor.execute('SELECT icao, metar FROM airports WHERE metar NOT NULL')
while True:
rows = res.fetchmany()
if rows:
for row in rows:
f.write('%s %s\n' % (row[0], row[1]))
else:
break
f.close()
return True | identifier_body |
metar.py | '''
X-plane NOAA GFS weather plugin.
Copyright (C) 2012-2015 Joan Perez i Cauhe
---
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or any later version.
'''
import re
import os
import sqlite3
import math
import sys
import shutil
from datetime import datetime, timedelta
import time
from util import util
from c import c
from asyncdownload import AsyncDownload
class Metar:
'''
Metar download and interpretation class
'''
# Metar parse regex
RE_CLOUD = re.compile(r'\b(FEW|BKN|SCT|OVC|VV)([0-9]+)([A-Z][A-Z][A-Z]?)?\b')
RE_WIND = re.compile(r'\b(VRB|[0-9]{3})([0-9]{2,3})(G[0-9]{2,3})?(MPH|KT?|MPS|KMH)\b')
RE_VARIABLE_WIND = re.compile(r'\b([0-9]{3})V([0-9]{3})\b')
RE_VISIBILITY = re.compile(r'\b(CAVOK|[PM]?([0-9]{4})|([0-9] )?([0-9]{1,2})(/[0-9])?(SM|KM))\b')
RE_PRESSURE = re.compile(r'\b(Q|QNH|SLP|A)[ ]?([0-9]{3,4})\b')
RE_TEMPERATURE = re.compile(r'\b(M|-)?([0-9]{1,2})/(M|-)?([0-9]{1,2})\b')
RE_TEMPERATURE2 = re.compile(r'\bT(0|1)([0-9]{3})(0|1)([0-9]{3})\b')
RE_PRECIPITATION = re.compile('(-|\+)?(RE)?(DZ|SG|IC|PL|SH)?(DZ|RA|SN|TS)(NO|E)?')
METAR_STATIONS_URL = 'https://www.aviationweather.gov/docs/metar/stations.txt'
NOAA_METAR_URL = 'https://aviationweather.gov/adds/dataserver_current/current/metars.cache.csv.gz'
VATSIM_METAR_URL = 'https://metar.vatsim.net/metar.php?id=all'
IVAO_METAR_URL = 'https://wx.ivao.aero/metar.php'
STATION_UPDATE_RATE = 30 # In days
def __init__(self, conf):
self.conf = conf
self.cachepath = os.sep.join([conf.cachepath, 'metar'])
if not os.path.exists(self.cachepath):
os.makedirs(self.cachepath)
self.database = os.sep.join([self.cachepath, 'metar.db'])
self.th_db = False
# Weather variables
self.weather = None
self.reparse = True
# Download flags
self.ms_download = False
self.downloading = False
self.next_metarRWX = time.time() + 30
# Main db connection, create db if doens't exist
createdb = True
if os.path.exists(self.database):
createdb = False
self.connection = self.dbConnect(self.database)
self.cursor = self.connection.cursor()
if createdb:
self.conf.ms_update = 0
self.dbCreate(self.connection)
# Metar stations update
if (time.time() - self.conf.ms_update) > self.STATION_UPDATE_RATE * 86400:
self.ms_download = AsyncDownload(self.conf, self.METAR_STATIONS_URL, os.sep.join(['metar', 'stations.txt']))
self.last_latlon, self.last_station, self.last_timestamp = [False]*3
def dbConnect(self, path):
return sqlite3.connect(path, check_same_thread=False)
def dbCreate(self, db):
cursor = db.cursor()
cursor.execute('''CREATE TABLE airports (icao text KEY UNIQUE, lat real, lon real, elevation int,
timestamp int KEY, metar text)''')
db.commit()
def updateStations(self, db, path):
''' Updates aiports db from metar stations file'''
self.conf.ms_update = time.time()
cursor = db.cursor()
f = open(path, 'r')
n = 0
for line in f.readlines():
if line[0] != '!' and len(line) > 80:
icao = line[20:24]
lat = float(line[39:41]) + round(float(line[42:44])/60, 4)
if line[44] == 'S':
lat *= -1
lon = float(line[47:50]) + round(float(line[51:53])/60, 4)
if line[53] == 'W':
lon *= -1
elevation = int(line[55:59])
if line[20] != ' ' and line[51] != '9':
cursor.execute('INSERT OR REPLACE INTO airports (icao, lat, lon, elevation, timestamp) VALUES (?,?,?,?,0)',
(icao.strip('"'), lat, lon, elevation))
n += 1
f.close()
db.commit()
return n
def updateMetar(self, db, path):
''' Updates metar table from Metar file'''
f = open(path, 'r')
nupdated = 0
nparsed = 0
timestamp = 0
cursor = db.cursor()
i = 0
inserts = []
INSBUF = cursor.arraysize
today_prefix = datetime.utcnow().strftime('%Y%m')
yesterday_prefix = (datetime.utcnow() + timedelta(days=-1)).strftime('%Y%m')
today = datetime.utcnow().strftime('%d')
for line in f.readlines():
if line[0].isalpha() and len(line) > 11 and line[11] == 'Z':
i += 1
icao, mtime, metar = line[0:4], line[5:11] , re.sub(r'[^\x00-\x7F]+',' ', line[5:-1])
metar = metar.split(',')[0]
if mtime[-1] == 'Z':
mtime = '0' + mtime[:-1]
if not mtime.isdigit():
mtime = '000000'
# Prepend year and month to the timestamp
if mtime[:2] == today:
timestamp = today_prefix + mtime
else:
timestamp = yesterday_prefix + mtime
inserts.append((timestamp, metar, icao, timestamp))
nparsed += 1
timestamp = 0
if (i % INSBUF) == 0:
cursor.executemany('UPDATE airports SET timestamp = ?, metar = ? WHERE icao = ? AND timestamp < ?', inserts)
inserts = []
nupdated += cursor.rowcount
if len(inserts):
cursor.executemany('UPDATE airports SET timestamp = ?, metar = ? WHERE icao = ? AND timestamp < ?', inserts)
nupdated += cursor.rowcount
db.commit()
f.close()
if not self.conf.keepOldFiles:
util.remove(path)
return nupdated, nparsed
def clearMetarReports(self, db):
'''Clears all metar reports from the db'''
cursor = db.cursor()
cursor.execute('UPDATE airports SET metar = NULL, timestamp = 0')
db.commit()
def getClosestStation(self, db, lat, lon, limit = 1):
''' Return closest airport with a metar report'''
cursor = db.cursor()
fudge = math.pow(math.cos(math.radians(lat)),2)
if self.conf.ignore_metar_stations:
q = '''SELECT * FROM airports
WHERE metar NOT NULL AND icao NOT in (%s)
ORDER BY ((? - lat) * (? - lat) + (? - lon) * (? - lon) * ?)
LIMIT ?''' % (','.join(['?'] * len(self.conf.ignore_metar_stations)))
res = cursor.execute(q , tuple(self.conf.ignore_metar_stations) + (lat, lat, lon, lon, fudge, limit))
else:
res = cursor.execute('''SELECT * FROM airports
WHERE metar NOT NULL
ORDER BY ((? - lat) * (? - lat) + (? - lon) * (? - lon) * ?)
LIMIT ?''', (lat, lat, lon, lon, fudge, limit))
ret = res.fetchall()
if limit == 1 and len(ret) > 0:
return ret[0]
return ret
def getMetar(self, db, icao):
''' Get metar from icao name '''
cursor = db.cursor()
res = cursor.execute('''SELECT * FROM airports
WHERE icao = ? AND metar NOT NULL LIMIT 1''', (icao.upper(), ))
ret = res.fetchall()
if len(ret) > 0:
return ret[0]
return ret
def getCycle(self):
now = datetime.utcnow()
# Cycle is updated until the houre has arrived (ex: 01 cycle updates until 1am)
cnow = now + timedelta(hours=0, minutes=5)
timestamp = int(time.time())
return ('%02d' % cnow.hour, timestamp)
def parseMetar(self, icao, metar, airport_msl = 0):
''' Parse metar'''
weather = {
'icao': icao,
'metar': metar,
'elevation': airport_msl,
'wind': [0, 0, 0], # Heading, speed, shear
'variable_wind': False,
'clouds': [0, 0, False] * 3, # Alt, coverage type
'temperature': [False, False], # Temperature, dewpoint
'pressure': False, # space c.pa2inhg(10.1325),
'visibility': 9998,
'precipitation': {},
}
metar = metar.split('TEMPO')[0]
clouds = []
for cloud in self.RE_CLOUD.findall(metar):
coverage, alt, type = cloud
alt = float(alt) * 30.48 + airport_msl
clouds.append([alt, coverage, type])
weather['clouds'] = clouds
m = self.RE_PRESSURE.search(metar)
if m:
|
m = self.RE_TEMPERATURE2.search(metar)
if m:
tp, temp, dp, dew = m.groups()
temp = float(temp) * 0.1
dew = float(dew) * 0.1
if tp == '1': temp *= -1
if dp == '1': dew *= -1
weather['temperature'] = [temp, dew]
else:
m = self.RE_TEMPERATURE.search(metar)
if m:
temps, temp, dews, dew = m.groups()
temp = int(temp)
dew = int(dew)
if dews: dew *= -1
if temps: temp *= -1
weather['temperature'] = [temp, dew]
metar = metar.split('RMK')[0]
m = self.RE_VISIBILITY.search(metar)
if m:
if m.group(0) == 'CAVOK' or (m.group(0)[0] == 'P' and int(m.group(2)) > 7999):
visibility = 9999
else:
visibility = 0
vis0, vis1, vis2, vis3, div, unit = m.groups()
if vis1: visibility += int(vis1)
if vis2: visibility += int(vis2)
if vis3:
vis3 = int(vis3)
if div:
vis3 /= float(div[1:])
visibility += vis3
if unit == 'SM': visibility *= 1609.34
if unit == 'KM': visibility *= 1000
weather['visibility'] = visibility
m = self.RE_WIND.search(metar)
if m:
heading, speed, gust, unit = m.groups()
if heading == 'VRB':
heading = 0
weather['variable_wind'] = [0, 360]
else:
heading = int(heading)
speed = int(speed)
if not gust:
gust = 0
else:
gust = int(gust[1:]) - speed
if unit in ('MPS', 'MPH'):
speed = c.ms2knots(speed)
gust = c.ms2knots(gust)
if unit == 'MPH':
speed /= 60
gust /= 60
if unit == 'KMH':
speed = c.m2kn(speed / 1000.0)
gust = c.m2kn(gust / 1000.0)
weather['wind'] = [heading, speed, gust]
m = self.RE_VARIABLE_WIND.search(metar)
if m:
h1, h2 = m.groups()
weather['variable_wind'] = [int(h1), int(h2)]
precipitation = {}
for precp in self.RE_PRECIPITATION.findall(metar):
intensity, recent, mod, kind, neg = precp
if neg == 'E':
recent = 'RE'
if neg != 'NO':
precipitation[kind] = {'int': intensity ,'mod': mod, 'recent': recent}
weather['precipitation'] = precipitation
# Extended visibility
if weather['visibility'] > 9998:
weather['mt_visibility'] = weather['visibility']
ext_vis = c.rh2visibility(c.dewpoint2rh(weather['temperature'][0], weather['temperature'][1]))
if ext_vis > weather['visibility']:
weather['visibility'] = int(ext_vis)
return weather
def run(self, lat, lon, rate):
# Worker thread requires it's own db connection and cursor
if not self.th_db:
self.th_db = self.dbConnect(self.database)
# Check for new metar dowloaded data
if self.downloading == True:
if not self.download.q.empty():
self.downloading = False
metarfile = self.download.q.get()
if metarfile:
print 'Parsing METAR download.'
updated, parsed = self.updateMetar(self.th_db, os.sep.join([self.conf.cachepath, metarfile]))
self.reparse = True
print "METAR updated/parsed: %d/%d" % (updated, parsed)
else:
pass
elif self.conf.download:
# Download new data if required
cycle, timestamp = self.getCycle()
if (timestamp - self.last_timestamp) > self.conf.metar_updaterate * 60:
self.last_timestamp = timestamp
self.downloadCycle(cycle, timestamp)
# Update stations table if required
if self.ms_download and not self.ms_download.q.empty():
print 'Updating metar stations.'
nstations = self.updateStations(self.th_db, os.sep.join([self.conf.cachepath, self.ms_download.q.get()]))
self.ms_download = False
print '%d metar stations updated.' % nstations
# Update METAR.rwx
if self.conf.updateMetarRWX and self.next_metarRWX < time.time():
if self.updateMetarRWX(self.th_db):
self.next_metarRWX = time.time() + 300
print 'Updated METAR.rwx file.'
else:
# Retry in 10 sec
self.next_metarRWX = time.time() + 10
def downloadCycle(self, cycle, timestamp):
self.downloading = True
cachepath = os.sep.join([self.conf.cachepath, 'metar'])
if not os.path.exists(cachepath):
os.makedirs(cachepath)
prefix = self.conf.metar_source
if self.conf.metar_source == 'NOAA':
url = self.NOAA_METAR_URL
elif self.conf.metar_source == 'VATSIM':
url = self.VATSIM_METAR_URL
elif self.conf.metar_source == 'IVAO':
url = self.IVAO_METAR_URL
cachefile = os.sep.join(['metar', '%s_%d_%sZ.txt' % (prefix, timestamp, cycle)])
self.download = AsyncDownload(self.conf, url, cachefile)
def updateMetarRWX(self, db):
# Updates metar RWX file.
cursor = db.cursor()
try:
f = open(os.sep.join([self.conf.syspath, 'METAR.rwx']), 'w')
except:
print "ERROR updating METAR.rwx file: %s %s" % (sys.exc_info()[0], sys.exc_info()[1])
return False
res = cursor.execute('SELECT icao, metar FROM airports WHERE metar NOT NULL')
while True:
rows = res.fetchmany()
if rows:
for row in rows:
f.write('%s %s\n' % (row[0], row[1]))
else:
break
f.close()
return True
def die(self):
self.connection.commit()
self.connection.close()
| unit, press = m.groups()
press = float(press)
if unit:
if unit == 'A':
press = press/100
elif unit == 'SLP':
if press > 500:
press = c.pa2inhg((press / 10 + 900) * 100)
else:
press = c.pa2inhg((press / 10 + 1000) * 100)
elif unit == 'Q':
press = c.pa2inhg(press * 100)
if 25 < press < 35:
weather['pressure'] = press | conditional_block |
metar.py | '''
X-plane NOAA GFS weather plugin.
Copyright (C) 2012-2015 Joan Perez i Cauhe
---
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or any later version.
'''
import re
import os
import sqlite3
import math
import sys
import shutil
from datetime import datetime, timedelta
import time
from util import util
from c import c
from asyncdownload import AsyncDownload
class Metar:
'''
Metar download and interpretation class
'''
# Metar parse regex
RE_CLOUD = re.compile(r'\b(FEW|BKN|SCT|OVC|VV)([0-9]+)([A-Z][A-Z][A-Z]?)?\b')
RE_WIND = re.compile(r'\b(VRB|[0-9]{3})([0-9]{2,3})(G[0-9]{2,3})?(MPH|KT?|MPS|KMH)\b')
RE_VARIABLE_WIND = re.compile(r'\b([0-9]{3})V([0-9]{3})\b')
RE_VISIBILITY = re.compile(r'\b(CAVOK|[PM]?([0-9]{4})|([0-9] )?([0-9]{1,2})(/[0-9])?(SM|KM))\b')
RE_PRESSURE = re.compile(r'\b(Q|QNH|SLP|A)[ ]?([0-9]{3,4})\b')
RE_TEMPERATURE = re.compile(r'\b(M|-)?([0-9]{1,2})/(M|-)?([0-9]{1,2})\b')
RE_TEMPERATURE2 = re.compile(r'\bT(0|1)([0-9]{3})(0|1)([0-9]{3})\b')
RE_PRECIPITATION = re.compile('(-|\+)?(RE)?(DZ|SG|IC|PL|SH)?(DZ|RA|SN|TS)(NO|E)?')
METAR_STATIONS_URL = 'https://www.aviationweather.gov/docs/metar/stations.txt'
NOAA_METAR_URL = 'https://aviationweather.gov/adds/dataserver_current/current/metars.cache.csv.gz'
VATSIM_METAR_URL = 'https://metar.vatsim.net/metar.php?id=all'
IVAO_METAR_URL = 'https://wx.ivao.aero/metar.php'
STATION_UPDATE_RATE = 30 # In days
def __init__(self, conf):
self.conf = conf
self.cachepath = os.sep.join([conf.cachepath, 'metar'])
if not os.path.exists(self.cachepath):
os.makedirs(self.cachepath)
self.database = os.sep.join([self.cachepath, 'metar.db'])
self.th_db = False
# Weather variables
self.weather = None
self.reparse = True
# Download flags
self.ms_download = False
self.downloading = False
self.next_metarRWX = time.time() + 30
# Main db connection, create db if doens't exist
createdb = True
if os.path.exists(self.database):
createdb = False
self.connection = self.dbConnect(self.database)
self.cursor = self.connection.cursor()
if createdb:
self.conf.ms_update = 0
self.dbCreate(self.connection)
# Metar stations update
if (time.time() - self.conf.ms_update) > self.STATION_UPDATE_RATE * 86400:
self.ms_download = AsyncDownload(self.conf, self.METAR_STATIONS_URL, os.sep.join(['metar', 'stations.txt'])) |
def dbCreate(self, db):
cursor = db.cursor()
cursor.execute('''CREATE TABLE airports (icao text KEY UNIQUE, lat real, lon real, elevation int,
timestamp int KEY, metar text)''')
db.commit()
def updateStations(self, db, path):
''' Updates aiports db from metar stations file'''
self.conf.ms_update = time.time()
cursor = db.cursor()
f = open(path, 'r')
n = 0
for line in f.readlines():
if line[0] != '!' and len(line) > 80:
icao = line[20:24]
lat = float(line[39:41]) + round(float(line[42:44])/60, 4)
if line[44] == 'S':
lat *= -1
lon = float(line[47:50]) + round(float(line[51:53])/60, 4)
if line[53] == 'W':
lon *= -1
elevation = int(line[55:59])
if line[20] != ' ' and line[51] != '9':
cursor.execute('INSERT OR REPLACE INTO airports (icao, lat, lon, elevation, timestamp) VALUES (?,?,?,?,0)',
(icao.strip('"'), lat, lon, elevation))
n += 1
f.close()
db.commit()
return n
def updateMetar(self, db, path):
''' Updates metar table from Metar file'''
f = open(path, 'r')
nupdated = 0
nparsed = 0
timestamp = 0
cursor = db.cursor()
i = 0
inserts = []
INSBUF = cursor.arraysize
today_prefix = datetime.utcnow().strftime('%Y%m')
yesterday_prefix = (datetime.utcnow() + timedelta(days=-1)).strftime('%Y%m')
today = datetime.utcnow().strftime('%d')
for line in f.readlines():
if line[0].isalpha() and len(line) > 11 and line[11] == 'Z':
i += 1
icao, mtime, metar = line[0:4], line[5:11] , re.sub(r'[^\x00-\x7F]+',' ', line[5:-1])
metar = metar.split(',')[0]
if mtime[-1] == 'Z':
mtime = '0' + mtime[:-1]
if not mtime.isdigit():
mtime = '000000'
# Prepend year and month to the timestamp
if mtime[:2] == today:
timestamp = today_prefix + mtime
else:
timestamp = yesterday_prefix + mtime
inserts.append((timestamp, metar, icao, timestamp))
nparsed += 1
timestamp = 0
if (i % INSBUF) == 0:
cursor.executemany('UPDATE airports SET timestamp = ?, metar = ? WHERE icao = ? AND timestamp < ?', inserts)
inserts = []
nupdated += cursor.rowcount
if len(inserts):
cursor.executemany('UPDATE airports SET timestamp = ?, metar = ? WHERE icao = ? AND timestamp < ?', inserts)
nupdated += cursor.rowcount
db.commit()
f.close()
if not self.conf.keepOldFiles:
util.remove(path)
return nupdated, nparsed
def clearMetarReports(self, db):
'''Clears all metar reports from the db'''
cursor = db.cursor()
cursor.execute('UPDATE airports SET metar = NULL, timestamp = 0')
db.commit()
def getClosestStation(self, db, lat, lon, limit = 1):
''' Return closest airport with a metar report'''
cursor = db.cursor()
fudge = math.pow(math.cos(math.radians(lat)),2)
if self.conf.ignore_metar_stations:
q = '''SELECT * FROM airports
WHERE metar NOT NULL AND icao NOT in (%s)
ORDER BY ((? - lat) * (? - lat) + (? - lon) * (? - lon) * ?)
LIMIT ?''' % (','.join(['?'] * len(self.conf.ignore_metar_stations)))
res = cursor.execute(q , tuple(self.conf.ignore_metar_stations) + (lat, lat, lon, lon, fudge, limit))
else:
res = cursor.execute('''SELECT * FROM airports
WHERE metar NOT NULL
ORDER BY ((? - lat) * (? - lat) + (? - lon) * (? - lon) * ?)
LIMIT ?''', (lat, lat, lon, lon, fudge, limit))
ret = res.fetchall()
if limit == 1 and len(ret) > 0:
return ret[0]
return ret
def getMetar(self, db, icao):
''' Get metar from icao name '''
cursor = db.cursor()
res = cursor.execute('''SELECT * FROM airports
WHERE icao = ? AND metar NOT NULL LIMIT 1''', (icao.upper(), ))
ret = res.fetchall()
if len(ret) > 0:
return ret[0]
return ret
def getCycle(self):
now = datetime.utcnow()
# Cycle is updated until the houre has arrived (ex: 01 cycle updates until 1am)
cnow = now + timedelta(hours=0, minutes=5)
timestamp = int(time.time())
return ('%02d' % cnow.hour, timestamp)
def parseMetar(self, icao, metar, airport_msl = 0):
''' Parse metar'''
weather = {
'icao': icao,
'metar': metar,
'elevation': airport_msl,
'wind': [0, 0, 0], # Heading, speed, shear
'variable_wind': False,
'clouds': [0, 0, False] * 3, # Alt, coverage type
'temperature': [False, False], # Temperature, dewpoint
'pressure': False, # space c.pa2inhg(10.1325),
'visibility': 9998,
'precipitation': {},
}
metar = metar.split('TEMPO')[0]
clouds = []
for cloud in self.RE_CLOUD.findall(metar):
coverage, alt, type = cloud
alt = float(alt) * 30.48 + airport_msl
clouds.append([alt, coverage, type])
weather['clouds'] = clouds
m = self.RE_PRESSURE.search(metar)
if m:
unit, press = m.groups()
press = float(press)
if unit:
if unit == 'A':
press = press/100
elif unit == 'SLP':
if press > 500:
press = c.pa2inhg((press / 10 + 900) * 100)
else:
press = c.pa2inhg((press / 10 + 1000) * 100)
elif unit == 'Q':
press = c.pa2inhg(press * 100)
if 25 < press < 35:
weather['pressure'] = press
m = self.RE_TEMPERATURE2.search(metar)
if m:
tp, temp, dp, dew = m.groups()
temp = float(temp) * 0.1
dew = float(dew) * 0.1
if tp == '1': temp *= -1
if dp == '1': dew *= -1
weather['temperature'] = [temp, dew]
else:
m = self.RE_TEMPERATURE.search(metar)
if m:
temps, temp, dews, dew = m.groups()
temp = int(temp)
dew = int(dew)
if dews: dew *= -1
if temps: temp *= -1
weather['temperature'] = [temp, dew]
metar = metar.split('RMK')[0]
m = self.RE_VISIBILITY.search(metar)
if m:
if m.group(0) == 'CAVOK' or (m.group(0)[0] == 'P' and int(m.group(2)) > 7999):
visibility = 9999
else:
visibility = 0
vis0, vis1, vis2, vis3, div, unit = m.groups()
if vis1: visibility += int(vis1)
if vis2: visibility += int(vis2)
if vis3:
vis3 = int(vis3)
if div:
vis3 /= float(div[1:])
visibility += vis3
if unit == 'SM': visibility *= 1609.34
if unit == 'KM': visibility *= 1000
weather['visibility'] = visibility
m = self.RE_WIND.search(metar)
if m:
heading, speed, gust, unit = m.groups()
if heading == 'VRB':
heading = 0
weather['variable_wind'] = [0, 360]
else:
heading = int(heading)
speed = int(speed)
if not gust:
gust = 0
else:
gust = int(gust[1:]) - speed
if unit in ('MPS', 'MPH'):
speed = c.ms2knots(speed)
gust = c.ms2knots(gust)
if unit == 'MPH':
speed /= 60
gust /= 60
if unit == 'KMH':
speed = c.m2kn(speed / 1000.0)
gust = c.m2kn(gust / 1000.0)
weather['wind'] = [heading, speed, gust]
m = self.RE_VARIABLE_WIND.search(metar)
if m:
h1, h2 = m.groups()
weather['variable_wind'] = [int(h1), int(h2)]
precipitation = {}
for precp in self.RE_PRECIPITATION.findall(metar):
intensity, recent, mod, kind, neg = precp
if neg == 'E':
recent = 'RE'
if neg != 'NO':
precipitation[kind] = {'int': intensity ,'mod': mod, 'recent': recent}
weather['precipitation'] = precipitation
# Extended visibility
if weather['visibility'] > 9998:
weather['mt_visibility'] = weather['visibility']
ext_vis = c.rh2visibility(c.dewpoint2rh(weather['temperature'][0], weather['temperature'][1]))
if ext_vis > weather['visibility']:
weather['visibility'] = int(ext_vis)
return weather
def run(self, lat, lon, rate):
# Worker thread requires it's own db connection and cursor
if not self.th_db:
self.th_db = self.dbConnect(self.database)
# Check for new metar dowloaded data
if self.downloading == True:
if not self.download.q.empty():
self.downloading = False
metarfile = self.download.q.get()
if metarfile:
print 'Parsing METAR download.'
updated, parsed = self.updateMetar(self.th_db, os.sep.join([self.conf.cachepath, metarfile]))
self.reparse = True
print "METAR updated/parsed: %d/%d" % (updated, parsed)
else:
pass
elif self.conf.download:
# Download new data if required
cycle, timestamp = self.getCycle()
if (timestamp - self.last_timestamp) > self.conf.metar_updaterate * 60:
self.last_timestamp = timestamp
self.downloadCycle(cycle, timestamp)
# Update stations table if required
if self.ms_download and not self.ms_download.q.empty():
print 'Updating metar stations.'
nstations = self.updateStations(self.th_db, os.sep.join([self.conf.cachepath, self.ms_download.q.get()]))
self.ms_download = False
print '%d metar stations updated.' % nstations
# Update METAR.rwx
if self.conf.updateMetarRWX and self.next_metarRWX < time.time():
if self.updateMetarRWX(self.th_db):
self.next_metarRWX = time.time() + 300
print 'Updated METAR.rwx file.'
else:
# Retry in 10 sec
self.next_metarRWX = time.time() + 10
def downloadCycle(self, cycle, timestamp):
self.downloading = True
cachepath = os.sep.join([self.conf.cachepath, 'metar'])
if not os.path.exists(cachepath):
os.makedirs(cachepath)
prefix = self.conf.metar_source
if self.conf.metar_source == 'NOAA':
url = self.NOAA_METAR_URL
elif self.conf.metar_source == 'VATSIM':
url = self.VATSIM_METAR_URL
elif self.conf.metar_source == 'IVAO':
url = self.IVAO_METAR_URL
cachefile = os.sep.join(['metar', '%s_%d_%sZ.txt' % (prefix, timestamp, cycle)])
self.download = AsyncDownload(self.conf, url, cachefile)
def updateMetarRWX(self, db):
# Updates metar RWX file.
cursor = db.cursor()
try:
f = open(os.sep.join([self.conf.syspath, 'METAR.rwx']), 'w')
except:
print "ERROR updating METAR.rwx file: %s %s" % (sys.exc_info()[0], sys.exc_info()[1])
return False
res = cursor.execute('SELECT icao, metar FROM airports WHERE metar NOT NULL')
while True:
rows = res.fetchmany()
if rows:
for row in rows:
f.write('%s %s\n' % (row[0], row[1]))
else:
break
f.close()
return True
def die(self):
self.connection.commit()
self.connection.close() |
self.last_latlon, self.last_station, self.last_timestamp = [False]*3
def dbConnect(self, path):
return sqlite3.connect(path, check_same_thread=False) | random_line_split |
lib.rs | //! A Proxy Connector crate for Hyper based applications
//!
//! # Example
//! ```rust,no_run
//! extern crate hyper;
//! extern crate hyper_proxy;
//! extern crate futures;
//! extern crate tokio_core;
//!
//! use hyper::{Chunk, Client, Request, Method, Uri};
//! use hyper::client::HttpConnector;
//! use hyper::header::Basic;
//! use futures::{Future, Stream};
//! use hyper_proxy::{Proxy, ProxyConnector, Intercept};
//! use tokio_core::reactor::Core;
//!
//! fn main() {
//! let mut core = Core::new().unwrap();
//! let handle = core.handle();
//!
//! let proxy = {
//! let proxy_uri = "http://my-proxy:8080".parse().unwrap();
//! let mut proxy = Proxy::new(Intercept::All, proxy_uri);
//! proxy.set_authorization(Basic {
//! username: "John Doe".into(),
//! password: Some("Agent1234".into()),
//! });
//! let connector = HttpConnector::new(4, &handle);
//! let proxy_connector = ProxyConnector::from_proxy(connector, proxy).unwrap();
//! proxy_connector
//! };
//!
//! // Connecting to http will trigger regular GETs and POSTs.
//! // We need to manually append the relevant headers to the request
//! let uri: Uri = "http://my-remote-website.com".parse().unwrap();
//! let mut req = Request::new(Method::Get, uri.clone());
//! if let Some(headers) = proxy.http_headers(&uri) {
//! req.headers_mut().extend(headers.iter());
//! req.set_proxy(true);
//! }
//! let client = Client::configure().connector(proxy).build(&handle);
//! let fut_http = client.request(req)
//! .and_then(|res| res.body().concat2())
//! .map(move |body: Chunk| ::std::str::from_utf8(&body).unwrap().to_string());
//!
//! // Connecting to an https uri is straightforward (uses 'CONNECT' method underneath)
//! let uri = "https://my-remote-websitei-secured.com".parse().unwrap();
//! let fut_https = client
//! .get(uri)
//! .and_then(|res| res.body().concat2())
//! .map(move |body: Chunk| ::std::str::from_utf8(&body).unwrap().to_string());
//!
//! let futs = fut_http.join(fut_https);
//!
//! let (_http_res, _https_res) = core.run(futs).unwrap();
//! }
//! ```
#![deny(missing_docs)]
extern crate bytes;
#[macro_use]
extern crate futures;
extern crate hyper;
#[cfg(test)]
extern crate hyper_tls;
#[cfg(feature = "tls")]
extern crate native_tls;
extern crate tokio_core;
extern crate tokio_io;
#[cfg(feature = "tls")]
extern crate tokio_tls;
mod tunnel;
mod stream;
use std::any::Any;
use std::fmt;
use std::io;
use std::sync::Arc;
use futures::Future;
use hyper::Uri;
use hyper::client::Service;
use hyper::header::{Authorization, Header, Headers, ProxyAuthorization, Scheme};
#[cfg(feature = "tls")]
use native_tls::TlsConnector;
use tokio_io::{AsyncRead, AsyncWrite};
#[cfg(feature = "tls")]
use tokio_tls::TlsConnectorExt;
use stream::ProxyStream;
/// The Intercept enum to filter connections
#[derive(Debug, Clone)]
pub enum Intercept {
/// All incoming connection will go through proxy
All,
/// Only http connections will go through proxy
Http,
/// Only https connections will go through proxy
Https,
/// No connection will go through this proxy
None,
/// A custom intercept
Custom(Custom),
}
/// A Custom struct to proxy custom uris
#[derive(Clone)]
pub struct Custom(Arc<Fn(&Uri) -> bool + Send + Sync>);
impl fmt::Debug for Custom {
fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
write!(f, "_")
}
}
impl<F: Fn(&Uri) -> bool + Send + Sync + 'static> From<F> for Custom {
fn from(f: F) -> Custom {
Custom(Arc::new(f))
}
}
impl Intercept {
/// A function to check if given `Uri` is proxied
pub fn matches(&self, uri: &Uri) -> bool {
match (self, uri.scheme()) {
(&Intercept::All, _)
| (&Intercept::Http, Some("http"))
| (&Intercept::Https, Some("https")) => true,
(&Intercept::Custom(Custom(ref f)), _) => f(uri),
_ => false,
}
}
}
impl<F: Fn(&Uri) -> bool + Send + Sync + 'static> From<F> for Intercept {
fn from(f: F) -> Intercept {
Intercept::Custom(f.into())
}
}
/// A Proxy strcut
#[derive(Clone, Debug)]
pub struct Proxy {
intercept: Intercept,
headers: Headers,
uri: Uri,
}
impl Proxy {
/// Create a new `Proxy`
pub fn new<I: Into<Intercept>>(intercept: I, uri: Uri) -> Proxy {
Proxy {
intercept: intercept.into(),
uri: uri,
headers: Headers::new(),
}
}
/// Set `Proxy` authorization
pub fn set_authorization<S: Scheme + Any>(&mut self, scheme: S) {
match self.intercept {
Intercept::Http => self.headers.set(Authorization(scheme)),
Intercept::Https => self.headers.set(ProxyAuthorization(scheme)),
_ => {
self.headers.set(ProxyAuthorization(scheme.clone()));
self.headers.set(Authorization(scheme));
}
}
}
/// Set a custom header
pub fn set_header<H: Header>(&mut self, header: H) {
self.headers.set(header);
}
/// Get current intercept
pub fn intercept(&self) -> &Intercept {
&self.intercept
}
/// Get current `Headers` which must be sent to proxy
pub fn headers(&self) -> &Headers {
&self.headers
}
/// Get proxy uri
pub fn uri(&self) -> &Uri {
&self.uri
}
}
/// A wrapper around `Proxy`s with a connector.
#[derive(Clone)]
pub struct ProxyConnector<C> {
proxies: Vec<Proxy>,
connector: C,
#[cfg(feature = "tls")]
tls: Option<TlsConnector>,
#[cfg(not(feature = "tls"))]
tls: Option<()>,
}
impl<C: fmt::Debug> fmt::Debug for ProxyConnector<C> {
fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
write!(
f,
"ProxyConnector {}{{ proxies: {:?}, connector: {:?} }}",
if self.tls.is_some() {
""
} else {
"(unsecured)"
},
self.proxies,
self.connector
)
}
}
impl<C> ProxyConnector<C> {
/// Create a new secured Proxies
#[cfg(feature = "tls")]
pub fn new(connector: C) -> Result<Self, io::Error> {
let tls = TlsConnector::builder()
.and_then(|b| b.build())
.map_err(|e| io::Error::new(io::ErrorKind::Other, e))?;
Ok(ProxyConnector {
proxies: Vec::new(),
connector: connector,
tls: Some(tls),
})
}
/// Create a new unsecured Proxy
pub fn unsecured(connector: C) -> Self {
ProxyConnector {
proxies: Vec::new(),
connector: connector,
tls: None,
}
}
/// Create a proxy connector and attach a particular proxy
#[cfg(feature = "tls")]
pub fn from_proxy(connector: C, proxy: Proxy) -> Result<Self, io::Error> {
let mut c = ProxyConnector::new(connector)?;
c.proxies.push(proxy);
Ok(c)
}
/// Create a proxy connector and attach a particular proxy
pub fn from_proxy_unsecured(connector: C, proxy: Proxy) -> Self {
let mut c = ProxyConnector::unsecured(connector);
c.proxies.push(proxy);
c
}
/// Change proxy connector
pub fn with_connector<CC>(self, connector: CC) -> ProxyConnector<CC> {
ProxyConnector {
connector: connector,
proxies: self.proxies,
tls: self.tls,
}
}
/// Set or unset tls when tunneling
#[cfg(feature = "tls")]
pub fn set_tls(&mut self, tls: Option<TlsConnector>) {
self.tls = tls;
}
/// Get the current proxies
pub fn proxies(&self) -> &[Proxy] {
&self.proxies
}
/// Add a new additional proxy
pub fn add_proxy(&mut self, proxy: Proxy) {
self.proxies.push(proxy);
}
/// Extend the list of proxies
pub fn extend_proxies<I: IntoIterator<Item = Proxy>>(&mut self, proxies: I) {
self.proxies.extend(proxies)
}
/// Get http headers for a matching uri
///
/// These headers must be appended to the hyper Request for the proxy to work properly.
/// This is needed only for http requests.
pub fn http_headers(&self, uri: &Uri) -> Option<&Headers> {
if uri.scheme() != Some("http") {
return None;
}
self.match_proxy(uri).map(|p| &p.headers)
}
fn | (&self, uri: &Uri) -> Option<&Proxy> {
self.proxies.iter().find(|p| p.intercept.matches(uri))
}
}
impl<C> Service for ProxyConnector<C>
where
C: Service<Request = Uri, Error = io::Error> + 'static,
C::Future: 'static,
<C::Future as Future>::Item: AsyncRead + AsyncWrite + 'static,
{
type Request = Uri;
type Response = ProxyStream<C::Response>;
type Error = io::Error;
type Future = Box<Future<Item = ProxyStream<C::Response>, Error = Self::Error>>;
fn call(&self, uri: Uri) -> Self::Future {
if let Some(ref p) = self.match_proxy(&uri) {
if uri.scheme() == Some("https") {
let host = uri.host().unwrap().to_owned();
let port = uri.port().unwrap_or(443);
let tunnel = tunnel::Tunnel::new(&host, port, &p.headers);
let proxy_stream = self.connector
.call(p.uri.clone())
.and_then(move |io| tunnel.with_stream(io));
match self.tls.as_ref() {
#[cfg(feature = "tls")]
Some(tls) => {
let tls = tls.clone();
Box::new(
proxy_stream
.and_then(move |io| tls.connect_async(&host, io).map_err(io_err))
.map(|s| ProxyStream::Secured(s)),
)
},
#[cfg(not(feature = "tls"))]
Some(_) => panic!("hyper-proxy was not built with TLS support"),
None => Box::new(proxy_stream.map(|s| ProxyStream::Regular(s))),
}
} else {
// without TLS, there is absolutely zero benefit from tunneling, as the proxy can
// read the plaintext traffic. Thus, tunneling is just restrictive to the proxies
// resources.
Box::new(
self.connector
.call(p.uri.clone())
.map(|s| ProxyStream::Regular(s)),
)
}
} else {
Box::new(self.connector.call(uri).map(|s| ProxyStream::Regular(s)))
}
}
}
#[inline]
fn io_err<E: Into<Box<::std::error::Error + Send + Sync>>>(e: E) -> io::Error {
io::Error::new(io::ErrorKind::Other, e)
}
| match_proxy | identifier_name |
lib.rs | //! A Proxy Connector crate for Hyper based applications
//!
//! # Example
//! ```rust,no_run
//! extern crate hyper;
//! extern crate hyper_proxy;
//! extern crate futures;
//! extern crate tokio_core;
//!
//! use hyper::{Chunk, Client, Request, Method, Uri};
//! use hyper::client::HttpConnector;
//! use hyper::header::Basic;
//! use futures::{Future, Stream};
//! use hyper_proxy::{Proxy, ProxyConnector, Intercept};
//! use tokio_core::reactor::Core;
//!
//! fn main() {
//! let mut core = Core::new().unwrap();
//! let handle = core.handle();
//!
//! let proxy = {
//! let proxy_uri = "http://my-proxy:8080".parse().unwrap();
//! let mut proxy = Proxy::new(Intercept::All, proxy_uri);
//! proxy.set_authorization(Basic {
//! username: "John Doe".into(),
//! password: Some("Agent1234".into()),
//! });
//! let connector = HttpConnector::new(4, &handle);
//! let proxy_connector = ProxyConnector::from_proxy(connector, proxy).unwrap();
//! proxy_connector
//! };
//!
//! // Connecting to http will trigger regular GETs and POSTs.
//! // We need to manually append the relevant headers to the request
//! let uri: Uri = "http://my-remote-website.com".parse().unwrap();
//! let mut req = Request::new(Method::Get, uri.clone());
//! if let Some(headers) = proxy.http_headers(&uri) {
//! req.headers_mut().extend(headers.iter());
//! req.set_proxy(true);
//! }
//! let client = Client::configure().connector(proxy).build(&handle);
//! let fut_http = client.request(req)
//! .and_then(|res| res.body().concat2())
//! .map(move |body: Chunk| ::std::str::from_utf8(&body).unwrap().to_string());
//!
//! // Connecting to an https uri is straightforward (uses 'CONNECT' method underneath)
//! let uri = "https://my-remote-websitei-secured.com".parse().unwrap();
//! let fut_https = client
//! .get(uri)
//! .and_then(|res| res.body().concat2())
//! .map(move |body: Chunk| ::std::str::from_utf8(&body).unwrap().to_string());
//!
//! let futs = fut_http.join(fut_https);
//!
//! let (_http_res, _https_res) = core.run(futs).unwrap();
//! }
//! ```
#![deny(missing_docs)]
extern crate bytes;
#[macro_use]
extern crate futures;
extern crate hyper;
#[cfg(test)]
extern crate hyper_tls;
#[cfg(feature = "tls")]
extern crate native_tls;
extern crate tokio_core;
extern crate tokio_io;
#[cfg(feature = "tls")]
extern crate tokio_tls;
mod tunnel;
mod stream;
use std::any::Any;
use std::fmt;
use std::io;
use std::sync::Arc;
use futures::Future;
use hyper::Uri;
use hyper::client::Service;
use hyper::header::{Authorization, Header, Headers, ProxyAuthorization, Scheme};
#[cfg(feature = "tls")]
use native_tls::TlsConnector;
use tokio_io::{AsyncRead, AsyncWrite};
#[cfg(feature = "tls")]
use tokio_tls::TlsConnectorExt;
use stream::ProxyStream;
/// The Intercept enum to filter connections
#[derive(Debug, Clone)]
pub enum Intercept {
/// All incoming connection will go through proxy
All,
/// Only http connections will go through proxy
Http,
/// Only https connections will go through proxy
Https,
/// No connection will go through this proxy
None,
/// A custom intercept
Custom(Custom),
}
/// A Custom struct to proxy custom uris
#[derive(Clone)]
pub struct Custom(Arc<Fn(&Uri) -> bool + Send + Sync>);
impl fmt::Debug for Custom {
fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
write!(f, "_")
}
}
impl<F: Fn(&Uri) -> bool + Send + Sync + 'static> From<F> for Custom {
fn from(f: F) -> Custom {
Custom(Arc::new(f))
}
}
impl Intercept {
/// A function to check if given `Uri` is proxied
pub fn matches(&self, uri: &Uri) -> bool |
}
impl<F: Fn(&Uri) -> bool + Send + Sync + 'static> From<F> for Intercept {
fn from(f: F) -> Intercept {
Intercept::Custom(f.into())
}
}
/// A Proxy strcut
#[derive(Clone, Debug)]
pub struct Proxy {
intercept: Intercept,
headers: Headers,
uri: Uri,
}
impl Proxy {
/// Create a new `Proxy`
pub fn new<I: Into<Intercept>>(intercept: I, uri: Uri) -> Proxy {
Proxy {
intercept: intercept.into(),
uri: uri,
headers: Headers::new(),
}
}
/// Set `Proxy` authorization
pub fn set_authorization<S: Scheme + Any>(&mut self, scheme: S) {
match self.intercept {
Intercept::Http => self.headers.set(Authorization(scheme)),
Intercept::Https => self.headers.set(ProxyAuthorization(scheme)),
_ => {
self.headers.set(ProxyAuthorization(scheme.clone()));
self.headers.set(Authorization(scheme));
}
}
}
/// Set a custom header
pub fn set_header<H: Header>(&mut self, header: H) {
self.headers.set(header);
}
/// Get current intercept
pub fn intercept(&self) -> &Intercept {
&self.intercept
}
/// Get current `Headers` which must be sent to proxy
pub fn headers(&self) -> &Headers {
&self.headers
}
/// Get proxy uri
pub fn uri(&self) -> &Uri {
&self.uri
}
}
/// A wrapper around `Proxy`s with a connector.
#[derive(Clone)]
pub struct ProxyConnector<C> {
proxies: Vec<Proxy>,
connector: C,
#[cfg(feature = "tls")]
tls: Option<TlsConnector>,
#[cfg(not(feature = "tls"))]
tls: Option<()>,
}
impl<C: fmt::Debug> fmt::Debug for ProxyConnector<C> {
fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
write!(
f,
"ProxyConnector {}{{ proxies: {:?}, connector: {:?} }}",
if self.tls.is_some() {
""
} else {
"(unsecured)"
},
self.proxies,
self.connector
)
}
}
impl<C> ProxyConnector<C> {
/// Create a new secured Proxies
#[cfg(feature = "tls")]
pub fn new(connector: C) -> Result<Self, io::Error> {
let tls = TlsConnector::builder()
.and_then(|b| b.build())
.map_err(|e| io::Error::new(io::ErrorKind::Other, e))?;
Ok(ProxyConnector {
proxies: Vec::new(),
connector: connector,
tls: Some(tls),
})
}
/// Create a new unsecured Proxy
pub fn unsecured(connector: C) -> Self {
ProxyConnector {
proxies: Vec::new(),
connector: connector,
tls: None,
}
}
/// Create a proxy connector and attach a particular proxy
#[cfg(feature = "tls")]
pub fn from_proxy(connector: C, proxy: Proxy) -> Result<Self, io::Error> {
let mut c = ProxyConnector::new(connector)?;
c.proxies.push(proxy);
Ok(c)
}
/// Create a proxy connector and attach a particular proxy
pub fn from_proxy_unsecured(connector: C, proxy: Proxy) -> Self {
let mut c = ProxyConnector::unsecured(connector);
c.proxies.push(proxy);
c
}
/// Change proxy connector
pub fn with_connector<CC>(self, connector: CC) -> ProxyConnector<CC> {
ProxyConnector {
connector: connector,
proxies: self.proxies,
tls: self.tls,
}
}
/// Set or unset tls when tunneling
#[cfg(feature = "tls")]
pub fn set_tls(&mut self, tls: Option<TlsConnector>) {
self.tls = tls;
}
/// Get the current proxies
pub fn proxies(&self) -> &[Proxy] {
&self.proxies
}
/// Add a new additional proxy
pub fn add_proxy(&mut self, proxy: Proxy) {
self.proxies.push(proxy);
}
/// Extend the list of proxies
pub fn extend_proxies<I: IntoIterator<Item = Proxy>>(&mut self, proxies: I) {
self.proxies.extend(proxies)
}
/// Get http headers for a matching uri
///
/// These headers must be appended to the hyper Request for the proxy to work properly.
/// This is needed only for http requests.
pub fn http_headers(&self, uri: &Uri) -> Option<&Headers> {
if uri.scheme() != Some("http") {
return None;
}
self.match_proxy(uri).map(|p| &p.headers)
}
fn match_proxy(&self, uri: &Uri) -> Option<&Proxy> {
self.proxies.iter().find(|p| p.intercept.matches(uri))
}
}
impl<C> Service for ProxyConnector<C>
where
C: Service<Request = Uri, Error = io::Error> + 'static,
C::Future: 'static,
<C::Future as Future>::Item: AsyncRead + AsyncWrite + 'static,
{
type Request = Uri;
type Response = ProxyStream<C::Response>;
type Error = io::Error;
type Future = Box<Future<Item = ProxyStream<C::Response>, Error = Self::Error>>;
fn call(&self, uri: Uri) -> Self::Future {
if let Some(ref p) = self.match_proxy(&uri) {
if uri.scheme() == Some("https") {
let host = uri.host().unwrap().to_owned();
let port = uri.port().unwrap_or(443);
let tunnel = tunnel::Tunnel::new(&host, port, &p.headers);
let proxy_stream = self.connector
.call(p.uri.clone())
.and_then(move |io| tunnel.with_stream(io));
match self.tls.as_ref() {
#[cfg(feature = "tls")]
Some(tls) => {
let tls = tls.clone();
Box::new(
proxy_stream
.and_then(move |io| tls.connect_async(&host, io).map_err(io_err))
.map(|s| ProxyStream::Secured(s)),
)
},
#[cfg(not(feature = "tls"))]
Some(_) => panic!("hyper-proxy was not built with TLS support"),
None => Box::new(proxy_stream.map(|s| ProxyStream::Regular(s))),
}
} else {
// without TLS, there is absolutely zero benefit from tunneling, as the proxy can
// read the plaintext traffic. Thus, tunneling is just restrictive to the proxies
// resources.
Box::new(
self.connector
.call(p.uri.clone())
.map(|s| ProxyStream::Regular(s)),
)
}
} else {
Box::new(self.connector.call(uri).map(|s| ProxyStream::Regular(s)))
}
}
}
#[inline]
fn io_err<E: Into<Box<::std::error::Error + Send + Sync>>>(e: E) -> io::Error {
io::Error::new(io::ErrorKind::Other, e)
}
| {
match (self, uri.scheme()) {
(&Intercept::All, _)
| (&Intercept::Http, Some("http"))
| (&Intercept::Https, Some("https")) => true,
(&Intercept::Custom(Custom(ref f)), _) => f(uri),
_ => false,
}
} | identifier_body |
lib.rs | //! A Proxy Connector crate for Hyper based applications
//!
//! # Example
//! ```rust,no_run
//! extern crate hyper;
//! extern crate hyper_proxy;
//! extern crate futures;
//! extern crate tokio_core;
//!
//! use hyper::{Chunk, Client, Request, Method, Uri};
//! use hyper::client::HttpConnector;
//! use hyper::header::Basic;
//! use futures::{Future, Stream};
//! use hyper_proxy::{Proxy, ProxyConnector, Intercept};
//! use tokio_core::reactor::Core;
//!
//! fn main() {
//! let mut core = Core::new().unwrap();
//! let handle = core.handle();
//!
//! let proxy = {
//! let proxy_uri = "http://my-proxy:8080".parse().unwrap();
//! let mut proxy = Proxy::new(Intercept::All, proxy_uri);
//! proxy.set_authorization(Basic {
//! username: "John Doe".into(),
//! password: Some("Agent1234".into()),
//! });
//! let connector = HttpConnector::new(4, &handle);
//! let proxy_connector = ProxyConnector::from_proxy(connector, proxy).unwrap();
//! proxy_connector
//! };
//!
//! // Connecting to http will trigger regular GETs and POSTs.
//! // We need to manually append the relevant headers to the request
//! let uri: Uri = "http://my-remote-website.com".parse().unwrap();
//! let mut req = Request::new(Method::Get, uri.clone());
//! if let Some(headers) = proxy.http_headers(&uri) {
//! req.headers_mut().extend(headers.iter());
//! req.set_proxy(true);
//! }
//! let client = Client::configure().connector(proxy).build(&handle);
//! let fut_http = client.request(req)
//! .and_then(|res| res.body().concat2())
//! .map(move |body: Chunk| ::std::str::from_utf8(&body).unwrap().to_string());
//!
//! // Connecting to an https uri is straightforward (uses 'CONNECT' method underneath)
//! let uri = "https://my-remote-websitei-secured.com".parse().unwrap();
//! let fut_https = client
//! .get(uri)
//! .and_then(|res| res.body().concat2())
//! .map(move |body: Chunk| ::std::str::from_utf8(&body).unwrap().to_string());
//!
//! let futs = fut_http.join(fut_https);
//!
//! let (_http_res, _https_res) = core.run(futs).unwrap();
//! }
//! ```
#![deny(missing_docs)]
extern crate bytes;
#[macro_use]
extern crate futures;
extern crate hyper;
#[cfg(test)]
extern crate hyper_tls;
#[cfg(feature = "tls")]
extern crate native_tls;
extern crate tokio_core;
extern crate tokio_io;
#[cfg(feature = "tls")]
extern crate tokio_tls;
mod tunnel;
mod stream;
use std::any::Any;
use std::fmt;
use std::io;
use std::sync::Arc;
use futures::Future;
use hyper::Uri;
use hyper::client::Service;
use hyper::header::{Authorization, Header, Headers, ProxyAuthorization, Scheme};
#[cfg(feature = "tls")]
use native_tls::TlsConnector;
use tokio_io::{AsyncRead, AsyncWrite};
#[cfg(feature = "tls")]
use tokio_tls::TlsConnectorExt;
use stream::ProxyStream;
/// The Intercept enum to filter connections
#[derive(Debug, Clone)]
pub enum Intercept {
/// All incoming connection will go through proxy
All,
/// Only http connections will go through proxy
Http,
/// Only https connections will go through proxy
Https,
/// No connection will go through this proxy
None,
/// A custom intercept
Custom(Custom),
}
/// A Custom struct to proxy custom uris
#[derive(Clone)]
pub struct Custom(Arc<Fn(&Uri) -> bool + Send + Sync>);
impl fmt::Debug for Custom {
fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
write!(f, "_")
}
}
impl<F: Fn(&Uri) -> bool + Send + Sync + 'static> From<F> for Custom {
fn from(f: F) -> Custom {
Custom(Arc::new(f))
}
}
impl Intercept {
/// A function to check if given `Uri` is proxied
pub fn matches(&self, uri: &Uri) -> bool {
match (self, uri.scheme()) {
(&Intercept::All, _)
| (&Intercept::Http, Some("http"))
| (&Intercept::Https, Some("https")) => true,
(&Intercept::Custom(Custom(ref f)), _) => f(uri),
_ => false,
}
}
}
impl<F: Fn(&Uri) -> bool + Send + Sync + 'static> From<F> for Intercept {
fn from(f: F) -> Intercept {
Intercept::Custom(f.into())
}
}
/// A Proxy strcut | pub struct Proxy {
intercept: Intercept,
headers: Headers,
uri: Uri,
}
impl Proxy {
/// Create a new `Proxy`
pub fn new<I: Into<Intercept>>(intercept: I, uri: Uri) -> Proxy {
Proxy {
intercept: intercept.into(),
uri: uri,
headers: Headers::new(),
}
}
/// Set `Proxy` authorization
pub fn set_authorization<S: Scheme + Any>(&mut self, scheme: S) {
match self.intercept {
Intercept::Http => self.headers.set(Authorization(scheme)),
Intercept::Https => self.headers.set(ProxyAuthorization(scheme)),
_ => {
self.headers.set(ProxyAuthorization(scheme.clone()));
self.headers.set(Authorization(scheme));
}
}
}
/// Set a custom header
pub fn set_header<H: Header>(&mut self, header: H) {
self.headers.set(header);
}
/// Get current intercept
pub fn intercept(&self) -> &Intercept {
&self.intercept
}
/// Get current `Headers` which must be sent to proxy
pub fn headers(&self) -> &Headers {
&self.headers
}
/// Get proxy uri
pub fn uri(&self) -> &Uri {
&self.uri
}
}
/// A wrapper around `Proxy`s with a connector.
#[derive(Clone)]
pub struct ProxyConnector<C> {
proxies: Vec<Proxy>,
connector: C,
#[cfg(feature = "tls")]
tls: Option<TlsConnector>,
#[cfg(not(feature = "tls"))]
tls: Option<()>,
}
impl<C: fmt::Debug> fmt::Debug for ProxyConnector<C> {
fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
write!(
f,
"ProxyConnector {}{{ proxies: {:?}, connector: {:?} }}",
if self.tls.is_some() {
""
} else {
"(unsecured)"
},
self.proxies,
self.connector
)
}
}
impl<C> ProxyConnector<C> {
/// Create a new secured Proxies
#[cfg(feature = "tls")]
pub fn new(connector: C) -> Result<Self, io::Error> {
let tls = TlsConnector::builder()
.and_then(|b| b.build())
.map_err(|e| io::Error::new(io::ErrorKind::Other, e))?;
Ok(ProxyConnector {
proxies: Vec::new(),
connector: connector,
tls: Some(tls),
})
}
/// Create a new unsecured Proxy
pub fn unsecured(connector: C) -> Self {
ProxyConnector {
proxies: Vec::new(),
connector: connector,
tls: None,
}
}
/// Create a proxy connector and attach a particular proxy
#[cfg(feature = "tls")]
pub fn from_proxy(connector: C, proxy: Proxy) -> Result<Self, io::Error> {
let mut c = ProxyConnector::new(connector)?;
c.proxies.push(proxy);
Ok(c)
}
/// Create a proxy connector and attach a particular proxy
pub fn from_proxy_unsecured(connector: C, proxy: Proxy) -> Self {
let mut c = ProxyConnector::unsecured(connector);
c.proxies.push(proxy);
c
}
/// Change proxy connector
pub fn with_connector<CC>(self, connector: CC) -> ProxyConnector<CC> {
ProxyConnector {
connector: connector,
proxies: self.proxies,
tls: self.tls,
}
}
/// Set or unset tls when tunneling
#[cfg(feature = "tls")]
pub fn set_tls(&mut self, tls: Option<TlsConnector>) {
self.tls = tls;
}
/// Get the current proxies
pub fn proxies(&self) -> &[Proxy] {
&self.proxies
}
/// Add a new additional proxy
pub fn add_proxy(&mut self, proxy: Proxy) {
self.proxies.push(proxy);
}
/// Extend the list of proxies
pub fn extend_proxies<I: IntoIterator<Item = Proxy>>(&mut self, proxies: I) {
self.proxies.extend(proxies)
}
/// Get http headers for a matching uri
///
/// These headers must be appended to the hyper Request for the proxy to work properly.
/// This is needed only for http requests.
pub fn http_headers(&self, uri: &Uri) -> Option<&Headers> {
if uri.scheme() != Some("http") {
return None;
}
self.match_proxy(uri).map(|p| &p.headers)
}
fn match_proxy(&self, uri: &Uri) -> Option<&Proxy> {
self.proxies.iter().find(|p| p.intercept.matches(uri))
}
}
impl<C> Service for ProxyConnector<C>
where
C: Service<Request = Uri, Error = io::Error> + 'static,
C::Future: 'static,
<C::Future as Future>::Item: AsyncRead + AsyncWrite + 'static,
{
type Request = Uri;
type Response = ProxyStream<C::Response>;
type Error = io::Error;
type Future = Box<Future<Item = ProxyStream<C::Response>, Error = Self::Error>>;
fn call(&self, uri: Uri) -> Self::Future {
if let Some(ref p) = self.match_proxy(&uri) {
if uri.scheme() == Some("https") {
let host = uri.host().unwrap().to_owned();
let port = uri.port().unwrap_or(443);
let tunnel = tunnel::Tunnel::new(&host, port, &p.headers);
let proxy_stream = self.connector
.call(p.uri.clone())
.and_then(move |io| tunnel.with_stream(io));
match self.tls.as_ref() {
#[cfg(feature = "tls")]
Some(tls) => {
let tls = tls.clone();
Box::new(
proxy_stream
.and_then(move |io| tls.connect_async(&host, io).map_err(io_err))
.map(|s| ProxyStream::Secured(s)),
)
},
#[cfg(not(feature = "tls"))]
Some(_) => panic!("hyper-proxy was not built with TLS support"),
None => Box::new(proxy_stream.map(|s| ProxyStream::Regular(s))),
}
} else {
// without TLS, there is absolutely zero benefit from tunneling, as the proxy can
// read the plaintext traffic. Thus, tunneling is just restrictive to the proxies
// resources.
Box::new(
self.connector
.call(p.uri.clone())
.map(|s| ProxyStream::Regular(s)),
)
}
} else {
Box::new(self.connector.call(uri).map(|s| ProxyStream::Regular(s)))
}
}
}
#[inline]
fn io_err<E: Into<Box<::std::error::Error + Send + Sync>>>(e: E) -> io::Error {
io::Error::new(io::ErrorKind::Other, e)
} | #[derive(Clone, Debug)] | random_line_split |
ahrs_serv.py | """ AHRS - Madgwicks, basico
Este codigo se conecta por el bus de I2C del Raspberry PI modelo 2 al IMU10 de Adafruit, y usa los datos de los sensores para
alimentar una implementacion del filtro de Madgwicks que retorna la orientacion en quaterniones del sensor (que son transformadas a Angulos
de Euler). Luego lo enivia por tcp/ip a una computadora que grafica el resultado.
"""
# Funciones de comunicacion
def get_interfaces():
""" (Python 3) Funcion que devuelve una lista con strings de todos las interfaces de red que tenga tu computadora
*NOTA: Solo funciona en Linux
get_ifaces()
['enp3s0', 'vmnet1', 'vmnet8', 'wlp2s0', ' lo']"""
with open('/proc/net/dev','r') as f: #Abrimos el archivo con la informacion de red
interfaces = []
for linea in f:
if ':' in linea:
interfaces.append(linea[:linea.find(':')]) #Extraemos los primeros caracteres de las lineas con informacion de las interfaces
return [iface.lstrip().rstrip() for iface in interfaces]
def get_ip_address2(ifname):
""" (Python 2)Funcion que recibe un string con el nombre de una interfaz de red y devuelve
un string con la direccion IP de la interfaz, o None si dicha interfaz no
tiene direccion IP asignada.
get_ip_address('wlp2s0')
'192.168.1.4' """
try:
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
return socket.inet_ntoa(fcntl.ioctl(
s.fileno(),
0x8915, # SIOCGIFADDR
struct.pack('256s', ifname[:15])
)[20:24])
except:
return None
def get_network_config2():
""" (Python 2) Funcion que devuelve un diccionario con las interfaces de red de la computadora y sus respectivas direcciones
ip. """
interfaces = get_interfaces()
ips = [get_ip_address2(ip) for ip in interfaces]
return dict(zip(interfaces,ips))
# Funciones que configuran los sensores
def accel_setup():
global ahrs
global accel_addr
ahrs.write_byte_data(accel_addr,0x23,0x88) #Prendemos alta resolucion y hold de update de los registros de salida en el reg 23h
ahrs.write_byte_data(accel_addr,0x20,0x27) #sacamos el accelerometro del shutdown mode
def magn_setup():
global ahrs
global magn_addr
ahrs.write_byte_data(magn_addr,0x00,0x10) #Seteamos la velocidad de las mediciones a 15Hz
ahrs.write_byte_data(magn_addr,0x01,0x20) #Ponemos la escala +-1.3g
ahrs.write_byte_data(magn_addr,0x02,0x00) #Prendemos el magnetometro
def gyro_setup():
global ahrs
global gyro_addr
ahrs.write_byte_data(gyro_addr,0x20,0x8F) #DataRate 400Hz, BW 20Hz, All Axis enabled, Gyro ON
ahrs.write_byte_data(gyro_addr,0x23,0xA0) #Escala 2000dps, BlockUpdates
ahrs.write_byte_data(gyro_addr,0x24,0x02) #OutSel = 10h, use HPF and LPF2, HPen = 0. | # Funciones que sacan los valores de los sensores.
def accel_read():
global ahrs
global accel_addr
accel_data = [0,0,0]
##Sacamos los datos de acceleracion de los 3 ejes
#Eje X
xl = format(ahrs.read_byte_data(accel_addr,0x28), '#010b')[2:6]
xh = format(ahrs.read_byte_data(accel_addr,0x29), '#010b')[2:]
#Eje Y
yl = format(ahrs.read_byte_data(accel_addr,0x2A), '#010b')[2:6]
yh = format(ahrs.read_byte_data(accel_addr,0x2B), '#010b')[2:]
#Eje Z
zl = format(ahrs.read_byte_data(accel_addr,0x2C), '#010b')[2:6]
zh = format(ahrs.read_byte_data(accel_addr,0x2D), '#010b')[2:]
## Combinamos juntos los 2 bytes.
accel_data[0] = int('0b' + xh[1:] + xl,2) - int(xh[0])*(2**(len(xh+xl)-1)) #Eje X #Unimos los bytes en complemento a 2
accel_data[1] = int('0b' + yh[1:] + yl,2) - int(yh[0])*(2**(len(yh+yl)-1)) #Eje Y #Unimos los bytes en complemento a 2
accel_data[2] = int('0b' + zh[1:] + zl,2) - int(zh[0])*(2**(len(zh+zl)-1)) #Eje Z #Unimos los bytes en complemento a 2
#Normalizamos el vector antes de retornarlo
norma = np.linalg.norm(accel_data)
accel_data = list(map(lambda x: x/norma,accel_data))
return accel_data
def magn_read():
global ahrs
global magn_addr
magn_data = [0,0,0]
##Sacamos los datos de campo magnetico de los 3 ejes
#Eje X
xh = ahrs.read_byte_data(magn_addr,0x03)
xl = ahrs.read_byte_data(magn_addr,0x04)
#Eje Y
yh = ahrs.read_byte_data(magn_addr,0x07)
yl = ahrs.read_byte_data(magn_addr,0x08)
#Eje Z
zh = ahrs.read_byte_data(magn_addr,0x05)
zl = ahrs.read_byte_data(magn_addr,0x06)
#Convertimos los resultados a binario para poder verlos
xl = format(xl, '#010b')[2:]
xh = format(xh, '#010b')[2:]
yl = format(yl, '#010b')[2:]
yh = format(yh, '#010b')[2:]
zl = format(zl, '#010b')[2:]
zh = format(zh, '#010b')[2:]
#Y aplicamos el complemento a 2 para conseguir el numero
magn_data[0] = int( xh[1:] + xl,2) - int(xh[0])*(2**(len(xh+xl)-1))
magn_data[1] = int( yh[1:] + yl,2) - int(yh[0])*(2**(len(yh+yl)-1))
magn_data[2] = int( zh[1:] + zl,2) - int(zh[0])*(2**(len(zh+zl)-1))
#Escalamos los datos
magn_data[0] = (magn_data[0] - 35.0) * 1.0
magn_data[1] = (magn_data[1] + 35.0) * 1.02702702703
magn_data[2] = (magn_data[2] - 3.0) * 0.974358974359
#Normalizamos el vector
norma = np.linalg.norm(magn_data)
magn_data = list(map(lambda x: x/norma,magn_data))
return magn_data
def gyro_read():
global ahrs
global gyro_addr
gyro_data = [0,0,0]
#Eje X
xh = ahrs.read_byte_data(gyro_addr,0x29)
xl = ahrs.read_byte_data(gyro_addr,0x28)
#Eje Y
yh = ahrs.read_byte_data(gyro_addr,0x2B)
yl = ahrs.read_byte_data(gyro_addr,0x2A)
#Eje Z
zh = ahrs.read_byte_data(gyro_addr,0x2D)
zl = ahrs.read_byte_data(gyro_addr,0x2C)
#Convertimos los resultados a binario para poder verlos
xl = format(xl, '#010b')[2:]
xh = format(xh, '#010b')[2:]
yl = format(yl, '#010b')[2:]
yh = format(yh, '#010b')[2:]
zl = format(zl, '#010b')[2:]
zh = format(zh, '#010b')[2:]
#Y aplicamos el complemento a 2 para conseguir el numero
x = int( xh[1:] + xl,2) - int(xh[0])*(2**(len(xh+xl)-1))
y = int( yh[1:] + yl,2) - int(yh[0])*(2**(len(yh+yl)-1))
z = int( zh[1:] + zl,2) - int(zh[0])*(2**(len(zh+zl)-1))
#Calculamos los grados por segundo (para 2000dps)
gyro_data[0] = float(x)*70/1000
gyro_data[1] = float(y)*70/1000
gyro_data[2] = float(z)*70/1000
#Transformamos los datos a radianes/seg
gyro_data = list(map(math.radians, gyro_data))
return gyro_data
def madgwicks_filter(accel_datas, magn_datas, gyro_datas, deltat):
global SEq
global b_x
global b_z
global w_b
global beta
global zeta
# print "accel = {}".format(accel_datas)
# print "magn = {}".format(magn_datas)
# print "gyro = {}".format(gyro_datas)
# print "deltat = {}".format(deltat)
# print SEq
# print b_x
# print w_b
# print beta
#axulirary variables to avoid reapeated calcualtions
halfSEq_1 = 0.5 * SEq[0]
halfSEq_2 = 0.5 * SEq[1]
halfSEq_3 = 0.5 * SEq[2]
halfSEq_4 = 0.5 * SEq[3]
twoSEq_1 = 2.0 * SEq[0]
twoSEq_2 = 2.0 * SEq[1]
twoSEq_3 = 2.0 * SEq[2]
twoSEq_4 = 2.0 * SEq[3]
twob_x = 2.0 * b_x
twob_z = 2.0 * b_z
twob_xSEq_1 = 2.0 * b_x * SEq[0]
twob_xSEq_2 = 2.0 * b_x * SEq[1]
twob_xSEq_3 = 2.0 * b_x * SEq[2]
twob_xSEq_4 = 2.0 * b_x * SEq[3]
twob_zSEq_1 = 2.0 * b_z * SEq[0]
twob_zSEq_2 = 2.0 * b_z * SEq[1]
twob_zSEq_3 = 2.0 * b_z * SEq[2]
twob_zSEq_4 = 2.0 * b_z * SEq[3]
SEq_1SEq_2 = SEq[0] * SEq[1]
SEq_1SEq_3 = SEq[0] * SEq[2]
SEq_1SEq_4 = SEq[0] * SEq[3]
SEq_2SEq_3 = SEq[1] * SEq[2]
SEq_2SEq_4 = SEq[1] * SEq[3]
SEq_3SEq_4 = SEq[2] * SEq[3]
twom_x = 2.0 * magn_datas[0]
twom_y = 2.0 * magn_datas[1]
twom_z = 2.0 * magn_datas[2]
# compute the objective function and Jacobian
f_1 = twoSEq_2 * SEq[3] - twoSEq_1 * SEq[2] - accel_datas[0]
f_2 = twoSEq_1 * SEq[1] + twoSEq_3 * SEq[3] - accel_datas[1]
f_3 = 1.0 - twoSEq_2 * SEq[1] - twoSEq_3 * SEq[2] - accel_datas[2]
f_4 = twob_x * (0.5 - SEq[2] * SEq[2] - SEq[3] * SEq[3]) + twob_z * (SEq_2SEq_4 - SEq_1SEq_3) - magn_datas[0]
f_5 = twob_x * (SEq[1] * SEq[2] - SEq[0] * SEq[3]) + twob_z * (SEq[0] * SEq[1] + SEq[2] * SEq[3]) - magn_datas[1]
f_6 = twob_x * (SEq_1SEq_3 + SEq_2SEq_4) + twob_z * (0.5 - SEq[1] * SEq[1] - SEq[2] * SEq[2]) - magn_datas[2]
J_11or24 = twoSEq_3 # J_11 negated in matrix multiplication
J_12or23 = 2.0 * SEq[3]
J_13or22 = twoSEq_1 # J_12 negated in matrix multiplication
J_14or21 = twoSEq_2
J_32 = 2.0 * J_14or21 # negated in matrix multiplication
J_33 = 2.0 * J_11or24 # negated in matrix multiplication
J_41 = twob_zSEq_3 # negated in matrix multiplication
J_42 = twob_zSEq_4
J_43 = 2.0 * twob_xSEq_3 + twob_zSEq_1 # negated in matrix multiplication
J_44 = 2.0 * twob_xSEq_4 - twob_zSEq_2 # negated in matrix multiplication
J_51 = twob_xSEq_4 - twob_zSEq_2 # negated in matrix multiplication
J_52 = twob_xSEq_3 + twob_zSEq_1
J_53 = twob_xSEq_2 + twob_zSEq_4
J_54 = twob_xSEq_1 - twob_zSEq_3 # negated in matrix multiplication
J_61 = twob_xSEq_3
J_62 = twob_xSEq_4 - 2.0 * twob_zSEq_2
J_63 = twob_xSEq_1 - 2.0 * twob_zSEq_3
J_64 = twob_xSEq_2
#print "f_1 = {} f_2 = {} f_3 = {} f_4 = {} f_5 = {} f_6 = {}".format(f_1,f_2,f_3,f_4,f_5,f_6)
# print "J_64 = {} J_63 = {} J_62 = {} J_61 = {} J_54 = {} J_53 = {} J_52 = {} J_51 = {} J_44 = {} J_43 = {} J_42 = {} J_41 = {}".format(J_64,J_63,J_62,J_61,J_54,J_53,J_52,J_51,J_44,J_43,J_42,J_41)
# compute the gradient (matrix multiplication)
SEqHatDot_1 = J_14or21 * f_2 - J_11or24 * f_1 - J_41 * f_4 - J_51 * f_5 + J_61 * f_6
SEqHatDot_2 = J_12or23 * f_1 + J_13or22 * f_2 - J_32 * f_3 + J_42 * f_4 + J_52 * f_5 + J_62 * f_6
SEqHatDot_3 = J_12or23 * f_2 - J_33 * f_3 - J_13or22 * f_1 - J_43 * f_4 + J_53 * f_5 + J_63 * f_6
SEqHatDot_4 = J_14or21 * f_1 + J_11or24 * f_2 - J_44 * f_4 - J_54 * f_5 + J_64 * f_6
###
# print SEqHatDot_1
# print SEqHatDot_2
# print SEqHatDot_3
# print SEqHatDot_4
# print
# normalise the gradient to estimate direction of the gyroscope error
norm = math.sqrt(SEqHatDot_1**2 + SEqHatDot_2**2 + SEqHatDot_3**2 + SEqHatDot_4**2)
SEqHatDot_1 = SEqHatDot_1 / norm
SEqHatDot_2 = SEqHatDot_2 / norm
SEqHatDot_3 = SEqHatDot_3 / norm
SEqHatDot_4 = SEqHatDot_4 / norm
###
# print "SEqHatDot_1: {} SEqHatDot_2: {} SEqHatDot_3: {} SEqHatDot_4: {}".format(SEqHatDot_1,SEqHatDot_2,SEqHatDot_3,SEqHatDot_4)
# compute angular estimated direction of the gyroscope error
w_err_x = twoSEq_1 * SEqHatDot_2 - twoSEq_2 * SEqHatDot_1 - twoSEq_3 * SEqHatDot_4 + twoSEq_4 * SEqHatDot_3
w_err_y = twoSEq_1 * SEqHatDot_3 + twoSEq_2 * SEqHatDot_4 - twoSEq_3 * SEqHatDot_1 - twoSEq_4 * SEqHatDot_2
w_err_z = twoSEq_1 * SEqHatDot_4 - twoSEq_2 * SEqHatDot_3 + twoSEq_3 * SEqHatDot_2 - twoSEq_4 * SEqHatDot_1
# print "w_err_x: {}, w_err_y:{}, w_err_z:{}".format(w_err_x, w_err_y, w_err_z)
# print "zeta: {}".format(zeta)
# print "deltat: {}".format(deltat)
# compute and remove the gyroscope baises
# print "w_b1: {}".format(w_b)
w_b[0] += w_err_x * deltat * zeta
w_b[1] += w_err_y * deltat * zeta
w_b[2] += w_err_z * deltat * zeta
# print "w_b2: {}".format(w_b)
gyro_datas[0] -= w_b[0]
gyro_datas[1] -= w_b[1]
gyro_datas[2] -= w_b[2]
###
# compute the quaternion rate measured by gyroscopes
SEqDot_omega_1 = -halfSEq_2 * gyro_datas[0] - halfSEq_3 * gyro_datas[1] - halfSEq_4 * gyro_datas[2]
SEqDot_omega_2 = halfSEq_1 * gyro_datas[0] + halfSEq_3 * gyro_datas[2] - halfSEq_4 * gyro_datas[1]
SEqDot_omega_3 = halfSEq_1 * gyro_datas[1] - halfSEq_2 * gyro_datas[2] + halfSEq_4 * gyro_datas[0]
SEqDot_omega_4 = halfSEq_1 * gyro_datas[2] + halfSEq_2 * gyro_datas[1] - halfSEq_3 * gyro_datas[0]
# compute then integrate the estimated quaternion rate
SEq[0] += (SEqDot_omega_1 - (beta * SEqHatDot_1)) * deltat
SEq[1] += (SEqDot_omega_2 - (beta * SEqHatDot_2)) * deltat
SEq[2] += (SEqDot_omega_3 - (beta * SEqHatDot_3)) * deltat
SEq[3] += (SEqDot_omega_4 - (beta * SEqHatDot_4)) * deltat
# Normalizamos los quaterniones
norm = np.linalg.norm(SEq)
SEq = map(lambda x: x/norm,SEq)
# compute flux in the earth frame
SEq_1SEq_2 = SEq[0] * SEq[1] # recompute axulirary variables
SEq_1SEq_3 = SEq[0] * SEq[2]
SEq_1SEq_4 = SEq[0] * SEq[3]
SEq_3SEq_4 = SEq[2] * SEq[3]
SEq_2SEq_3 = SEq[1] * SEq[2]
SEq_2SEq_4 = SEq[1] * SEq[3]
h_x = twom_x * (0.5 - SEq[2] * SEq[2] - SEq[3] * SEq[3]) + twom_y * (SEq_2SEq_3 - SEq_1SEq_4) + twom_z * (SEq_2SEq_4 + SEq_1SEq_3)
h_y = twom_x * (SEq_2SEq_3 + SEq_1SEq_4) + twom_y * (0.5 - SEq[1] * SEq[1] - SEq[3] * SEq[3]) + twom_z * (SEq_3SEq_4 - SEq_1SEq_2)
h_z = twom_x * (SEq_2SEq_4 - SEq_1SEq_3) + twom_y * (SEq_3SEq_4 + SEq_1SEq_2) + twom_z * (0.5 - SEq[1] * SEq[1] - SEq[2] * SEq[2])
# normalise the flux vector to have only components in the x and z
b_x = math.sqrt((h_x * h_x) + (h_y * h_y))
b_z = h_z
def Quat_to_Euler(quater):
euler = [0,0,0]
euler[0] = math.atan2(2*(quater[0]*quater[1] + quater[2]*quater[3]),quater[0]*quater[0] - quater[1]*quater[1] - quater[2]*quater[2] + quater[3]*quater[3])
euler[1] = math.asin(-2*((quater[0]*quater[2] - quater[1]*quater[3]))/(quater[0]*quater[0] + quater[1]*quater[1] + quater[2]*quater[2] + quater[3]*quater[3]))
euler[2] = math.atan2(2*(quater[1]*quater[2] + quater[0]*quater[3]),-quater[0]*quater[0] - quater[1]*quater[1] + quater[2]*quater[2] + quater[3]*quater[3])
euler = map(math.degrees,euler)
return euler
import smbus
import time
import numpy as np
import math
import socket
import fcntl
import struct
#Analizamos la red para encontrar el ip correcto
inter_faces = get_network_config2()
if inter_faces['eth0'] == None: #Le damos prioridad a la conexion ethernet
host = inter_faces['wlan0']
tarjeta = 'wlan0'
else:
host = inter_faces['eth0']
tarjeta = 'eth0'
print("Intentando establecer conexion en interfaz {} con la direccion ip {}".format(tarjeta, host))
#Establecemos la conexion
try:
port = 23322
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind((host,port))
s.listen(1)
conn,addr = s.accept()
except:
s.close() #Si algo falla, cierra todo.
print("[-] ERROR = No se pudo establecer la conexion")
exit()
#Abrimos el puerto I2C
ahrs = smbus.SMBus(1)
#Definimos las direcciones de los sensores
gyro_addr = 0x6B
accel_addr = 0x19
magn_addr = 0x1E
#Variables globales
SEq = [0.0,0.0,0.0,1.0] #Quaterniones
b_x = 1 #Earth Flux
b_z = 0
w_b = [0,0,0] #Gyroscopic Bias Error
beta = math.sqrt(3.0/4.0)*math.radians(5) #gyro measurment error rad/s (5 deg/s)
zeta = math.sqrt(3.0/4.0)*math.radians(0.2) #gyro drift error rad/s/s (0.2 deg/s/s)
#Colocamos los valores de configuracion
accel_setup()
magn_setup()
gyro_setup()
#Leemos los datos de los sensores.
accel_data = accel_read()
magn_data = magn_read()
gyro_data = gyro_read()
#Variables de tiempo
time_new = 0
time_old = time.time()
#loop de control
while(1):
#sacamos medidas de sensores
accel_data = accel_read()
magn_data = magn_read()
gyro_data = gyro_read()
#medimos tiempo
time_new = time.time()
#corremos el filtro
madgwicks_filter(accel_data, magn_data, gyro_data, time_new - time_old)
#Actualizamos el tiempo
time_old = time_new
#Calculamos los Angulos de Euler
Angulos = Quat_to_Euler(SEq)
#Imprimimos
print("Pitch: {:+.2f}deg Roll: {:+.2f}deg Yaw: {:+.2f}deg Quaternion:({:+.3f}, {:+.3f}, {:+.3f}, {:+.3f})".format(Angulos[0],Angulos[1],Angulos[2], SEq[0], SEq[1], SEq[2], SEq[3] ))
mensaje = "{:+.2f},{:+.2f},{:+.2f}\n".format(Angulos[0],Angulos[1],Angulos[2])
try:
conn.sendall(mensaje) #Enviamos por TCP la informacion
except:
s.close() #Si algo falla, cierra todo.
print("[-] ERROR = No se pudo mandar el paquete")
exit()
time.sleep(0.01)
# print("Accel:({:+.3f},{:+.3f},{:+.3f}) Magn:({:+.3f},{:+.3f},{:+.3f}) Gyro:({:+.3f},{:+.3f},{:+.3f})".format(accel_data[0],accel_data[1],accel_data[2],magn_data[0],magn_data[1],magn_data[2],gyro_data[0],gyro_data[1],gyro_data[2])) | random_line_split | |
ahrs_serv.py | """ AHRS - Madgwicks, basico
Este codigo se conecta por el bus de I2C del Raspberry PI modelo 2 al IMU10 de Adafruit, y usa los datos de los sensores para
alimentar una implementacion del filtro de Madgwicks que retorna la orientacion en quaterniones del sensor (que son transformadas a Angulos
de Euler). Luego lo enivia por tcp/ip a una computadora que grafica el resultado.
"""
# Funciones de comunicacion
def get_interfaces():
""" (Python 3) Funcion que devuelve una lista con strings de todos las interfaces de red que tenga tu computadora
*NOTA: Solo funciona en Linux
get_ifaces()
['enp3s0', 'vmnet1', 'vmnet8', 'wlp2s0', ' lo']"""
with open('/proc/net/dev','r') as f: #Abrimos el archivo con la informacion de red
interfaces = []
for linea in f:
if ':' in linea:
interfaces.append(linea[:linea.find(':')]) #Extraemos los primeros caracteres de las lineas con informacion de las interfaces
return [iface.lstrip().rstrip() for iface in interfaces]
def get_ip_address2(ifname):
""" (Python 2)Funcion que recibe un string con el nombre de una interfaz de red y devuelve
un string con la direccion IP de la interfaz, o None si dicha interfaz no
tiene direccion IP asignada.
get_ip_address('wlp2s0')
'192.168.1.4' """
try:
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
return socket.inet_ntoa(fcntl.ioctl(
s.fileno(),
0x8915, # SIOCGIFADDR
struct.pack('256s', ifname[:15])
)[20:24])
except:
return None
def get_network_config2():
""" (Python 2) Funcion que devuelve un diccionario con las interfaces de red de la computadora y sus respectivas direcciones
ip. """
interfaces = get_interfaces()
ips = [get_ip_address2(ip) for ip in interfaces]
return dict(zip(interfaces,ips))
# Funciones que configuran los sensores
def accel_setup():
global ahrs
global accel_addr
ahrs.write_byte_data(accel_addr,0x23,0x88) #Prendemos alta resolucion y hold de update de los registros de salida en el reg 23h
ahrs.write_byte_data(accel_addr,0x20,0x27) #sacamos el accelerometro del shutdown mode
def magn_setup():
|
def gyro_setup():
global ahrs
global gyro_addr
ahrs.write_byte_data(gyro_addr,0x20,0x8F) #DataRate 400Hz, BW 20Hz, All Axis enabled, Gyro ON
ahrs.write_byte_data(gyro_addr,0x23,0xA0) #Escala 2000dps, BlockUpdates
ahrs.write_byte_data(gyro_addr,0x24,0x02) #OutSel = 10h, use HPF and LPF2, HPen = 0.
# Funciones que sacan los valores de los sensores.
def accel_read():
global ahrs
global accel_addr
accel_data = [0,0,0]
##Sacamos los datos de acceleracion de los 3 ejes
#Eje X
xl = format(ahrs.read_byte_data(accel_addr,0x28), '#010b')[2:6]
xh = format(ahrs.read_byte_data(accel_addr,0x29), '#010b')[2:]
#Eje Y
yl = format(ahrs.read_byte_data(accel_addr,0x2A), '#010b')[2:6]
yh = format(ahrs.read_byte_data(accel_addr,0x2B), '#010b')[2:]
#Eje Z
zl = format(ahrs.read_byte_data(accel_addr,0x2C), '#010b')[2:6]
zh = format(ahrs.read_byte_data(accel_addr,0x2D), '#010b')[2:]
## Combinamos juntos los 2 bytes.
accel_data[0] = int('0b' + xh[1:] + xl,2) - int(xh[0])*(2**(len(xh+xl)-1)) #Eje X #Unimos los bytes en complemento a 2
accel_data[1] = int('0b' + yh[1:] + yl,2) - int(yh[0])*(2**(len(yh+yl)-1)) #Eje Y #Unimos los bytes en complemento a 2
accel_data[2] = int('0b' + zh[1:] + zl,2) - int(zh[0])*(2**(len(zh+zl)-1)) #Eje Z #Unimos los bytes en complemento a 2
#Normalizamos el vector antes de retornarlo
norma = np.linalg.norm(accel_data)
accel_data = list(map(lambda x: x/norma,accel_data))
return accel_data
def magn_read():
global ahrs
global magn_addr
magn_data = [0,0,0]
##Sacamos los datos de campo magnetico de los 3 ejes
#Eje X
xh = ahrs.read_byte_data(magn_addr,0x03)
xl = ahrs.read_byte_data(magn_addr,0x04)
#Eje Y
yh = ahrs.read_byte_data(magn_addr,0x07)
yl = ahrs.read_byte_data(magn_addr,0x08)
#Eje Z
zh = ahrs.read_byte_data(magn_addr,0x05)
zl = ahrs.read_byte_data(magn_addr,0x06)
#Convertimos los resultados a binario para poder verlos
xl = format(xl, '#010b')[2:]
xh = format(xh, '#010b')[2:]
yl = format(yl, '#010b')[2:]
yh = format(yh, '#010b')[2:]
zl = format(zl, '#010b')[2:]
zh = format(zh, '#010b')[2:]
#Y aplicamos el complemento a 2 para conseguir el numero
magn_data[0] = int( xh[1:] + xl,2) - int(xh[0])*(2**(len(xh+xl)-1))
magn_data[1] = int( yh[1:] + yl,2) - int(yh[0])*(2**(len(yh+yl)-1))
magn_data[2] = int( zh[1:] + zl,2) - int(zh[0])*(2**(len(zh+zl)-1))
#Escalamos los datos
magn_data[0] = (magn_data[0] - 35.0) * 1.0
magn_data[1] = (magn_data[1] + 35.0) * 1.02702702703
magn_data[2] = (magn_data[2] - 3.0) * 0.974358974359
#Normalizamos el vector
norma = np.linalg.norm(magn_data)
magn_data = list(map(lambda x: x/norma,magn_data))
return magn_data
def gyro_read():
global ahrs
global gyro_addr
gyro_data = [0,0,0]
#Eje X
xh = ahrs.read_byte_data(gyro_addr,0x29)
xl = ahrs.read_byte_data(gyro_addr,0x28)
#Eje Y
yh = ahrs.read_byte_data(gyro_addr,0x2B)
yl = ahrs.read_byte_data(gyro_addr,0x2A)
#Eje Z
zh = ahrs.read_byte_data(gyro_addr,0x2D)
zl = ahrs.read_byte_data(gyro_addr,0x2C)
#Convertimos los resultados a binario para poder verlos
xl = format(xl, '#010b')[2:]
xh = format(xh, '#010b')[2:]
yl = format(yl, '#010b')[2:]
yh = format(yh, '#010b')[2:]
zl = format(zl, '#010b')[2:]
zh = format(zh, '#010b')[2:]
#Y aplicamos el complemento a 2 para conseguir el numero
x = int( xh[1:] + xl,2) - int(xh[0])*(2**(len(xh+xl)-1))
y = int( yh[1:] + yl,2) - int(yh[0])*(2**(len(yh+yl)-1))
z = int( zh[1:] + zl,2) - int(zh[0])*(2**(len(zh+zl)-1))
#Calculamos los grados por segundo (para 2000dps)
gyro_data[0] = float(x)*70/1000
gyro_data[1] = float(y)*70/1000
gyro_data[2] = float(z)*70/1000
#Transformamos los datos a radianes/seg
gyro_data = list(map(math.radians, gyro_data))
return gyro_data
def madgwicks_filter(accel_datas, magn_datas, gyro_datas, deltat):
global SEq
global b_x
global b_z
global w_b
global beta
global zeta
# print "accel = {}".format(accel_datas)
# print "magn = {}".format(magn_datas)
# print "gyro = {}".format(gyro_datas)
# print "deltat = {}".format(deltat)
# print SEq
# print b_x
# print w_b
# print beta
#axulirary variables to avoid reapeated calcualtions
halfSEq_1 = 0.5 * SEq[0]
halfSEq_2 = 0.5 * SEq[1]
halfSEq_3 = 0.5 * SEq[2]
halfSEq_4 = 0.5 * SEq[3]
twoSEq_1 = 2.0 * SEq[0]
twoSEq_2 = 2.0 * SEq[1]
twoSEq_3 = 2.0 * SEq[2]
twoSEq_4 = 2.0 * SEq[3]
twob_x = 2.0 * b_x
twob_z = 2.0 * b_z
twob_xSEq_1 = 2.0 * b_x * SEq[0]
twob_xSEq_2 = 2.0 * b_x * SEq[1]
twob_xSEq_3 = 2.0 * b_x * SEq[2]
twob_xSEq_4 = 2.0 * b_x * SEq[3]
twob_zSEq_1 = 2.0 * b_z * SEq[0]
twob_zSEq_2 = 2.0 * b_z * SEq[1]
twob_zSEq_3 = 2.0 * b_z * SEq[2]
twob_zSEq_4 = 2.0 * b_z * SEq[3]
SEq_1SEq_2 = SEq[0] * SEq[1]
SEq_1SEq_3 = SEq[0] * SEq[2]
SEq_1SEq_4 = SEq[0] * SEq[3]
SEq_2SEq_3 = SEq[1] * SEq[2]
SEq_2SEq_4 = SEq[1] * SEq[3]
SEq_3SEq_4 = SEq[2] * SEq[3]
twom_x = 2.0 * magn_datas[0]
twom_y = 2.0 * magn_datas[1]
twom_z = 2.0 * magn_datas[2]
# compute the objective function and Jacobian
f_1 = twoSEq_2 * SEq[3] - twoSEq_1 * SEq[2] - accel_datas[0]
f_2 = twoSEq_1 * SEq[1] + twoSEq_3 * SEq[3] - accel_datas[1]
f_3 = 1.0 - twoSEq_2 * SEq[1] - twoSEq_3 * SEq[2] - accel_datas[2]
f_4 = twob_x * (0.5 - SEq[2] * SEq[2] - SEq[3] * SEq[3]) + twob_z * (SEq_2SEq_4 - SEq_1SEq_3) - magn_datas[0]
f_5 = twob_x * (SEq[1] * SEq[2] - SEq[0] * SEq[3]) + twob_z * (SEq[0] * SEq[1] + SEq[2] * SEq[3]) - magn_datas[1]
f_6 = twob_x * (SEq_1SEq_3 + SEq_2SEq_4) + twob_z * (0.5 - SEq[1] * SEq[1] - SEq[2] * SEq[2]) - magn_datas[2]
J_11or24 = twoSEq_3 # J_11 negated in matrix multiplication
J_12or23 = 2.0 * SEq[3]
J_13or22 = twoSEq_1 # J_12 negated in matrix multiplication
J_14or21 = twoSEq_2
J_32 = 2.0 * J_14or21 # negated in matrix multiplication
J_33 = 2.0 * J_11or24 # negated in matrix multiplication
J_41 = twob_zSEq_3 # negated in matrix multiplication
J_42 = twob_zSEq_4
J_43 = 2.0 * twob_xSEq_3 + twob_zSEq_1 # negated in matrix multiplication
J_44 = 2.0 * twob_xSEq_4 - twob_zSEq_2 # negated in matrix multiplication
J_51 = twob_xSEq_4 - twob_zSEq_2 # negated in matrix multiplication
J_52 = twob_xSEq_3 + twob_zSEq_1
J_53 = twob_xSEq_2 + twob_zSEq_4
J_54 = twob_xSEq_1 - twob_zSEq_3 # negated in matrix multiplication
J_61 = twob_xSEq_3
J_62 = twob_xSEq_4 - 2.0 * twob_zSEq_2
J_63 = twob_xSEq_1 - 2.0 * twob_zSEq_3
J_64 = twob_xSEq_2
#print "f_1 = {} f_2 = {} f_3 = {} f_4 = {} f_5 = {} f_6 = {}".format(f_1,f_2,f_3,f_4,f_5,f_6)
# print "J_64 = {} J_63 = {} J_62 = {} J_61 = {} J_54 = {} J_53 = {} J_52 = {} J_51 = {} J_44 = {} J_43 = {} J_42 = {} J_41 = {}".format(J_64,J_63,J_62,J_61,J_54,J_53,J_52,J_51,J_44,J_43,J_42,J_41)
# compute the gradient (matrix multiplication)
SEqHatDot_1 = J_14or21 * f_2 - J_11or24 * f_1 - J_41 * f_4 - J_51 * f_5 + J_61 * f_6
SEqHatDot_2 = J_12or23 * f_1 + J_13or22 * f_2 - J_32 * f_3 + J_42 * f_4 + J_52 * f_5 + J_62 * f_6
SEqHatDot_3 = J_12or23 * f_2 - J_33 * f_3 - J_13or22 * f_1 - J_43 * f_4 + J_53 * f_5 + J_63 * f_6
SEqHatDot_4 = J_14or21 * f_1 + J_11or24 * f_2 - J_44 * f_4 - J_54 * f_5 + J_64 * f_6
###
# print SEqHatDot_1
# print SEqHatDot_2
# print SEqHatDot_3
# print SEqHatDot_4
# print
# normalise the gradient to estimate direction of the gyroscope error
norm = math.sqrt(SEqHatDot_1**2 + SEqHatDot_2**2 + SEqHatDot_3**2 + SEqHatDot_4**2)
SEqHatDot_1 = SEqHatDot_1 / norm
SEqHatDot_2 = SEqHatDot_2 / norm
SEqHatDot_3 = SEqHatDot_3 / norm
SEqHatDot_4 = SEqHatDot_4 / norm
###
# print "SEqHatDot_1: {} SEqHatDot_2: {} SEqHatDot_3: {} SEqHatDot_4: {}".format(SEqHatDot_1,SEqHatDot_2,SEqHatDot_3,SEqHatDot_4)
# compute angular estimated direction of the gyroscope error
w_err_x = twoSEq_1 * SEqHatDot_2 - twoSEq_2 * SEqHatDot_1 - twoSEq_3 * SEqHatDot_4 + twoSEq_4 * SEqHatDot_3
w_err_y = twoSEq_1 * SEqHatDot_3 + twoSEq_2 * SEqHatDot_4 - twoSEq_3 * SEqHatDot_1 - twoSEq_4 * SEqHatDot_2
w_err_z = twoSEq_1 * SEqHatDot_4 - twoSEq_2 * SEqHatDot_3 + twoSEq_3 * SEqHatDot_2 - twoSEq_4 * SEqHatDot_1
# print "w_err_x: {}, w_err_y:{}, w_err_z:{}".format(w_err_x, w_err_y, w_err_z)
# print "zeta: {}".format(zeta)
# print "deltat: {}".format(deltat)
# compute and remove the gyroscope baises
# print "w_b1: {}".format(w_b)
w_b[0] += w_err_x * deltat * zeta
w_b[1] += w_err_y * deltat * zeta
w_b[2] += w_err_z * deltat * zeta
# print "w_b2: {}".format(w_b)
gyro_datas[0] -= w_b[0]
gyro_datas[1] -= w_b[1]
gyro_datas[2] -= w_b[2]
###
# compute the quaternion rate measured by gyroscopes
SEqDot_omega_1 = -halfSEq_2 * gyro_datas[0] - halfSEq_3 * gyro_datas[1] - halfSEq_4 * gyro_datas[2]
SEqDot_omega_2 = halfSEq_1 * gyro_datas[0] + halfSEq_3 * gyro_datas[2] - halfSEq_4 * gyro_datas[1]
SEqDot_omega_3 = halfSEq_1 * gyro_datas[1] - halfSEq_2 * gyro_datas[2] + halfSEq_4 * gyro_datas[0]
SEqDot_omega_4 = halfSEq_1 * gyro_datas[2] + halfSEq_2 * gyro_datas[1] - halfSEq_3 * gyro_datas[0]
# compute then integrate the estimated quaternion rate
SEq[0] += (SEqDot_omega_1 - (beta * SEqHatDot_1)) * deltat
SEq[1] += (SEqDot_omega_2 - (beta * SEqHatDot_2)) * deltat
SEq[2] += (SEqDot_omega_3 - (beta * SEqHatDot_3)) * deltat
SEq[3] += (SEqDot_omega_4 - (beta * SEqHatDot_4)) * deltat
# Normalizamos los quaterniones
norm = np.linalg.norm(SEq)
SEq = map(lambda x: x/norm,SEq)
# compute flux in the earth frame
SEq_1SEq_2 = SEq[0] * SEq[1] # recompute axulirary variables
SEq_1SEq_3 = SEq[0] * SEq[2]
SEq_1SEq_4 = SEq[0] * SEq[3]
SEq_3SEq_4 = SEq[2] * SEq[3]
SEq_2SEq_3 = SEq[1] * SEq[2]
SEq_2SEq_4 = SEq[1] * SEq[3]
h_x = twom_x * (0.5 - SEq[2] * SEq[2] - SEq[3] * SEq[3]) + twom_y * (SEq_2SEq_3 - SEq_1SEq_4) + twom_z * (SEq_2SEq_4 + SEq_1SEq_3)
h_y = twom_x * (SEq_2SEq_3 + SEq_1SEq_4) + twom_y * (0.5 - SEq[1] * SEq[1] - SEq[3] * SEq[3]) + twom_z * (SEq_3SEq_4 - SEq_1SEq_2)
h_z = twom_x * (SEq_2SEq_4 - SEq_1SEq_3) + twom_y * (SEq_3SEq_4 + SEq_1SEq_2) + twom_z * (0.5 - SEq[1] * SEq[1] - SEq[2] * SEq[2])
# normalise the flux vector to have only components in the x and z
b_x = math.sqrt((h_x * h_x) + (h_y * h_y))
b_z = h_z
def Quat_to_Euler(quater):
euler = [0,0,0]
euler[0] = math.atan2(2*(quater[0]*quater[1] + quater[2]*quater[3]),quater[0]*quater[0] - quater[1]*quater[1] - quater[2]*quater[2] + quater[3]*quater[3])
euler[1] = math.asin(-2*((quater[0]*quater[2] - quater[1]*quater[3]))/(quater[0]*quater[0] + quater[1]*quater[1] + quater[2]*quater[2] + quater[3]*quater[3]))
euler[2] = math.atan2(2*(quater[1]*quater[2] + quater[0]*quater[3]),-quater[0]*quater[0] - quater[1]*quater[1] + quater[2]*quater[2] + quater[3]*quater[3])
euler = map(math.degrees,euler)
return euler
import smbus
import time
import numpy as np
import math
import socket
import fcntl
import struct
#Analizamos la red para encontrar el ip correcto
inter_faces = get_network_config2()
if inter_faces['eth0'] == None: #Le damos prioridad a la conexion ethernet
host = inter_faces['wlan0']
tarjeta = 'wlan0'
else:
host = inter_faces['eth0']
tarjeta = 'eth0'
print("Intentando establecer conexion en interfaz {} con la direccion ip {}".format(tarjeta, host))
#Establecemos la conexion
try:
port = 23322
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind((host,port))
s.listen(1)
conn,addr = s.accept()
except:
s.close() #Si algo falla, cierra todo.
print("[-] ERROR = No se pudo establecer la conexion")
exit()
#Abrimos el puerto I2C
ahrs = smbus.SMBus(1)
#Definimos las direcciones de los sensores
gyro_addr = 0x6B
accel_addr = 0x19
magn_addr = 0x1E
#Variables globales
SEq = [0.0,0.0,0.0,1.0] #Quaterniones
b_x = 1 #Earth Flux
b_z = 0
w_b = [0,0,0] #Gyroscopic Bias Error
beta = math.sqrt(3.0/4.0)*math.radians(5) #gyro measurment error rad/s (5 deg/s)
zeta = math.sqrt(3.0/4.0)*math.radians(0.2) #gyro drift error rad/s/s (0.2 deg/s/s)
#Colocamos los valores de configuracion
accel_setup()
magn_setup()
gyro_setup()
#Leemos los datos de los sensores.
accel_data = accel_read()
magn_data = magn_read()
gyro_data = gyro_read()
#Variables de tiempo
time_new = 0
time_old = time.time()
#loop de control
while(1):
#sacamos medidas de sensores
accel_data = accel_read()
magn_data = magn_read()
gyro_data = gyro_read()
#medimos tiempo
time_new = time.time()
#corremos el filtro
madgwicks_filter(accel_data, magn_data, gyro_data, time_new - time_old)
#Actualizamos el tiempo
time_old = time_new
#Calculamos los Angulos de Euler
Angulos = Quat_to_Euler(SEq)
#Imprimimos
print("Pitch: {:+.2f}deg Roll: {:+.2f}deg Yaw: {:+.2f}deg Quaternion:({:+.3f}, {:+.3f}, {:+.3f}, {:+.3f})".format(Angulos[0],Angulos[1],Angulos[2], SEq[0], SEq[1], SEq[2], SEq[3] ))
mensaje = "{:+.2f},{:+.2f},{:+.2f}\n".format(Angulos[0],Angulos[1],Angulos[2])
try:
conn.sendall(mensaje) #Enviamos por TCP la informacion
except:
s.close() #Si algo falla, cierra todo.
print("[-] ERROR = No se pudo mandar el paquete")
exit()
time.sleep(0.01)
# print("Accel:({:+.3f},{:+.3f},{:+.3f}) Magn:({:+.3f},{:+.3f},{:+.3f}) Gyro:({:+.3f},{:+.3f},{:+.3f})".format(accel_data[0],accel_data[1],accel_data[2],magn_data[0],magn_data[1],magn_data[2],gyro_data[0],gyro_data[1],gyro_data[2]))
| global ahrs
global magn_addr
ahrs.write_byte_data(magn_addr,0x00,0x10) #Seteamos la velocidad de las mediciones a 15Hz
ahrs.write_byte_data(magn_addr,0x01,0x20) #Ponemos la escala +-1.3g
ahrs.write_byte_data(magn_addr,0x02,0x00) #Prendemos el magnetometro | identifier_body |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.