file_name large_stringlengths 4 140 | prefix large_stringlengths 0 39k | suffix large_stringlengths 0 36.1k | middle large_stringlengths 0 29.4k | fim_type large_stringclasses 4
values |
|---|---|---|---|---|
fixture.go | // Copyright 2021 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package crossdevice
import (
"context"
"encoding/json"
"io/ioutil"
"path/filepath"
"strconv"
"strings"
"time"
"chromiumos/tast/common/android/adb"
crossdevicecommon "chromiumos/tast/common/cros/crossdevice"
"chromiumos/tast/common/testexec"
"chromiumos/tast/ctxutil"
"chromiumos/tast/errors"
"chromiumos/tast/local/bluetooth"
"chromiumos/tast/local/chrome"
"chromiumos/tast/local/chrome/ash"
"chromiumos/tast/local/chrome/crossdevice/crossdevicesettings"
"chromiumos/tast/local/chrome/crossdevice/phonehub"
"chromiumos/tast/local/chrome/lacros/lacrosfixt"
"chromiumos/tast/local/chrome/uiauto"
"chromiumos/tast/local/chrome/uiauto/faillog"
"chromiumos/tast/local/cryptohome"
"chromiumos/tast/local/input"
"chromiumos/tast/local/logsaver"
"chromiumos/tast/testing"
)
// FixtureOptions contains the options that we set for various crossDeviceFixture.
type FixtureOptions struct {
allFeatures bool // Whether or not to enable all cross device features.
saveScreenRecording bool
lockFixture bool // Whether or not to lock the fixture preventing chrome from being torn down outside of fixture teardown.
noSignIn bool // Whether or not to sign in with the specified GAIA account. True will not skip OOBE.
}
// NewCrossDeviceOnboarded creates a fixture that logs in to CrOS, pairs it with an Android device,
// and ensures the features in the "Connected devices" section of OS Settings are ready to use (Smart Lock, Phone Hub, etc.).
// Note that crossdevice fixtures inherit from crossdeviceAndroidSetup.
// NewCrossDeviceOnboarded creates a fixture that logs in to CrOS, pairs it with an Android device,
// and ensures the features in the "Connected devices" section of OS Settings are ready to use (Smart Lock, Phone Hub, etc.).
// Note that crossdevice fixtures inherit from crossdeviceAndroidSetup.
func NewCrossDeviceOnboarded(opt FixtureOptions, fOpt chrome.OptionsCallback) testing.FixtureImpl {
	f := crossdeviceFixture{fOpt: fOpt}
	// Copy the per-fixture behavior flags from the options struct.
	f.allFeatures = opt.allFeatures
	f.saveScreenRecording = opt.saveScreenRecording
	f.lockFixture = opt.lockFixture
	f.noSignIn = opt.noSignIn
	return &f
}
// Fixture runtime variables.
const (
// These vars can be used from the command line when running tests locally to configure the tests to run on personal GAIA accounts.
// Use these vars to log in with your own GAIA credentials on CrOS. The Android device should be signed in with the same account.
customCrOSUsername = "cros_username"
customCrOSPassword = "cros_password"
)
// postTestTimeout is the timeout for the fixture PostTest stage.
// We need a considerable amount of time to collect an Android bug report on failure.
const postTestTimeout = resetTimeout + BugReportDuration
// init registers the crossdevice fixtures. Each fixture chains onto a
// crossdeviceAndroidSetup* parent that prepares the Android phone, and
// differs only in its FixtureOptions flags and parent fixture.
func init() {
	// All Cross Device features enabled, with screen recordings saved on error.
	testing.AddFixture(&testing.Fixture{
		Name: "crossdeviceOnboardedAllFeatures",
		Desc: "User is signed in (with GAIA) to CrOS and paired with an Android phone with all Cross Device features enabled",
		Contacts: []string{
			"kyleshima@chromium.org",
			"chromeos-sw-engprod@google.com",
		},
		Parent: "crossdeviceAndroidSetupPhoneHub",
		Impl: NewCrossDeviceOnboarded(FixtureOptions{true, true, true, false}, func(ctx context.Context, s *testing.FixtState) ([]chrome.Option, error) {
			return nil, nil
		}),
		Vars: []string{
			customCrOSUsername,
			customCrOSPassword,
			KeepStateVar,
		},
		// Setup can take a while; leave room for a bug report on failure.
		SetUpTimeout:    10*time.Minute + BugReportDuration,
		ResetTimeout:    resetTimeout,
		TearDownTimeout: resetTimeout,
		PreTestTimeout:  resetTimeout,
		PostTestTimeout: postTestTimeout,
	})
	// Default feature set only (no Phone Hub extras, no screen recording).
	testing.AddFixture(&testing.Fixture{
		Name: "crossdeviceOnboarded",
		Desc: "User is signed in (with GAIA) to CrOS and paired with an Android phone with default Cross Device features enabled",
		Contacts: []string{
			"kyleshima@chromium.org",
			"chromeos-sw-engprod@google.com",
		},
		Parent: "crossdeviceAndroidSetupSmartLock",
		Impl: NewCrossDeviceOnboarded(FixtureOptions{false, false, true, false}, func(ctx context.Context, s *testing.FixtState) ([]chrome.Option, error) {
			return nil, nil
		}),
		Vars: []string{
			customCrOSUsername,
			customCrOSPassword,
			KeepStateVar,
		},
		SetUpTimeout:    10*time.Minute + BugReportDuration,
		ResetTimeout:    resetTimeout,
		TearDownTimeout: resetTimeout,
		PreTestTimeout:  resetTimeout,
		PostTestTimeout: postTestTimeout,
	})
	// No GAIA sign-in; Chrome stays in OOBE but the Android phone is still controlled.
	testing.AddFixture(&testing.Fixture{
		Name: "crossdeviceNoSignIn",
		Desc: "User is not signed in (with GAIA) to CrOS but fixture requires control of an Android phone. Does not skip OOBE",
		Contacts: []string{
			"kyleshima@chromium.org",
			"chromeos-sw-engprod@google.com",
		},
		Parent: "crossdeviceAndroidSetupPhoneHub",
		Impl: NewCrossDeviceOnboarded(FixtureOptions{false, false, true, true}, func(ctx context.Context, s *testing.FixtState) ([]chrome.Option, error) {
			return nil, nil
		}),
		Vars: []string{
			customCrOSUsername,
			customCrOSPassword,
			KeepStateVar,
			// Needed to create a signin-profile test connection while in OOBE.
			SignInProfileTestExtensionManifestKey,
		},
		SetUpTimeout:    10*time.Minute + BugReportDuration,
		ResetTimeout:    resetTimeout,
		TearDownTimeout: resetTimeout,
		PreTestTimeout:  resetTimeout,
		PostTestTimeout: postTestTimeout,
	})
	// Like crossdeviceOnboarded, but leaves Chrome unlocked so tests may restart it.
	testing.AddFixture(&testing.Fixture{
		Name: "crossdeviceOnboardedNoLock",
		Desc: "User is signed in (with GAIA) to CrOS and paired with an Android phone with default Cross Device features enabled. Doesn't lock the fixture before starting the test",
		Contacts: []string{
			"kyleshima@chromium.org",
			"chromeos-sw-engprod@google.com",
		},
		Parent: "crossdeviceAndroidSetupSmartLockLogin",
		Impl: NewCrossDeviceOnboarded(FixtureOptions{false, false, false, false}, func(ctx context.Context, s *testing.FixtState) ([]chrome.Option, error) {
			return nil, nil
		}),
		Vars: []string{
			customCrOSUsername,
			customCrOSPassword,
			KeepStateVar,
		},
		SetUpTimeout:    10*time.Minute + BugReportDuration,
		ResetTimeout:    resetTimeout,
		TearDownTimeout: resetTimeout,
		PreTestTimeout:  resetTimeout,
		PostTestTimeout: postTestTimeout,
	})
	// lacros fixtures
	testing.AddFixture(&testing.Fixture{
		Name: "lacrosCrossdeviceOnboardedAllFeatures",
		Desc: "User is signed in (with GAIA) to CrOS and paired with an Android phone with all Cross Device features enabled with lacros enabled",
		Contacts: []string{
			"kyleshima@chromium.org",
			"chromeos-sw-engprod@google.com",
		},
		Parent: "crossdeviceAndroidSetupPhoneHub",
		Impl: NewCrossDeviceOnboarded(FixtureOptions{true, true, true, false}, func(ctx context.Context, s *testing.FixtState) ([]chrome.Option, error) {
			// Start Chrome with the default lacros configuration.
			return lacrosfixt.NewConfig().Opts()
		}),
		Vars: []string{
			customCrOSUsername,
			customCrOSPassword,
			KeepStateVar,
		},
		SetUpTimeout:    10*time.Minute + BugReportDuration,
		ResetTimeout:    resetTimeout,
		TearDownTimeout: resetTimeout,
		PreTestTimeout:  resetTimeout,
		PostTestTimeout: postTestTimeout,
	})
}
// crossdeviceFixture implements the fixtures registered in init. It holds the
// Chrome session, the paired Android device, and per-test logging state.
type crossdeviceFixture struct {
	fOpt              chrome.OptionsCallback // Function to generate Chrome Options
	cr                *chrome.Chrome         // Chrome session created in SetUp.
	tconn             *chrome.TestConn       // Test API connection for cr.
	kb                *input.KeyboardEventWriter // Virtual keyboard used for screen-recording key shortcuts.
	androidDevice     *AndroidDevice // Paired phone, inherited from the parent fixture.
	androidAttributes *AndroidAttributes // Phone metadata captured for reporting.
	crosAttributes    *crossdevicecommon.CrosAttributes // CrOS metadata captured for reporting.
	btsnoopCmd        *testexec.Cmd    // Per-test btsnoop capture, started in PreTest and stopped in PostTest.
	logMarker         *logsaver.Marker // Marker for per-test log.
	allFeatures       bool // Enable all Cross Device features (e.g. Phone Hub), not just the defaults.
	saveAndroidScreenRecordingOnError func(context.Context, func() bool) error // Saver returned by StartScreenRecording; invoked in PostTest.
	saveScreenRecording bool // Record CrOS and Android screens during each test.
	lockFixture         bool // Lock Chrome after SetUp so tests cannot tear it down.
	noSignIn            bool // Skip GAIA sign-in and remain in OOBE.
	logcatStartTime     adb.LogcatTimestamp // Logcat position at PreTest, used for per-test dumps.
	downloadsPath       string // The user's Downloads dir; destination for CrOS screen recordings.
}
// FixtData holds information made available to tests that specify this Fixture.
type FixtData struct {
	// Chrome is the running chrome instance.
	Chrome *chrome.Chrome
	// TestConn is a connection to the test extension.
	TestConn *chrome.TestConn
	// LoginConn is a connection to the lock screen test extension.
	LoginConn *chrome.TestConn
	// AndroidDevice is an object for interacting with the connected Android device's Multidevice Snippet.
	AndroidDevice *AndroidDevice
	// Username and Password are the credentials used on both the chromebook and the phone.
	Username string
	Password string
	// ChromeOptions are the options used to start Chrome sessions, so tests can
	// restart Chrome with an identical configuration.
	ChromeOptions []chrome.Option
}
// SetUp starts Chrome (optionally signing in with GAIA), pairs the Chromebook
// with the Android device supplied by the parent fixture, enables the
// requested Cross Device features, and returns a *FixtData for tests.
//
// Fix: the bluetooth debug-logging failure previously did
// `return errors.Wrap(...)`. Because SetUp returns interface{}, that error
// value would have been handed to tests as the fixture value (instead of
// *FixtData) and the rest of setup silently skipped. It now fails the fixture
// via s.Fatal, consistent with every other error path in this function.
func (f *crossdeviceFixture) SetUp(ctx context.Context, s *testing.FixtState) interface{} {
	// Android device from parent fixture.
	androidDevice := s.ParentValue().(*FixtData).AndroidDevice
	f.androidDevice = androidDevice
	// Credentials to use (same as Android).
	crosUsername := s.ParentValue().(*FixtData).Username
	crosPassword := s.ParentValue().(*FixtData).Password
	// Allocate time for logging and saving a screenshot and bugreport in case of failure.
	cleanupCtx := ctx
	ctx, cancel := ctxutil.Shorten(ctx, 10*time.Second+BugReportDuration)
	defer cancel()
	// Save logcat so we have Android logs even if fixture setup fails.
	startTime, err := androidDevice.Device.LatestLogcatTimestamp(ctx)
	if err != nil {
		s.Fatal("Failed to get latest logcat timestamp: ", err)
	}
	defer androidDevice.Device.DumpLogcatFromTimestamp(cleanupCtx, filepath.Join(s.OutDir(), "fixture_setup_logcat.txt"), startTime)
	defer androidDevice.DumpLogs(cleanupCtx, s.OutDir(), "fixture_setup_persistent_logcat.txt")
	// Set default chrome options.
	opts, err := f.fOpt(ctx, s)
	if err != nil {
		s.Fatal("Failed to obtain Chrome options: ", err)
	}
	// Verbose-logging (vmodule) patterns for the modules involved in Cross Device features.
	tags := []string{
		"*nearby*=3",
		"*cryptauth*=3",
		"*device_sync*=3",
		"*multidevice*=3",
		"*secure_channel*=3",
		"*phonehub*=3",
		"*blue*=3",
		"ble_*=3",
	}
	opts = append(opts, chrome.ExtraArgs("--enable-logging", "--vmodule="+strings.Join(tags, ",")))
	opts = append(opts, chrome.EnableFeatures("PhoneHubCameraRoll", "SmartLockUIRevamp", "OobeQuickStart"))
	// Command-line credentials (if both were provided) override the parent fixture's.
	customUser, userOk := s.Var(customCrOSUsername)
	customPass, passOk := s.Var(customCrOSPassword)
	if userOk && passOk {
		s.Log("Logging in with user-provided credentials")
		crosUsername = customUser
		crosPassword = customPass
	} else {
		s.Log("Logging in with default GAIA credentials")
	}
	if f.noSignIn {
		opts = append(opts, chrome.DontSkipOOBEAfterLogin())
	} else {
		opts = append(opts, chrome.GAIALogin(chrome.Creds{User: crosUsername, Pass: crosPassword}))
	}
	if val, ok := s.Var(KeepStateVar); ok {
		b, err := strconv.ParseBool(val)
		if err != nil {
			s.Fatalf("Unable to convert %v var to bool: %v", KeepStateVar, err)
		}
		if b {
			opts = append(opts, chrome.KeepState())
		}
	}
	cr, err := chrome.New(
		ctx,
		opts...,
	)
	if err != nil {
		s.Fatal("Failed to start Chrome: ", err)
	}
	f.cr = cr
	tconn, err := cr.TestAPIConn(ctx)
	if err != nil {
		s.Fatal("Creating test API connection failed: ", err)
	}
	f.tconn = tconn
	defer faillog.DumpUITreeWithScreenshotOnError(cleanupCtx, s.OutDir(), s.HasError, cr, "fixture")
	// Capture a bug report on the Android phone if any onboarding/setup fails.
	defer func() {
		if s.HasError() {
			if err := BugReport(ctx, androidDevice.Device, s.OutDir()); err != nil {
				s.Log("Failed to save Android bug report: ", err)
			}
		}
	}()
	// Capture btsnoop logs during fixture setup to have adequate logging during the onboarding phase.
	btsnoopCmd := bluetooth.StartBTSnoopLogging(ctx, filepath.Join(s.OutDir(), "crossdevice-fixture-btsnoop.log"))
	if err := btsnoopCmd.Start(); err != nil {
		s.Fatal("Failed to start btsnoop logging: ", err)
	}
	// Deferred LIFO: Kill runs first, then Wait reaps the process.
	defer btsnoopCmd.Wait()
	defer btsnoopCmd.Kill()
	// Enable bluetooth debug logging.
	levels := bluetooth.LogVerbosity{
		Bluez:  true,
		Kernel: true,
	}
	if err := bluetooth.SetDebugLogLevels(ctx, levels); err != nil {
		s.Fatal("Failed to enable bluetooth debug logging: ", err)
	}
	// Phone and Chromebook will not be paired if we are not signed in to the Chromebook yet.
	if !f.noSignIn {
		// Sometimes during login the tcp connection to the snippet server on Android is lost.
		// If the Pair RPC fails, reconnect to the snippet server and try again.
		if err := f.PairWithAndroid(ctx, tconn, cr); err != nil {
			s.Fatal("Pairing with Android failed: ", err)
		}
		if f.allFeatures {
			// Wait for the "Smart Lock is turned on" notification to appear,
			// since it will cause Phone Hub to close if it's open before the notification pops up.
			if _, err := ash.WaitForNotification(ctx, tconn, 30*time.Second, ash.WaitTitleContains("Smart Lock is turned on")); err != nil {
				s.Log("Smart Lock notification did not appear after 30 seconds, proceeding anyways")
			}
			if err := phonehub.Enable(ctx, tconn, cr); err != nil {
				s.Fatal("Failed to enable Phone Hub: ", err)
			}
			if err := phonehub.Hide(ctx, tconn); err != nil {
				s.Fatal("Failed to hide Phone Hub after enabling it: ", err)
			}
			if err := androidDevice.EnablePhoneHubNotifications(ctx); err != nil {
				s.Fatal("Failed to enable Phone Hub notifications: ", err)
			}
		}
		if _, err := ash.WaitForNotification(ctx, tconn, 90*time.Second, ash.WaitTitleContains("Connected to")); err != nil {
			s.Fatal("Did not receive notification that Chromebook and Phone are paired")
		}
	}
	// Store Android attributes for reporting.
	androidAttributes, err := androidDevice.GetAndroidAttributes(ctx)
	if err != nil {
		s.Fatal("Failed to get Android attributes for reporting: ", err)
	}
	f.androidAttributes = androidAttributes
	// Store CrOS test metadata for reporting.
	crosAttributes, err := GetCrosAttributes(ctx, tconn, crosUsername)
	if err != nil {
		s.Fatal("Failed to get CrOS attributes for reporting: ", err)
	}
	f.crosAttributes = crosAttributes
	// Get the user's Download path for saving screen recordings.
	f.downloadsPath, err = cryptohome.DownloadsPath(ctx, f.cr.NormalizedUser())
	if err != nil {
		s.Fatal("Failed to get user's Downloads path: ", err)
	}
	// Lock chrome after all Setup is complete so we don't block other fixtures.
	if f.lockFixture {
		chrome.Lock()
	}
	return &FixtData{
		Chrome:        cr,
		TestConn:      tconn,
		AndroidDevice: androidDevice,
		Username:      crosUsername,
		Password:      crosPassword,
		ChromeOptions: opts,
	}
} | func (f *crossdeviceFixture) TearDown(ctx context.Context, s *testing.FixtState) {
if f.lockFixture {
chrome.Unlock()
if err := f.cr.Close(ctx); err != nil {
s.Log("Failed to close Chrome connection: ", err)
}
}
f.cr = nil
}
// Reset runs between tests that share this fixture. It verifies the cached
// Chrome session is still responsive and clears its state rather than
// restarting Chrome.
func (f *crossdeviceFixture) Reset(ctx context.Context) error {
	if err := f.cr.Responded(ctx); err != nil {
		return errors.Wrap(err, "existing Chrome connection is unusable")
	}
	if err := f.cr.ResetState(ctx); err != nil {
		return errors.Wrap(err, "failed resetting existing Chrome session")
	}
	return nil
}
// PreTest starts per-test diagnostics capture: device-attribute JSON, btsnoop
// logs, a chrome.log marker, a logcat start timestamp, and (when enabled)
// CrOS and Android screen recordings. PostTest stops and saves them.
func (f *crossdeviceFixture) PreTest(ctx context.Context, s *testing.FixtTestState) {
	if err := saveDeviceAttributes(f.crosAttributes, f.androidAttributes, filepath.Join(s.OutDir(), "device_attributes.json")); err != nil {
		s.Error("Failed to save device attributes: ", err)
	}
	// Use the test's context so the btsnoop capture spans the whole test body.
	f.btsnoopCmd = bluetooth.StartBTSnoopLogging(s.TestContext(), filepath.Join(s.OutDir(), "crossdevice-btsnoop.log"))
	if err := f.btsnoopCmd.Start(); err != nil {
		s.Fatal("Failed to start btsnoop logging: ", err)
	}
	if f.logMarker != nil {
		s.Log("A log marker is already created but not cleaned up")
	}
	// Mark the current end of chrome.log so PostTest can save just this test's slice.
	logMarker, err := logsaver.NewMarker(f.cr.LogFilename())
	if err == nil {
		f.logMarker = logMarker
	} else {
		s.Log("Failed to start the log saver: ", err)
	}
	// Remember the current logcat position so PostTest dumps only this test's logs.
	timestamp, err := f.androidDevice.Device.LatestLogcatTimestamp(ctx)
	if err != nil {
		s.Fatal("Failed to get latest logcat timestamp: ", err)
	}
	f.logcatStartTime = timestamp
	if f.saveScreenRecording {
		if f.kb == nil {
			// Use virtual keyboard since uiauto.StartRecordFromKB assumes F5 is the overview key.
			kb, err := input.VirtualKeyboard(ctx)
			if err != nil {
				s.Fatal("Failed to setup keyboard for screen recording: ", err)
			}
			// Cached on the fixture so subsequent tests reuse the same keyboard.
			f.kb = kb
		}
		if err := uiauto.StartRecordFromKB(ctx, f.tconn, f.kb, f.downloadsPath); err != nil {
			s.Fatal("Failed to start screen recording on CrOS: ", err)
		}
		saveScreen, err := f.androidDevice.StartScreenRecording(s.TestContext(), "android-screen", s.OutDir())
		if err != nil {
			s.Fatal("Failed to start screen recording on Android: ", err)
		}
		// Saver callback invoked in PostTest; it persists the recording only on test failure.
		f.saveAndroidScreenRecordingOnError = saveScreen
	}
}
// PostTest stops and saves the per-test diagnostics started in PreTest
// (btsnoop, chrome.log slice, logcat, screen recordings), restores the
// ADB-over-WiFi connection if a test dropped it, and captures an Android bug
// report when the test failed.
//
// Fix: corrected the "restaring" typo in the reconnection log message.
func (f *crossdeviceFixture) PostTest(ctx context.Context, s *testing.FixtTestState) {
	if err := f.btsnoopCmd.Kill(); err != nil {
		s.Error("Failed to stop btsnoop log capture: ", err)
	}
	// Reap the killed capture process; its exit error is expected and ignored.
	f.btsnoopCmd.Wait()
	f.btsnoopCmd = nil
	if f.logMarker != nil {
		// Save only the chrome.log lines written since PreTest.
		if err := f.logMarker.Save(filepath.Join(s.OutDir(), "chrome.log")); err != nil {
			s.Log("Failed to store per-test log data: ", err)
		}
		f.logMarker = nil
	}
	// Restore connection to the ADB-over-WiFi device if it was lost during the test.
	// This is needed for Instant Tether tests that disable WiFi on the Chromebook which interrupts the ADB connection.
	if PhoneIP.Value() != "" && f.androidDevice.Device.IsConnected(ctx) != nil {
		s.Log("Connection to ADB device lost, restarting")
		device, err := AdbOverWifi(ctx)
		if err != nil {
			s.Fatal("Failed to re-initialize adb-over-wifi: ", err)
		}
		f.androidDevice.Device = device
		if err := f.androidDevice.ReconnectToSnippet(ctx); err != nil {
			s.Fatal("Failed to reconnect to the snippet: ", err)
		}
	}
	if err := f.androidDevice.Device.DumpLogcatFromTimestamp(ctx, filepath.Join(s.OutDir(), "crossdevice-logcat.txt"), f.logcatStartTime); err != nil {
		s.Fatal("Failed to save logcat logs from the test: ", err)
	}
	if err := f.androidDevice.DumpLogs(ctx, s.OutDir(), "crossdevice-persistent-logcat.txt"); err != nil {
		s.Fatal("Failed to save persistent logcat logs: ", err)
	}
	if f.saveScreenRecording {
		if err := f.saveAndroidScreenRecordingOnError(ctx, s.HasError); err != nil {
			s.Fatal("Failed to save Android screen recording: ", err)
		}
		f.saveAndroidScreenRecordingOnError = nil
		ui := uiauto.New(f.tconn)
		var crosRecordErr error
		if err := ui.Exists(uiauto.ScreenRecordStopButton)(ctx); err != nil {
			// Smart Lock tests automatically stop the screen recording when they lock the screen.
			// The screen recording should still exist though.
			crosRecordErr = uiauto.SaveRecordFromKBOnError(ctx, f.tconn, s.HasError, s.OutDir(), f.downloadsPath)
		} else {
			crosRecordErr = uiauto.StopRecordFromKBAndSaveOnError(ctx, f.tconn, s.HasError, s.OutDir(), f.downloadsPath)
		}
		if crosRecordErr != nil {
			s.Fatal("Failed to save CrOS screen recording: ", crosRecordErr)
		}
	}
	// Collect a full Android bug report for failed tests (this is the slow step
	// that postTestTimeout budgets for).
	if s.HasError() {
		if err := BugReport(ctx, f.androidDevice.Device, s.OutDir()); err != nil {
			s.Error("Failed to save Android bug report: ", err)
		}
	}
}
// PairWithAndroid pairs the Android device with CrOS and waits for the paired
// device to appear in OS Settings. Because the TCP connection to the snippet
// server on Android is sometimes lost during login, a failed Pair RPC is
// retried once after reconnecting to the snippet server.
func (f *crossdeviceFixture) PairWithAndroid(ctx context.Context, tconn *chrome.TestConn, cr *chrome.Chrome) error {
	if err := f.androidDevice.Pair(ctx); err != nil {
		// First attempt failed; reconnect to the snippet server and retry once.
		if err := f.androidDevice.ReconnectToSnippet(ctx); err != nil {
			return errors.Wrap(err, "failed to reconnect to the snippet server")
		}
		if err := f.androidDevice.Pair(ctx); err != nil {
			return errors.Wrap(err, "failed to connect the Android device to CrOS")
		}
	}
	if err := crossdevicesettings.WaitForConnectedDevice(ctx, tconn, cr); err != nil {
		return errors.Wrap(err, "failed waiting for the connected device to appear in OS settings")
	}
	return nil
}
// saveDeviceAttributes saves the CrOS and Android device attributes as a
// formatted JSON file at the specified path.
//
// Fixes: the output-path parameter was renamed from "filepath" to "path" so it
// no longer shadows the imported path/filepath package, and the write-error
// message now mentions both devices' attributes (the file contains CrOS and
// Android data, not just CrOS).
func saveDeviceAttributes(crosAttrs *crossdevicecommon.CrosAttributes, androidAttrs *AndroidAttributes, path string) error {
	// Bundle both devices' attributes into a single JSON document.
	attributes := struct {
		CrOS    *crossdevicecommon.CrosAttributes
		Android *AndroidAttributes
	}{CrOS: crosAttrs, Android: androidAttrs}
	data, err := json.MarshalIndent(attributes, "", "\t")
	if err != nil {
		return errors.Wrap(err, "failed to format device metadata for logging")
	}
	if err := ioutil.WriteFile(path, data, 0644); err != nil {
		return errors.Wrap(err, "failed to write device attributes to output file")
	}
	return nil
}
// ConnectToWifi connects the chromebook to the Wifi network in its RF box.
// The connect script can fail transiently, so the attempt is polled for up to
// 20 seconds; a script failure whose output says "already connected" is
// treated as success.
func ConnectToWifi(ctx context.Context) error {
	if err := testing.Poll(ctx, func(ctx context.Context) error {
		// NOTE(review): the SSID "nearbysharing_1" and its password are
		// hard-coded for the lab RF-box network — confirm they match the
		// current lab configuration.
		out, err := testexec.CommandContext(ctx, "/usr/local/autotest/cros/scripts/wifi", "connect", "nearbysharing_1", "password").CombinedOutput(testexec.DumpLogOnError)
		if err != nil {
			if strings.Contains(string(out), "already connected") {
				testing.ContextLog(ctx, "Already connected to wifi network")
				return nil
			}
			return errors.Wrap(err, "failed to connect CrOS device to Wifi")
		}
		return nil
	}, &testing.PollOptions{Timeout: 20 * time.Second, Interval: 3 * time.Second}); err != nil {
		return errors.Wrap(err, "failed to connect to wifi")
	}
	return nil
} | random_line_split | |
fixture.go | // Copyright 2021 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package crossdevice
import (
"context"
"encoding/json"
"io/ioutil"
"path/filepath"
"strconv"
"strings"
"time"
"chromiumos/tast/common/android/adb"
crossdevicecommon "chromiumos/tast/common/cros/crossdevice"
"chromiumos/tast/common/testexec"
"chromiumos/tast/ctxutil"
"chromiumos/tast/errors"
"chromiumos/tast/local/bluetooth"
"chromiumos/tast/local/chrome"
"chromiumos/tast/local/chrome/ash"
"chromiumos/tast/local/chrome/crossdevice/crossdevicesettings"
"chromiumos/tast/local/chrome/crossdevice/phonehub"
"chromiumos/tast/local/chrome/lacros/lacrosfixt"
"chromiumos/tast/local/chrome/uiauto"
"chromiumos/tast/local/chrome/uiauto/faillog"
"chromiumos/tast/local/cryptohome"
"chromiumos/tast/local/input"
"chromiumos/tast/local/logsaver"
"chromiumos/tast/testing"
)
// FixtureOptions contains the options that we set for various crossDeviceFixture.
type FixtureOptions struct {
allFeatures bool // Whether or not to enable all cross device features.
saveScreenRecording bool
lockFixture bool // Whether or not to lock the fixture preventing chrome from being torn down outside of fixture teardown.
noSignIn bool // Whether or not to sign in with the specified GAIA account. True will not skip OOBE.
}
// NewCrossDeviceOnboarded creates a fixture that logs in to CrOS, pairs it with an Android device,
// and ensures the features in the "Connected devices" section of OS Settings are ready to use (Smart Lock, Phone Hub, etc.).
// Note that crossdevice fixtures inherit from crossdeviceAndroidSetup.
func NewCrossDeviceOnboarded(opt FixtureOptions, fOpt chrome.OptionsCallback) testing.FixtureImpl {
return &crossdeviceFixture{
fOpt: fOpt,
allFeatures: opt.allFeatures,
saveScreenRecording: opt.saveScreenRecording,
lockFixture: opt.lockFixture,
noSignIn: opt.noSignIn,
}
}
// Fixture runtime variables.
const (
// These vars can be used from the command line when running tests locally to configure the tests to run on personal GAIA accounts.
// Use these vars to log in with your own GAIA credentials on CrOS. The Android device should be signed in with the same account.
customCrOSUsername = "cros_username"
customCrOSPassword = "cros_password"
)
// postTestTimeout is the timeout for the fixture PostTest stage.
// We need a considerable amount of time to collect an Android bug report on failure.
const postTestTimeout = resetTimeout + BugReportDuration
func init() {
testing.AddFixture(&testing.Fixture{
Name: "crossdeviceOnboardedAllFeatures",
Desc: "User is signed in (with GAIA) to CrOS and paired with an Android phone with all Cross Device features enabled",
Contacts: []string{
"kyleshima@chromium.org",
"chromeos-sw-engprod@google.com",
},
Parent: "crossdeviceAndroidSetupPhoneHub",
Impl: NewCrossDeviceOnboarded(FixtureOptions{true, true, true, false}, func(ctx context.Context, s *testing.FixtState) ([]chrome.Option, error) {
return nil, nil
}),
Vars: []string{
customCrOSUsername,
customCrOSPassword,
KeepStateVar,
},
SetUpTimeout: 10*time.Minute + BugReportDuration,
ResetTimeout: resetTimeout,
TearDownTimeout: resetTimeout,
PreTestTimeout: resetTimeout,
PostTestTimeout: postTestTimeout,
})
testing.AddFixture(&testing.Fixture{
Name: "crossdeviceOnboarded",
Desc: "User is signed in (with GAIA) to CrOS and paired with an Android phone with default Cross Device features enabled",
Contacts: []string{
"kyleshima@chromium.org",
"chromeos-sw-engprod@google.com",
},
Parent: "crossdeviceAndroidSetupSmartLock",
Impl: NewCrossDeviceOnboarded(FixtureOptions{false, false, true, false}, func(ctx context.Context, s *testing.FixtState) ([]chrome.Option, error) {
return nil, nil
}),
Vars: []string{
customCrOSUsername,
customCrOSPassword,
KeepStateVar,
},
SetUpTimeout: 10*time.Minute + BugReportDuration,
ResetTimeout: resetTimeout,
TearDownTimeout: resetTimeout,
PreTestTimeout: resetTimeout,
PostTestTimeout: postTestTimeout,
})
testing.AddFixture(&testing.Fixture{
Name: "crossdeviceNoSignIn",
Desc: "User is not signed in (with GAIA) to CrOS but fixture requires control of an Android phone. Does not skip OOBE",
Contacts: []string{
"kyleshima@chromium.org",
"chromeos-sw-engprod@google.com",
},
Parent: "crossdeviceAndroidSetupPhoneHub",
Impl: NewCrossDeviceOnboarded(FixtureOptions{false, false, true, true}, func(ctx context.Context, s *testing.FixtState) ([]chrome.Option, error) {
return nil, nil
}),
Vars: []string{
customCrOSUsername,
customCrOSPassword,
KeepStateVar,
SignInProfileTestExtensionManifestKey,
},
SetUpTimeout: 10*time.Minute + BugReportDuration,
ResetTimeout: resetTimeout,
TearDownTimeout: resetTimeout,
PreTestTimeout: resetTimeout,
PostTestTimeout: postTestTimeout,
})
testing.AddFixture(&testing.Fixture{
Name: "crossdeviceOnboardedNoLock",
Desc: "User is signed in (with GAIA) to CrOS and paired with an Android phone with default Cross Device features enabled. Doesn't lock the fixture before starting the test",
Contacts: []string{
"kyleshima@chromium.org",
"chromeos-sw-engprod@google.com",
},
Parent: "crossdeviceAndroidSetupSmartLockLogin",
Impl: NewCrossDeviceOnboarded(FixtureOptions{false, false, false, false}, func(ctx context.Context, s *testing.FixtState) ([]chrome.Option, error) {
return nil, nil
}),
Vars: []string{
customCrOSUsername,
customCrOSPassword,
KeepStateVar,
},
SetUpTimeout: 10*time.Minute + BugReportDuration,
ResetTimeout: resetTimeout,
TearDownTimeout: resetTimeout,
PreTestTimeout: resetTimeout,
PostTestTimeout: postTestTimeout,
})
// lacros fixtures
testing.AddFixture(&testing.Fixture{
Name: "lacrosCrossdeviceOnboardedAllFeatures",
Desc: "User is signed in (with GAIA) to CrOS and paired with an Android phone with all Cross Device features enabled with lacros enabled",
Contacts: []string{
"kyleshima@chromium.org",
"chromeos-sw-engprod@google.com",
},
Parent: "crossdeviceAndroidSetupPhoneHub",
Impl: NewCrossDeviceOnboarded(FixtureOptions{true, true, true, false}, func(ctx context.Context, s *testing.FixtState) ([]chrome.Option, error) {
return lacrosfixt.NewConfig().Opts()
}),
Vars: []string{
customCrOSUsername,
customCrOSPassword,
KeepStateVar,
},
SetUpTimeout: 10*time.Minute + BugReportDuration,
ResetTimeout: resetTimeout,
TearDownTimeout: resetTimeout,
PreTestTimeout: resetTimeout,
PostTestTimeout: postTestTimeout,
})
}
type crossdeviceFixture struct {
fOpt chrome.OptionsCallback // Function to generate Chrome Options
cr *chrome.Chrome
tconn *chrome.TestConn
kb *input.KeyboardEventWriter
androidDevice *AndroidDevice
androidAttributes *AndroidAttributes
crosAttributes *crossdevicecommon.CrosAttributes
btsnoopCmd *testexec.Cmd
logMarker *logsaver.Marker // Marker for per-test log.
allFeatures bool
saveAndroidScreenRecordingOnError func(context.Context, func() bool) error
saveScreenRecording bool
lockFixture bool
noSignIn bool
logcatStartTime adb.LogcatTimestamp
downloadsPath string
}
// FixtData holds information made available to tests that specify this Fixture.
type FixtData struct {
// Chrome is the running chrome instance.
Chrome *chrome.Chrome
// TestConn is a connection to the test extension.
TestConn *chrome.TestConn
// Connection to the lock screen test extension.
LoginConn *chrome.TestConn
// AndroidDevice is an object for interacting with the connected Android device's Multidevice Snippet.
AndroidDevice *AndroidDevice
// The credentials to be used on both chromebook and phone.
Username string
Password string
// The options used to start Chrome sessions.
ChromeOptions []chrome.Option
}
func (f *crossdeviceFixture) SetUp(ctx context.Context, s *testing.FixtState) interface{} |
// TearDown releases the Chrome lock taken in SetUp (when lockFixture is set)
// and closes the Chrome session this fixture created.
func (f *crossdeviceFixture) TearDown(ctx context.Context, s *testing.FixtState) {
	if f.lockFixture {
		chrome.Unlock()
		if err := f.cr.Close(ctx); err != nil {
			s.Log("Failed to close Chrome connection: ", err)
		}
	}
	// NOTE(review): when lockFixture is false the session is dropped without an
	// explicit Close — presumably intentional so unlocked tests can manage
	// Chrome themselves; confirm this does not leak the connection.
	f.cr = nil
}
func (f *crossdeviceFixture) Reset(ctx context.Context) error {
if err := f.cr.Responded(ctx); err != nil {
return errors.Wrap(err, "existing Chrome connection is unusable")
}
if err := f.cr.ResetState(ctx); err != nil {
return errors.Wrap(err, "failed resetting existing Chrome session")
}
return nil
}
func (f *crossdeviceFixture) PreTest(ctx context.Context, s *testing.FixtTestState) {
if err := saveDeviceAttributes(f.crosAttributes, f.androidAttributes, filepath.Join(s.OutDir(), "device_attributes.json")); err != nil {
s.Error("Failed to save device attributes: ", err)
}
f.btsnoopCmd = bluetooth.StartBTSnoopLogging(s.TestContext(), filepath.Join(s.OutDir(), "crossdevice-btsnoop.log"))
if err := f.btsnoopCmd.Start(); err != nil {
s.Fatal("Failed to start btsnoop logging: ", err)
}
if f.logMarker != nil {
s.Log("A log marker is already created but not cleaned up")
}
logMarker, err := logsaver.NewMarker(f.cr.LogFilename())
if err == nil {
f.logMarker = logMarker
} else {
s.Log("Failed to start the log saver: ", err)
}
timestamp, err := f.androidDevice.Device.LatestLogcatTimestamp(ctx)
if err != nil {
s.Fatal("Failed to get latest logcat timestamp: ", err)
}
f.logcatStartTime = timestamp
if f.saveScreenRecording {
if f.kb == nil {
// Use virtual keyboard since uiauto.StartRecordFromKB assumes F5 is the overview key.
kb, err := input.VirtualKeyboard(ctx)
if err != nil {
s.Fatal("Failed to setup keyboard for screen recording: ", err)
}
f.kb = kb
}
if err := uiauto.StartRecordFromKB(ctx, f.tconn, f.kb, f.downloadsPath); err != nil {
s.Fatal("Failed to start screen recording on CrOS: ", err)
}
saveScreen, err := f.androidDevice.StartScreenRecording(s.TestContext(), "android-screen", s.OutDir())
if err != nil {
s.Fatal("Failed to start screen recording on Android: ", err)
}
f.saveAndroidScreenRecordingOnError = saveScreen
}
}
func (f *crossdeviceFixture) PostTest(ctx context.Context, s *testing.FixtTestState) {
if err := f.btsnoopCmd.Kill(); err != nil {
s.Error("Failed to stop btsnoop log capture: ", err)
}
f.btsnoopCmd.Wait()
f.btsnoopCmd = nil
if f.logMarker != nil {
if err := f.logMarker.Save(filepath.Join(s.OutDir(), "chrome.log")); err != nil {
s.Log("Failed to store per-test log data: ", err)
}
f.logMarker = nil
}
// Restore connection to the ADB-over-WiFi device if it was lost during the test.
// This is needed for Instant Tether tests that disable WiFi on the Chromebook which interrupts the ADB connection.
if PhoneIP.Value() != "" && f.androidDevice.Device.IsConnected(ctx) != nil {
s.Log("Connection to ADB device lost, restaring")
device, err := AdbOverWifi(ctx)
if err != nil {
s.Fatal("Failed to re-initialize adb-over-wifi: ", err)
}
f.androidDevice.Device = device
if err := f.androidDevice.ReconnectToSnippet(ctx); err != nil {
s.Fatal("Failed to reconnect to the snippet: ", err)
}
}
if err := f.androidDevice.Device.DumpLogcatFromTimestamp(ctx, filepath.Join(s.OutDir(), "crossdevice-logcat.txt"), f.logcatStartTime); err != nil {
s.Fatal("Failed to save logcat logs from the test: ", err)
}
if err := f.androidDevice.DumpLogs(ctx, s.OutDir(), "crossdevice-persistent-logcat.txt"); err != nil {
s.Fatal("Failed to save persistent logcat logs: ", err)
}
if f.saveScreenRecording {
if err := f.saveAndroidScreenRecordingOnError(ctx, s.HasError); err != nil {
s.Fatal("Failed to save Android screen recording: ", err)
}
f.saveAndroidScreenRecordingOnError = nil
ui := uiauto.New(f.tconn)
var crosRecordErr error
if err := ui.Exists(uiauto.ScreenRecordStopButton)(ctx); err != nil {
// Smart Lock tests automatically stop the screen recording when they lock the screen.
// The screen recording should still exist though.
crosRecordErr = uiauto.SaveRecordFromKBOnError(ctx, f.tconn, s.HasError, s.OutDir(), f.downloadsPath)
} else {
crosRecordErr = uiauto.StopRecordFromKBAndSaveOnError(ctx, f.tconn, s.HasError, s.OutDir(), f.downloadsPath)
}
if crosRecordErr != nil {
s.Fatal("Failed to save CrOS screen recording: ", crosRecordErr)
}
}
if s.HasError() {
if err := BugReport(ctx, f.androidDevice.Device, s.OutDir()); err != nil {
s.Error("Failed to save Android bug report: ", err)
}
}
}
// Verify that pairing between Android and Chromebook is successful.
func (f *crossdeviceFixture) PairWithAndroid(ctx context.Context, tconn *chrome.TestConn, cr *chrome.Chrome) error {
if err := f.androidDevice.Pair(ctx); err != nil {
if err := f.androidDevice.ReconnectToSnippet(ctx); err != nil {
return errors.Wrap(err, "failed to reconnect to the snippet server")
}
if err := f.androidDevice.Pair(ctx); err != nil {
return errors.Wrap(err, "failed to connect the Android device to CrOS")
}
}
if err := crossdevicesettings.WaitForConnectedDevice(ctx, tconn, cr); err != nil {
return errors.Wrap(err, "failed waiting for the connected device to appear in OS settings")
}
return nil
}
// saveDeviceAttributes saves the CrOS and Android device attributes as a formatted JSON at the specified filepath.
func saveDeviceAttributes(crosAttrs *crossdevicecommon.CrosAttributes, androidAttrs *AndroidAttributes, filepath string) error {
attributes := struct {
CrOS *crossdevicecommon.CrosAttributes
Android *AndroidAttributes
}{CrOS: crosAttrs, Android: androidAttrs}
crosLog, err := json.MarshalIndent(attributes, "", "\t")
if err != nil {
return errors.Wrap(err, "failed to format device metadata for logging")
}
if err := ioutil.WriteFile(filepath, crosLog, 0644); err != nil {
return errors.Wrap(err, "failed to write CrOS attributes to output file")
}
return nil
}
// ConnectToWifi connects the chromebook to the Wifi network in its RF box.
func ConnectToWifi(ctx context.Context) error {
if err := testing.Poll(ctx, func(ctx context.Context) error {
out, err := testexec.CommandContext(ctx, "/usr/local/autotest/cros/scripts/wifi", "connect", "nearbysharing_1", "password").CombinedOutput(testexec.DumpLogOnError)
if err != nil {
if strings.Contains(string(out), "already connected") {
testing.ContextLog(ctx, "Already connected to wifi network")
return nil
}
return errors.Wrap(err, "failed to connect CrOS device to Wifi")
}
return nil
}, &testing.PollOptions{Timeout: 20 * time.Second, Interval: 3 * time.Second}); err != nil {
return errors.Wrap(err, "failed to connect to wifi")
}
return nil
}
| {
// Android device from parent fixture.
androidDevice := s.ParentValue().(*FixtData).AndroidDevice
f.androidDevice = androidDevice
// Credentials to use (same as Android).
crosUsername := s.ParentValue().(*FixtData).Username
crosPassword := s.ParentValue().(*FixtData).Password
// Allocate time for logging and saving a screenshot and bugreport in case of failure.
cleanupCtx := ctx
ctx, cancel := ctxutil.Shorten(ctx, 10*time.Second+BugReportDuration)
defer cancel()
// Save logcat so we have Android logs even if fixture setup fails.
startTime, err := androidDevice.Device.LatestLogcatTimestamp(ctx)
if err != nil {
s.Fatal("Failed to get latest logcat timestamp: ", err)
}
defer androidDevice.Device.DumpLogcatFromTimestamp(cleanupCtx, filepath.Join(s.OutDir(), "fixture_setup_logcat.txt"), startTime)
defer androidDevice.DumpLogs(cleanupCtx, s.OutDir(), "fixture_setup_persistent_logcat.txt")
// Set default chrome options.
opts, err := f.fOpt(ctx, s)
if err != nil {
s.Fatal("Failed to obtain Chrome options: ", err)
}
tags := []string{
"*nearby*=3",
"*cryptauth*=3",
"*device_sync*=3",
"*multidevice*=3",
"*secure_channel*=3",
"*phonehub*=3",
"*blue*=3",
"ble_*=3",
}
opts = append(opts, chrome.ExtraArgs("--enable-logging", "--vmodule="+strings.Join(tags, ",")))
opts = append(opts, chrome.EnableFeatures("PhoneHubCameraRoll", "SmartLockUIRevamp", "OobeQuickStart"))
customUser, userOk := s.Var(customCrOSUsername)
customPass, passOk := s.Var(customCrOSPassword)
if userOk && passOk {
s.Log("Logging in with user-provided credentials")
crosUsername = customUser
crosPassword = customPass
} else {
s.Log("Logging in with default GAIA credentials")
}
if f.noSignIn {
opts = append(opts, chrome.DontSkipOOBEAfterLogin())
} else {
opts = append(opts, chrome.GAIALogin(chrome.Creds{User: crosUsername, Pass: crosPassword}))
}
if val, ok := s.Var(KeepStateVar); ok {
b, err := strconv.ParseBool(val)
if err != nil {
s.Fatalf("Unable to convert %v var to bool: %v", KeepStateVar, err)
}
if b {
opts = append(opts, chrome.KeepState())
}
}
cr, err := chrome.New(
ctx,
opts...,
)
if err != nil {
s.Fatal("Failed to start Chrome: ", err)
}
f.cr = cr
tconn, err := cr.TestAPIConn(ctx)
if err != nil {
s.Fatal("Creating test API connection failed: ", err)
}
f.tconn = tconn
defer faillog.DumpUITreeWithScreenshotOnError(cleanupCtx, s.OutDir(), s.HasError, cr, "fixture")
// Capture a bug report on the Android phone if any onboarding/setup fails.
defer func() {
if s.HasError() {
if err := BugReport(ctx, androidDevice.Device, s.OutDir()); err != nil {
s.Log("Failed to save Android bug report: ", err)
}
}
}()
// Capture btsnoop logs during fixture setup to have adequate logging during the onboarding phase.
btsnoopCmd := bluetooth.StartBTSnoopLogging(ctx, filepath.Join(s.OutDir(), "crossdevice-fixture-btsnoop.log"))
if err := btsnoopCmd.Start(); err != nil {
s.Fatal("Failed to start btsnoop logging: ", err)
}
defer btsnoopCmd.Wait()
defer btsnoopCmd.Kill()
// Enable bluetooth debug logging.
levels := bluetooth.LogVerbosity{
Bluez: true,
Kernel: true,
}
if err := bluetooth.SetDebugLogLevels(ctx, levels); err != nil {
return errors.Wrap(err, "failed to enable bluetooth debug logging")
}
// Phone and Chromebook will not be paired if we are not signed in to the Chromebook yet.
if !f.noSignIn {
// Sometimes during login the tcp connection to the snippet server on Android is lost.
// If the Pair RPC fails, reconnect to the snippet server and try again.
if err := f.PairWithAndroid(ctx, tconn, cr); err != nil {
s.Fatal("Pairing with Android failed: ", err)
}
if f.allFeatures {
// Wait for the "Smart Lock is turned on" notification to appear,
// since it will cause Phone Hub to close if it's open before the notification pops up.
if _, err := ash.WaitForNotification(ctx, tconn, 30*time.Second, ash.WaitTitleContains("Smart Lock is turned on")); err != nil {
s.Log("Smart Lock notification did not appear after 30 seconds, proceeding anyways")
}
if err := phonehub.Enable(ctx, tconn, cr); err != nil {
s.Fatal("Failed to enable Phone Hub: ", err)
}
if err := phonehub.Hide(ctx, tconn); err != nil {
s.Fatal("Failed to hide Phone Hub after enabling it: ", err)
}
if err := androidDevice.EnablePhoneHubNotifications(ctx); err != nil {
s.Fatal("Failed to enable Phone Hub notifications: ", err)
}
}
if _, err := ash.WaitForNotification(ctx, tconn, 90*time.Second, ash.WaitTitleContains("Connected to")); err != nil {
s.Fatal("Did not receive notification that Chromebook and Phone are paired")
}
}
// Store Android attributes for reporting.
androidAttributes, err := androidDevice.GetAndroidAttributes(ctx)
if err != nil {
s.Fatal("Failed to get Android attributes for reporting: ", err)
}
f.androidAttributes = androidAttributes
// Store CrOS test metadata for reporting.
crosAttributes, err := GetCrosAttributes(ctx, tconn, crosUsername)
if err != nil {
s.Fatal("Failed to get CrOS attributes for reporting: ", err)
}
f.crosAttributes = crosAttributes
// Get the user's Download path for saving screen recordings.
f.downloadsPath, err = cryptohome.DownloadsPath(ctx, f.cr.NormalizedUser())
if err != nil {
s.Fatal("Failed to get user's Downloads path: ", err)
}
// Lock chrome after all Setup is complete so we don't block other fixtures.
if f.lockFixture {
chrome.Lock()
}
return &FixtData{
Chrome: cr,
TestConn: tconn,
AndroidDevice: androidDevice,
Username: crosUsername,
Password: crosPassword,
ChromeOptions: opts,
}
} | identifier_body |
process.go | // Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package gocore
import (
"debug/dwarf"
"fmt"
"math/bits"
"strings"
"sync"
"golang.org/x/debug/internal/core"
)
// A Process represents the state of a Go process that core dumped.
type Process struct {
proc *core.Process
// data structure for fast object finding
// The key to these maps is the object address divided by
// pageTableSize * heapInfoSize.
pageTable map[core.Address]*pageTableEntry
pages []core.Address // deterministic ordering of keys of pageTable
// number of live objects
nObj int
goroutines []*Goroutine
// runtime info
rtGlobals map[string]region
rtConstants map[string]int64
// A module is a loadable unit. Most Go programs have 1, programs
// which load plugins will have more.
modules []*module
// address -> function mapping
funcTab funcTab
// map from dwarf type to *Type
dwarfMap map[dwarf.Type]*Type
// map from address of runtime._type to *Type
runtimeMap map[core.Address]*Type
// map from runtime type name to the set of *Type with that name
// Used to find candidates to put in the runtimeMap map.
runtimeNameMap map[string][]*Type
// memory usage by category
stats *Stats
buildVersion string
// This is a Go 1.17 process, or higher. This field is used for
// differences in behavior that otherwise can't be detected via the
// type system.
is117OrGreater bool
globals []*Root
// Types of each object, indexed by object index.
initTypeHeap sync.Once
types []typeInfo
// Reverse edges.
// The reverse edges for object #i are redge[ridx[i]:ridx[i+1]].
// A "reverse edge" for object #i is a location in memory where a pointer
// to object #i lives.
initReverseEdges sync.Once
redge []core.Address
ridx []int64
// Sorted list of all roots.
// Only initialized if FlagReverse is passed to Core.
rootIdx []*Root
}
// Process returns the core.Process used to construct this Process.
func (p *Process) Process() *core.Process {
return p.proc
}
func (p *Process) | () []*Goroutine {
return p.goroutines
}
// Stats returns a breakdown of the program's memory use by category.
func (p *Process) Stats() *Stats {
return p.stats
}
// BuildVersion returns the Go version that was used to build the inferior binary.
func (p *Process) BuildVersion() string {
return p.buildVersion
}
func (p *Process) Globals() []*Root {
return p.globals
}
// FindFunc returns the function which contains the code at address pc, if any.
func (p *Process) FindFunc(pc core.Address) *Func {
return p.funcTab.find(pc)
}
func (p *Process) findType(name string) *Type {
s := p.runtimeNameMap[name]
if len(s) == 0 {
panic("can't find type " + name)
}
return s[0]
}
// Core takes a loaded core file and extracts Go information from it.
func Core(proc *core.Process) (p *Process, err error) {
// Make sure we have DWARF info.
if _, err := proc.DWARF(); err != nil {
return nil, fmt.Errorf("error reading dwarf: %w", err)
}
// Guard against failures of proc.Read* routines.
/*
defer func() {
e := recover()
if e == nil {
return
}
p = nil
if x, ok := e.(error); ok {
err = x
return
}
panic(e) // Not an error, re-panic it.
}()
*/
p = &Process{
proc: proc,
runtimeMap: map[core.Address]*Type{},
dwarfMap: map[dwarf.Type]*Type{},
}
// Initialize everything that just depends on DWARF.
p.readDWARFTypes()
p.readRuntimeConstants()
p.readGlobals()
// Find runtime globals we care about. Initialize regions for them.
p.rtGlobals = map[string]region{}
for _, g := range p.globals {
if strings.HasPrefix(g.Name, "runtime.") {
p.rtGlobals[g.Name[8:]] = region{p: p, a: g.Addr, typ: g.Type}
}
}
// Read all the data that depend on runtime globals.
p.buildVersion = p.rtGlobals["buildVersion"].String()
// runtime._type varint name length encoding, and mheap curArena
// counting changed behavior in 1.17 without explicitly related type
// changes, making the difference difficult to detect. As a workaround,
// we check on the version explicitly.
//
// Go 1.17 added runtime._func.flag, so use that as a sentinal for this
// version.
p.is117OrGreater = p.findType("runtime._func").HasField("flag")
p.readModules()
p.readHeap()
p.readGs()
p.readStackVars() // needs to be after readGs.
p.markObjects() // needs to be after readGlobals, readStackVars.
return p, nil
}
type arena struct {
heapMin core.Address
heapMax core.Address
bitmapMin core.Address
bitmapMax core.Address
spanTableMin core.Address
spanTableMax core.Address
}
func (p *Process) getArenaBaseOffset() int64 {
if x, ok := p.rtConstants["arenaBaseOffsetUintptr"]; ok { // go1.15+
// arenaBaseOffset changed sign in 1.15. Callers treat this
// value as it was specified in 1.14, so we negate it here.
return -x
}
return p.rtConstants["arenaBaseOffset"]
}
func (p *Process) readHeap() {
ptrSize := p.proc.PtrSize()
logPtrSize := p.proc.LogPtrSize()
p.pageTable = map[core.Address]*pageTableEntry{}
mheap := p.rtGlobals["mheap_"]
var arenas []arena
if mheap.HasField("spans") {
// go 1.9 or 1.10. There is a single arena.
arenaStart := core.Address(mheap.Field("arena_start").Uintptr())
arenaUsed := core.Address(mheap.Field("arena_used").Uintptr())
arenaEnd := core.Address(mheap.Field("arena_end").Uintptr())
bitmapEnd := core.Address(mheap.Field("bitmap").Uintptr())
bitmapStart := bitmapEnd.Add(-int64(mheap.Field("bitmap_mapped").Uintptr()))
spanTableStart := mheap.Field("spans").SlicePtr().Address()
spanTableEnd := spanTableStart.Add(mheap.Field("spans").SliceCap() * ptrSize)
arenas = append(arenas, arena{
heapMin: arenaStart,
heapMax: arenaEnd,
bitmapMin: bitmapStart,
bitmapMax: bitmapEnd,
spanTableMin: spanTableStart,
spanTableMax: spanTableEnd,
})
// Copy pointer bits to heap info.
// Note that the pointer bits are stored backwards.
for a := arenaStart; a < arenaUsed; a = a.Add(ptrSize) {
off := a.Sub(arenaStart) >> logPtrSize
if p.proc.ReadUint8(bitmapEnd.Add(-(off>>2)-1))>>uint(off&3)&1 != 0 {
p.setHeapPtr(a)
}
}
} else {
// go 1.11+. Has multiple arenas.
arenaSize := p.rtConstants["heapArenaBytes"]
if arenaSize%heapInfoSize != 0 {
panic("arenaSize not a multiple of heapInfoSize")
}
arenaBaseOffset := p.getArenaBaseOffset()
if ptrSize == 4 && arenaBaseOffset != 0 {
panic("arenaBaseOffset must be 0 for 32-bit inferior")
}
level1Table := mheap.Field("arenas")
level1size := level1Table.ArrayLen()
for level1 := int64(0); level1 < level1size; level1++ {
ptr := level1Table.ArrayIndex(level1)
if ptr.Address() == 0 {
continue
}
level2table := ptr.Deref()
level2size := level2table.ArrayLen()
for level2 := int64(0); level2 < level2size; level2++ {
ptr = level2table.ArrayIndex(level2)
if ptr.Address() == 0 {
continue
}
a := ptr.Deref()
min := core.Address(arenaSize*(level2+level1*level2size) - arenaBaseOffset)
max := min.Add(arenaSize)
bitmap := a.Field("bitmap")
oneBitBitmap := a.HasField("noMorePtrs") // Starting in 1.20.
spans := a.Field("spans")
arenas = append(arenas, arena{
heapMin: min,
heapMax: max,
bitmapMin: bitmap.a,
bitmapMax: bitmap.a.Add(bitmap.ArrayLen()),
spanTableMin: spans.a,
spanTableMax: spans.a.Add(spans.ArrayLen() * ptrSize),
})
// Copy out ptr/nonptr bits
n := bitmap.ArrayLen()
for i := int64(0); i < n; i++ {
if oneBitBitmap {
// The array uses 1 bit per word of heap. See mbitmap.go for
// more information.
m := bitmap.ArrayIndex(i).Uintptr()
bits := 8 * ptrSize
for j := int64(0); j < bits; j++ {
if m>>uint(j)&1 != 0 {
p.setHeapPtr(min.Add((i*bits + j) * ptrSize))
}
}
} else {
// The nth byte is composed of 4 object bits and 4 live/dead
// bits. We ignore the 4 live/dead bits, which are on the
// high order side of the byte.
//
// See mbitmap.go for more information on the format of
// the bitmap field of heapArena.
m := bitmap.ArrayIndex(i).Uint8()
for j := int64(0); j < 4; j++ {
if m>>uint(j)&1 != 0 {
p.setHeapPtr(min.Add((i*4 + j) * ptrSize))
}
}
}
}
}
}
}
p.readSpans(mheap, arenas)
}
func (p *Process) readSpans(mheap region, arenas []arena) {
var all int64
var text int64
var readOnly int64
var heap int64
var spanTable int64
var bitmap int64
var data int64
var bss int64 // also includes mmap'd regions
for _, m := range p.proc.Mappings() {
size := m.Size()
all += size
switch m.Perm() {
case core.Read:
readOnly += size
case core.Read | core.Exec:
text += size
case core.Read | core.Write:
if m.CopyOnWrite() {
// Check if m.file == text's file? That could distinguish
// data segment from mmapped file.
data += size
break
}
attribute := func(x, y core.Address, p *int64) {
a := x.Max(m.Min())
b := y.Min(m.Max())
if a < b {
*p += b.Sub(a)
size -= b.Sub(a)
}
}
for _, a := range arenas {
attribute(a.heapMin, a.heapMax, &heap)
attribute(a.bitmapMin, a.bitmapMax, &bitmap)
attribute(a.spanTableMin, a.spanTableMax, &spanTable)
}
// Any other anonymous mapping is bss.
// TODO: how to distinguish original bss from anonymous mmap?
bss += size
default:
panic("weird mapping " + m.Perm().String())
}
}
if !p.is117OrGreater && mheap.HasField("curArena") {
// 1.13.3 and up have curArena. Subtract unallocated space in
// the current arena from the heap.
//
// As of 1.17, the runtime does this automatically
// (https://go.dev/cl/270537).
ca := mheap.Field("curArena")
unused := int64(ca.Field("end").Uintptr() - ca.Field("base").Uintptr())
heap -= unused
all -= unused
}
pageSize := p.rtConstants["_PageSize"]
// Span types
spanInUse := uint8(p.rtConstants["_MSpanInUse"])
spanManual := uint8(p.rtConstants["_MSpanManual"])
spanDead := uint8(p.rtConstants["_MSpanDead"])
spanFree := uint8(p.rtConstants["_MSpanFree"])
// Process spans.
if pageSize%heapInfoSize != 0 {
panic(fmt.Sprintf("page size not a multiple of %d", heapInfoSize))
}
allspans := mheap.Field("allspans")
var freeSpanSize int64
var releasedSpanSize int64
var manualSpanSize int64
var inUseSpanSize int64
var allocSize int64
var freeSize int64
var spanRoundSize int64
var manualAllocSize int64
var manualFreeSize int64
n := allspans.SliceLen()
for i := int64(0); i < n; i++ {
s := allspans.SliceIndex(i).Deref()
min := core.Address(s.Field("startAddr").Uintptr())
elemSize := int64(s.Field("elemsize").Uintptr())
nPages := int64(s.Field("npages").Uintptr())
spanSize := nPages * pageSize
max := min.Add(spanSize)
for a := min; a != max; a = a.Add(pageSize) {
if !p.proc.Readable(a) {
// Sometimes allocated but not yet touched pages or
// MADV_DONTNEEDed pages are not written
// to the core file. Don't count these pages toward
// space usage (otherwise it can look like the heap
// is larger than the total memory used).
spanSize -= pageSize
}
}
st := s.Field("state")
if st.IsStruct() && st.HasField("s") { // go1.14+
st = st.Field("s")
}
if st.IsStruct() && st.HasField("value") { // go1.20+
st = st.Field("value")
}
switch st.Uint8() {
case spanInUse:
inUseSpanSize += spanSize
n := int64(s.Field("nelems").Uintptr())
// An object is allocated if it is marked as
// allocated or it is below freeindex.
x := s.Field("allocBits").Address()
alloc := make([]bool, n)
for i := int64(0); i < n; i++ {
alloc[i] = p.proc.ReadUint8(x.Add(i/8))>>uint(i%8)&1 != 0
}
k := int64(s.Field("freeindex").Uintptr())
for i := int64(0); i < k; i++ {
alloc[i] = true
}
for i := int64(0); i < n; i++ {
if alloc[i] {
allocSize += elemSize
} else {
freeSize += elemSize
}
}
spanRoundSize += spanSize - n*elemSize
// initialize heap info records for all inuse spans.
for a := min; a < max; a += heapInfoSize {
h := p.allocHeapInfo(a)
h.base = min
h.size = elemSize
}
// Process special records.
for sp := s.Field("specials"); sp.Address() != 0; sp = sp.Field("next") {
sp = sp.Deref() // *special to special
if sp.Field("kind").Uint8() != uint8(p.rtConstants["_KindSpecialFinalizer"]) {
// All other specials (just profile records) can't point into the heap.
continue
}
obj := min.Add(int64(sp.Field("offset").Uint16()))
p.globals = append(p.globals,
&Root{
Name: fmt.Sprintf("finalizer for %x", obj),
Addr: sp.a,
Type: p.findType("runtime.specialfinalizer"),
Frame: nil,
})
// TODO: these aren't really "globals", as they
// are kept alive by the object they reference being alive.
// But we have no way of adding edges from an object to
// the corresponding finalizer data, so we punt on that thorny
// issue for now.
}
case spanFree:
freeSpanSize += spanSize
if s.HasField("npreleased") { // go 1.11 and earlier
nReleased := int64(s.Field("npreleased").Uintptr())
releasedSpanSize += nReleased * pageSize
} else { // go 1.12 and beyond
if s.Field("scavenged").Bool() {
releasedSpanSize += spanSize
}
}
case spanDead:
// These are just deallocated span descriptors. They use no heap.
case spanManual:
manualSpanSize += spanSize
manualAllocSize += spanSize
for x := core.Address(s.Field("manualFreeList").Cast("uintptr").Uintptr()); x != 0; x = p.proc.ReadPtr(x) {
manualAllocSize -= elemSize
manualFreeSize += elemSize
}
}
}
if mheap.HasField("pages") { // go1.14+
// There are no longer "free" mspans to represent unused pages.
// Instead, there are just holes in the pagemap into which we can allocate.
// Look through the page allocator and count the total free space.
// Also keep track of how much has been scavenged.
pages := mheap.Field("pages")
chunks := pages.Field("chunks")
arenaBaseOffset := p.getArenaBaseOffset()
pallocChunkBytes := p.rtConstants["pallocChunkBytes"]
pallocChunksL1Bits := p.rtConstants["pallocChunksL1Bits"]
pallocChunksL2Bits := p.rtConstants["pallocChunksL2Bits"]
inuse := pages.Field("inUse")
ranges := inuse.Field("ranges")
for i := int64(0); i < ranges.SliceLen(); i++ {
r := ranges.SliceIndex(i)
baseField := r.Field("base")
if baseField.IsStruct() { // go 1.15+
baseField = baseField.Field("a")
}
base := core.Address(baseField.Uintptr())
limitField := r.Field("limit")
if limitField.IsStruct() { // go 1.15+
limitField = limitField.Field("a")
}
limit := core.Address(limitField.Uintptr())
chunkBase := (int64(base) + arenaBaseOffset) / pallocChunkBytes
chunkLimit := (int64(limit) + arenaBaseOffset) / pallocChunkBytes
for chunkIdx := chunkBase; chunkIdx < chunkLimit; chunkIdx++ {
var l1, l2 int64
if pallocChunksL1Bits == 0 {
l2 = chunkIdx
} else {
l1 = chunkIdx >> uint(pallocChunksL2Bits)
l2 = chunkIdx & (1<<uint(pallocChunksL2Bits) - 1)
}
chunk := chunks.ArrayIndex(l1).Deref().ArrayIndex(l2)
// Count the free bits in this chunk.
alloc := chunk.Field("pallocBits")
for i := int64(0); i < pallocChunkBytes/pageSize/64; i++ {
freeSpanSize += int64(bits.OnesCount64(^alloc.ArrayIndex(i).Uint64())) * pageSize
}
// Count the scavenged bits in this chunk.
scavenged := chunk.Field("scavenged")
for i := int64(0); i < pallocChunkBytes/pageSize/64; i++ {
releasedSpanSize += int64(bits.OnesCount64(scavenged.ArrayIndex(i).Uint64())) * pageSize
}
}
}
// Also count pages in the page cache for each P.
allp := p.rtGlobals["allp"]
for i := int64(0); i < allp.SliceLen(); i++ {
pcache := allp.SliceIndex(i).Deref().Field("pcache")
freeSpanSize += int64(bits.OnesCount64(pcache.Field("cache").Uint64())) * pageSize
releasedSpanSize += int64(bits.OnesCount64(pcache.Field("scav").Uint64())) * pageSize
}
}
p.stats = &Stats{"all", all, []*Stats{
&Stats{"text", text, nil},
&Stats{"readonly", readOnly, nil},
&Stats{"data", data, nil},
&Stats{"bss", bss, nil},
&Stats{"heap", heap, []*Stats{
&Stats{"in use spans", inUseSpanSize, []*Stats{
&Stats{"alloc", allocSize, nil},
&Stats{"free", freeSize, nil},
&Stats{"round", spanRoundSize, nil},
}},
&Stats{"manual spans", manualSpanSize, []*Stats{
&Stats{"alloc", manualAllocSize, nil},
&Stats{"free", manualFreeSize, nil},
}},
&Stats{"free spans", freeSpanSize, []*Stats{
&Stats{"retained", freeSpanSize - releasedSpanSize, nil},
&Stats{"released", releasedSpanSize, nil},
}},
}},
&Stats{"ptr bitmap", bitmap, nil},
&Stats{"span table", spanTable, nil},
}}
var check func(*Stats)
check = func(s *Stats) {
if len(s.Children) == 0 {
return
}
var sum int64
for _, c := range s.Children {
sum += c.Size
}
if sum != s.Size {
panic(fmt.Sprintf("check failed for %s: %d vs %d", s.Name, s.Size, sum))
}
for _, c := range s.Children {
check(c)
}
}
check(p.stats)
}
func (p *Process) readGs() {
// TODO: figure out how to "flush" running Gs.
allgs := p.rtGlobals["allgs"]
n := allgs.SliceLen()
for i := int64(0); i < n; i++ {
r := allgs.SliceIndex(i).Deref()
g := p.readG(r)
if g == nil {
continue
}
p.goroutines = append(p.goroutines, g)
}
}
func (p *Process) readG(r region) *Goroutine {
g := &Goroutine{r: r}
stk := r.Field("stack")
g.stackSize = int64(stk.Field("hi").Uintptr() - stk.Field("lo").Uintptr())
var osT *core.Thread // os thread working on behalf of this G (if any).
mp := r.Field("m")
if mp.Address() != 0 {
m := mp.Deref()
pid := m.Field("procid").Uint64()
// TODO check that m.curg points to g?
for _, t := range p.proc.Threads() {
if t.Pid() == pid {
osT = t
}
}
}
st := r.Field("atomicstatus")
if st.IsStruct() && st.HasField("value") { // go1.20+
st = st.Field("value")
}
status := st.Uint32()
status &^= uint32(p.rtConstants["_Gscan"])
var sp, pc core.Address
switch status {
case uint32(p.rtConstants["_Gidle"]):
return g
case uint32(p.rtConstants["_Grunnable"]), uint32(p.rtConstants["_Gwaiting"]):
sched := r.Field("sched")
sp = core.Address(sched.Field("sp").Uintptr())
pc = core.Address(sched.Field("pc").Uintptr())
case uint32(p.rtConstants["_Grunning"]):
sp = osT.SP()
pc = osT.PC()
// TODO: back up to the calling frame?
case uint32(p.rtConstants["_Gsyscall"]):
sp = core.Address(r.Field("syscallsp").Uintptr())
pc = core.Address(r.Field("syscallpc").Uintptr())
// TODO: or should we use the osT registers?
case uint32(p.rtConstants["_Gdead"]):
return nil
// TODO: copystack, others?
default:
// Unknown state. We can't read the frames, so just bail now.
// TODO: make this switch complete and then panic here.
// TODO: or just return nil?
return g
}
for {
f, err := p.readFrame(sp, pc)
if err != nil {
fmt.Printf("warning: giving up on backtrace: %v\n", err)
break
}
if f.f.name == "runtime.goexit" {
break
}
if len(g.frames) > 0 {
g.frames[len(g.frames)-1].parent = f
}
g.frames = append(g.frames, f)
if f.f.name == "runtime.sigtrampgo" {
// Continue traceback at location where the signal
// interrupted normal execution.
ctxt := p.proc.ReadPtr(sp.Add(16)) // 3rd arg
//ctxt is a *ucontext
mctxt := ctxt.Add(5 * 8)
// mctxt is a *mcontext
sp = p.proc.ReadPtr(mctxt.Add(15 * 8))
pc = p.proc.ReadPtr(mctxt.Add(16 * 8))
// TODO: totally arch-dependent!
} else {
sp = f.max
pc = core.Address(p.proc.ReadUintptr(sp - 8)) // TODO:amd64 only
}
if pc == 0 {
// TODO: when would this happen?
break
}
if f.f.name == "runtime.systemstack" {
// switch over to goroutine stack
sched := r.Field("sched")
sp = core.Address(sched.Field("sp").Uintptr())
pc = core.Address(sched.Field("pc").Uintptr())
}
}
return g
}
func (p *Process) readFrame(sp, pc core.Address) (*Frame, error) {
f := p.funcTab.find(pc)
if f == nil {
return nil, fmt.Errorf("cannot find func for pc=%#x", pc)
}
off := pc.Sub(f.entry)
size, err := f.frameSize.find(off)
if err != nil {
return nil, fmt.Errorf("cannot read frame size at pc=%#x: %v", pc, err)
}
size += p.proc.PtrSize() // TODO: on amd64, the pushed return address
frame := &Frame{f: f, pc: pc, min: sp, max: sp.Add(size)}
// Find live ptrs in locals
live := map[core.Address]bool{}
if x := int(p.rtConstants["_FUNCDATA_LocalsPointerMaps"]); x < len(f.funcdata) {
addr := f.funcdata[x]
// TODO: Ideally we should have the same frame size check as
// runtime.getStackSize to detect errors when we are missing
// the stackmap.
if addr != 0 {
locals := region{p: p, a: addr, typ: p.findType("runtime.stackmap")}
n := locals.Field("n").Int32() // # of bitmaps
nbit := locals.Field("nbit").Int32() // # of bits per bitmap
idx, err := f.stackMap.find(off)
if err != nil {
return nil, fmt.Errorf("cannot read stack map at pc=%#x: %v", pc, err)
}
if idx < 0 {
idx = 0
}
if idx < int64(n) {
bits := locals.Field("bytedata").a.Add(int64(nbit+7) / 8 * idx)
base := frame.max.Add(-16).Add(-int64(nbit) * p.proc.PtrSize())
// TODO: -16 for amd64. Return address and parent's frame pointer
for i := int64(0); i < int64(nbit); i++ {
if p.proc.ReadUint8(bits.Add(i/8))>>uint(i&7)&1 != 0 {
live[base.Add(i*p.proc.PtrSize())] = true
}
}
}
}
}
// Same for args
if x := int(p.rtConstants["_FUNCDATA_ArgsPointerMaps"]); x < len(f.funcdata) {
addr := f.funcdata[x]
if addr != 0 {
args := region{p: p, a: addr, typ: p.findType("runtime.stackmap")}
n := args.Field("n").Int32() // # of bitmaps
nbit := args.Field("nbit").Int32() // # of bits per bitmap
idx, err := f.stackMap.find(off)
if err != nil {
return nil, fmt.Errorf("cannot read stack map at pc=%#x: %v", pc, err)
}
if idx < 0 {
idx = 0
}
if idx < int64(n) {
bits := args.Field("bytedata").a.Add(int64(nbit+7) / 8 * idx)
base := frame.max
// TODO: add to base for LR archs.
for i := int64(0); i < int64(nbit); i++ {
if p.proc.ReadUint8(bits.Add(i/8))>>uint(i&7)&1 != 0 {
live[base.Add(i*p.proc.PtrSize())] = true
}
}
}
}
}
frame.Live = live
return frame, nil
}
// A Stats struct is the node of a tree representing the entire memory
// usage of the Go program. Children of a node break its usage down
// by category.
// We maintain the invariant that, if there are children,
// Size == sum(c.Size for c in Children).
type Stats struct {
Name string
Size int64
Children []*Stats
}
func (s *Stats) Child(name string) *Stats {
for _, c := range s.Children {
if c.Name == name {
return c
}
}
return nil
}
| Goroutines | identifier_name |
process.go | // Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package gocore
import (
"debug/dwarf"
"fmt"
"math/bits"
"strings"
"sync"
"golang.org/x/debug/internal/core"
)
// A Process represents the state of a Go process that core dumped.
type Process struct {
proc *core.Process
// data structure for fast object finding
// The key to these maps is the object address divided by
// pageTableSize * heapInfoSize.
pageTable map[core.Address]*pageTableEntry
pages []core.Address // deterministic ordering of keys of pageTable
// number of live objects
nObj int
goroutines []*Goroutine
// runtime info
rtGlobals map[string]region
rtConstants map[string]int64
// A module is a loadable unit. Most Go programs have 1, programs
// which load plugins will have more.
modules []*module
// address -> function mapping
funcTab funcTab
// map from dwarf type to *Type
dwarfMap map[dwarf.Type]*Type
// map from address of runtime._type to *Type
runtimeMap map[core.Address]*Type
// map from runtime type name to the set of *Type with that name
// Used to find candidates to put in the runtimeMap map.
runtimeNameMap map[string][]*Type
// memory usage by category
stats *Stats
buildVersion string
// This is a Go 1.17 process, or higher. This field is used for
// differences in behavior that otherwise can't be detected via the
// type system.
is117OrGreater bool
globals []*Root
// Types of each object, indexed by object index.
initTypeHeap sync.Once
types []typeInfo
// Reverse edges.
// The reverse edges for object #i are redge[ridx[i]:ridx[i+1]].
// A "reverse edge" for object #i is a location in memory where a pointer
// to object #i lives.
initReverseEdges sync.Once
redge []core.Address
ridx []int64
// Sorted list of all roots.
// Only initialized if FlagReverse is passed to Core.
rootIdx []*Root
}
// Process returns the core.Process used to construct this Process.
func (p *Process) Process() *core.Process {
return p.proc
}
func (p *Process) Goroutines() []*Goroutine {
return p.goroutines
}
// Stats returns a breakdown of the program's memory use by category.
func (p *Process) Stats() *Stats {
return p.stats
}
// BuildVersion returns the Go version that was used to build the inferior binary.
func (p *Process) BuildVersion() string {
return p.buildVersion
}
func (p *Process) Globals() []*Root {
return p.globals
}
// FindFunc returns the function which contains the code at address pc, if any.
func (p *Process) FindFunc(pc core.Address) *Func {
return p.funcTab.find(pc)
}
func (p *Process) findType(name string) *Type {
s := p.runtimeNameMap[name]
if len(s) == 0 {
panic("can't find type " + name)
}
return s[0]
}
// Core takes a loaded core file and extracts Go information from it.
func Core(proc *core.Process) (p *Process, err error) {
// Make sure we have DWARF info.
if _, err := proc.DWARF(); err != nil {
return nil, fmt.Errorf("error reading dwarf: %w", err)
}
// Guard against failures of proc.Read* routines.
/*
defer func() {
e := recover()
if e == nil {
return
}
p = nil
if x, ok := e.(error); ok {
err = x
return
}
panic(e) // Not an error, re-panic it.
}()
*/
p = &Process{
proc: proc,
runtimeMap: map[core.Address]*Type{},
dwarfMap: map[dwarf.Type]*Type{},
}
// Initialize everything that just depends on DWARF.
p.readDWARFTypes()
p.readRuntimeConstants()
p.readGlobals()
// Find runtime globals we care about. Initialize regions for them.
p.rtGlobals = map[string]region{}
for _, g := range p.globals {
if strings.HasPrefix(g.Name, "runtime.") {
p.rtGlobals[g.Name[8:]] = region{p: p, a: g.Addr, typ: g.Type}
}
}
// Read all the data that depend on runtime globals.
p.buildVersion = p.rtGlobals["buildVersion"].String()
// runtime._type varint name length encoding, and mheap curArena
// counting changed behavior in 1.17 without explicitly related type
// changes, making the difference difficult to detect. As a workaround,
// we check on the version explicitly.
//
// Go 1.17 added runtime._func.flag, so use that as a sentinal for this
// version.
p.is117OrGreater = p.findType("runtime._func").HasField("flag")
p.readModules()
p.readHeap()
p.readGs()
p.readStackVars() // needs to be after readGs.
p.markObjects() // needs to be after readGlobals, readStackVars.
return p, nil
}
type arena struct {
heapMin core.Address
heapMax core.Address
bitmapMin core.Address
bitmapMax core.Address
spanTableMin core.Address
spanTableMax core.Address
}
func (p *Process) getArenaBaseOffset() int64 {
if x, ok := p.rtConstants["arenaBaseOffsetUintptr"]; ok { // go1.15+
// arenaBaseOffset changed sign in 1.15. Callers treat this
// value as it was specified in 1.14, so we negate it here.
return -x
}
return p.rtConstants["arenaBaseOffset"]
}
func (p *Process) readHeap() {
ptrSize := p.proc.PtrSize()
logPtrSize := p.proc.LogPtrSize()
p.pageTable = map[core.Address]*pageTableEntry{}
mheap := p.rtGlobals["mheap_"]
var arenas []arena
if mheap.HasField("spans") {
// go 1.9 or 1.10. There is a single arena.
arenaStart := core.Address(mheap.Field("arena_start").Uintptr())
arenaUsed := core.Address(mheap.Field("arena_used").Uintptr())
arenaEnd := core.Address(mheap.Field("arena_end").Uintptr())
bitmapEnd := core.Address(mheap.Field("bitmap").Uintptr())
bitmapStart := bitmapEnd.Add(-int64(mheap.Field("bitmap_mapped").Uintptr()))
spanTableStart := mheap.Field("spans").SlicePtr().Address()
spanTableEnd := spanTableStart.Add(mheap.Field("spans").SliceCap() * ptrSize)
arenas = append(arenas, arena{
heapMin: arenaStart,
heapMax: arenaEnd,
bitmapMin: bitmapStart,
bitmapMax: bitmapEnd,
spanTableMin: spanTableStart,
spanTableMax: spanTableEnd,
})
// Copy pointer bits to heap info.
// Note that the pointer bits are stored backwards.
for a := arenaStart; a < arenaUsed; a = a.Add(ptrSize) {
off := a.Sub(arenaStart) >> logPtrSize
if p.proc.ReadUint8(bitmapEnd.Add(-(off>>2)-1))>>uint(off&3)&1 != 0 {
p.setHeapPtr(a)
}
}
} else {
// go 1.11+. Has multiple arenas.
arenaSize := p.rtConstants["heapArenaBytes"]
if arenaSize%heapInfoSize != 0 {
panic("arenaSize not a multiple of heapInfoSize")
}
arenaBaseOffset := p.getArenaBaseOffset()
if ptrSize == 4 && arenaBaseOffset != 0 {
panic("arenaBaseOffset must be 0 for 32-bit inferior")
}
level1Table := mheap.Field("arenas")
level1size := level1Table.ArrayLen()
for level1 := int64(0); level1 < level1size; level1++ |
}
p.readSpans(mheap, arenas)
}
func (p *Process) readSpans(mheap region, arenas []arena) {
var all int64
var text int64
var readOnly int64
var heap int64
var spanTable int64
var bitmap int64
var data int64
var bss int64 // also includes mmap'd regions
for _, m := range p.proc.Mappings() {
size := m.Size()
all += size
switch m.Perm() {
case core.Read:
readOnly += size
case core.Read | core.Exec:
text += size
case core.Read | core.Write:
if m.CopyOnWrite() {
// Check if m.file == text's file? That could distinguish
// data segment from mmapped file.
data += size
break
}
attribute := func(x, y core.Address, p *int64) {
a := x.Max(m.Min())
b := y.Min(m.Max())
if a < b {
*p += b.Sub(a)
size -= b.Sub(a)
}
}
for _, a := range arenas {
attribute(a.heapMin, a.heapMax, &heap)
attribute(a.bitmapMin, a.bitmapMax, &bitmap)
attribute(a.spanTableMin, a.spanTableMax, &spanTable)
}
// Any other anonymous mapping is bss.
// TODO: how to distinguish original bss from anonymous mmap?
bss += size
default:
panic("weird mapping " + m.Perm().String())
}
}
if !p.is117OrGreater && mheap.HasField("curArena") {
// 1.13.3 and up have curArena. Subtract unallocated space in
// the current arena from the heap.
//
// As of 1.17, the runtime does this automatically
// (https://go.dev/cl/270537).
ca := mheap.Field("curArena")
unused := int64(ca.Field("end").Uintptr() - ca.Field("base").Uintptr())
heap -= unused
all -= unused
}
pageSize := p.rtConstants["_PageSize"]
// Span types
spanInUse := uint8(p.rtConstants["_MSpanInUse"])
spanManual := uint8(p.rtConstants["_MSpanManual"])
spanDead := uint8(p.rtConstants["_MSpanDead"])
spanFree := uint8(p.rtConstants["_MSpanFree"])
// Process spans.
if pageSize%heapInfoSize != 0 {
panic(fmt.Sprintf("page size not a multiple of %d", heapInfoSize))
}
allspans := mheap.Field("allspans")
var freeSpanSize int64
var releasedSpanSize int64
var manualSpanSize int64
var inUseSpanSize int64
var allocSize int64
var freeSize int64
var spanRoundSize int64
var manualAllocSize int64
var manualFreeSize int64
n := allspans.SliceLen()
for i := int64(0); i < n; i++ {
s := allspans.SliceIndex(i).Deref()
min := core.Address(s.Field("startAddr").Uintptr())
elemSize := int64(s.Field("elemsize").Uintptr())
nPages := int64(s.Field("npages").Uintptr())
spanSize := nPages * pageSize
max := min.Add(spanSize)
for a := min; a != max; a = a.Add(pageSize) {
if !p.proc.Readable(a) {
// Sometimes allocated but not yet touched pages or
// MADV_DONTNEEDed pages are not written
// to the core file. Don't count these pages toward
// space usage (otherwise it can look like the heap
// is larger than the total memory used).
spanSize -= pageSize
}
}
st := s.Field("state")
if st.IsStruct() && st.HasField("s") { // go1.14+
st = st.Field("s")
}
if st.IsStruct() && st.HasField("value") { // go1.20+
st = st.Field("value")
}
switch st.Uint8() {
case spanInUse:
inUseSpanSize += spanSize
n := int64(s.Field("nelems").Uintptr())
// An object is allocated if it is marked as
// allocated or it is below freeindex.
x := s.Field("allocBits").Address()
alloc := make([]bool, n)
for i := int64(0); i < n; i++ {
alloc[i] = p.proc.ReadUint8(x.Add(i/8))>>uint(i%8)&1 != 0
}
k := int64(s.Field("freeindex").Uintptr())
for i := int64(0); i < k; i++ {
alloc[i] = true
}
for i := int64(0); i < n; i++ {
if alloc[i] {
allocSize += elemSize
} else {
freeSize += elemSize
}
}
spanRoundSize += spanSize - n*elemSize
// initialize heap info records for all inuse spans.
for a := min; a < max; a += heapInfoSize {
h := p.allocHeapInfo(a)
h.base = min
h.size = elemSize
}
// Process special records.
for sp := s.Field("specials"); sp.Address() != 0; sp = sp.Field("next") {
sp = sp.Deref() // *special to special
if sp.Field("kind").Uint8() != uint8(p.rtConstants["_KindSpecialFinalizer"]) {
// All other specials (just profile records) can't point into the heap.
continue
}
obj := min.Add(int64(sp.Field("offset").Uint16()))
p.globals = append(p.globals,
&Root{
Name: fmt.Sprintf("finalizer for %x", obj),
Addr: sp.a,
Type: p.findType("runtime.specialfinalizer"),
Frame: nil,
})
// TODO: these aren't really "globals", as they
// are kept alive by the object they reference being alive.
// But we have no way of adding edges from an object to
// the corresponding finalizer data, so we punt on that thorny
// issue for now.
}
case spanFree:
freeSpanSize += spanSize
if s.HasField("npreleased") { // go 1.11 and earlier
nReleased := int64(s.Field("npreleased").Uintptr())
releasedSpanSize += nReleased * pageSize
} else { // go 1.12 and beyond
if s.Field("scavenged").Bool() {
releasedSpanSize += spanSize
}
}
case spanDead:
// These are just deallocated span descriptors. They use no heap.
case spanManual:
manualSpanSize += spanSize
manualAllocSize += spanSize
for x := core.Address(s.Field("manualFreeList").Cast("uintptr").Uintptr()); x != 0; x = p.proc.ReadPtr(x) {
manualAllocSize -= elemSize
manualFreeSize += elemSize
}
}
}
if mheap.HasField("pages") { // go1.14+
// There are no longer "free" mspans to represent unused pages.
// Instead, there are just holes in the pagemap into which we can allocate.
// Look through the page allocator and count the total free space.
// Also keep track of how much has been scavenged.
pages := mheap.Field("pages")
chunks := pages.Field("chunks")
arenaBaseOffset := p.getArenaBaseOffset()
pallocChunkBytes := p.rtConstants["pallocChunkBytes"]
pallocChunksL1Bits := p.rtConstants["pallocChunksL1Bits"]
pallocChunksL2Bits := p.rtConstants["pallocChunksL2Bits"]
inuse := pages.Field("inUse")
ranges := inuse.Field("ranges")
for i := int64(0); i < ranges.SliceLen(); i++ {
r := ranges.SliceIndex(i)
baseField := r.Field("base")
if baseField.IsStruct() { // go 1.15+
baseField = baseField.Field("a")
}
base := core.Address(baseField.Uintptr())
limitField := r.Field("limit")
if limitField.IsStruct() { // go 1.15+
limitField = limitField.Field("a")
}
limit := core.Address(limitField.Uintptr())
chunkBase := (int64(base) + arenaBaseOffset) / pallocChunkBytes
chunkLimit := (int64(limit) + arenaBaseOffset) / pallocChunkBytes
for chunkIdx := chunkBase; chunkIdx < chunkLimit; chunkIdx++ {
var l1, l2 int64
if pallocChunksL1Bits == 0 {
l2 = chunkIdx
} else {
l1 = chunkIdx >> uint(pallocChunksL2Bits)
l2 = chunkIdx & (1<<uint(pallocChunksL2Bits) - 1)
}
chunk := chunks.ArrayIndex(l1).Deref().ArrayIndex(l2)
// Count the free bits in this chunk.
alloc := chunk.Field("pallocBits")
for i := int64(0); i < pallocChunkBytes/pageSize/64; i++ {
freeSpanSize += int64(bits.OnesCount64(^alloc.ArrayIndex(i).Uint64())) * pageSize
}
// Count the scavenged bits in this chunk.
scavenged := chunk.Field("scavenged")
for i := int64(0); i < pallocChunkBytes/pageSize/64; i++ {
releasedSpanSize += int64(bits.OnesCount64(scavenged.ArrayIndex(i).Uint64())) * pageSize
}
}
}
// Also count pages in the page cache for each P.
allp := p.rtGlobals["allp"]
for i := int64(0); i < allp.SliceLen(); i++ {
pcache := allp.SliceIndex(i).Deref().Field("pcache")
freeSpanSize += int64(bits.OnesCount64(pcache.Field("cache").Uint64())) * pageSize
releasedSpanSize += int64(bits.OnesCount64(pcache.Field("scav").Uint64())) * pageSize
}
}
p.stats = &Stats{"all", all, []*Stats{
&Stats{"text", text, nil},
&Stats{"readonly", readOnly, nil},
&Stats{"data", data, nil},
&Stats{"bss", bss, nil},
&Stats{"heap", heap, []*Stats{
&Stats{"in use spans", inUseSpanSize, []*Stats{
&Stats{"alloc", allocSize, nil},
&Stats{"free", freeSize, nil},
&Stats{"round", spanRoundSize, nil},
}},
&Stats{"manual spans", manualSpanSize, []*Stats{
&Stats{"alloc", manualAllocSize, nil},
&Stats{"free", manualFreeSize, nil},
}},
&Stats{"free spans", freeSpanSize, []*Stats{
&Stats{"retained", freeSpanSize - releasedSpanSize, nil},
&Stats{"released", releasedSpanSize, nil},
}},
}},
&Stats{"ptr bitmap", bitmap, nil},
&Stats{"span table", spanTable, nil},
}}
var check func(*Stats)
check = func(s *Stats) {
if len(s.Children) == 0 {
return
}
var sum int64
for _, c := range s.Children {
sum += c.Size
}
if sum != s.Size {
panic(fmt.Sprintf("check failed for %s: %d vs %d", s.Name, s.Size, sum))
}
for _, c := range s.Children {
check(c)
}
}
check(p.stats)
}
func (p *Process) readGs() {
// TODO: figure out how to "flush" running Gs.
allgs := p.rtGlobals["allgs"]
n := allgs.SliceLen()
for i := int64(0); i < n; i++ {
r := allgs.SliceIndex(i).Deref()
g := p.readG(r)
if g == nil {
continue
}
p.goroutines = append(p.goroutines, g)
}
}
func (p *Process) readG(r region) *Goroutine {
g := &Goroutine{r: r}
stk := r.Field("stack")
g.stackSize = int64(stk.Field("hi").Uintptr() - stk.Field("lo").Uintptr())
var osT *core.Thread // os thread working on behalf of this G (if any).
mp := r.Field("m")
if mp.Address() != 0 {
m := mp.Deref()
pid := m.Field("procid").Uint64()
// TODO check that m.curg points to g?
for _, t := range p.proc.Threads() {
if t.Pid() == pid {
osT = t
}
}
}
st := r.Field("atomicstatus")
if st.IsStruct() && st.HasField("value") { // go1.20+
st = st.Field("value")
}
status := st.Uint32()
status &^= uint32(p.rtConstants["_Gscan"])
var sp, pc core.Address
switch status {
case uint32(p.rtConstants["_Gidle"]):
return g
case uint32(p.rtConstants["_Grunnable"]), uint32(p.rtConstants["_Gwaiting"]):
sched := r.Field("sched")
sp = core.Address(sched.Field("sp").Uintptr())
pc = core.Address(sched.Field("pc").Uintptr())
case uint32(p.rtConstants["_Grunning"]):
sp = osT.SP()
pc = osT.PC()
// TODO: back up to the calling frame?
case uint32(p.rtConstants["_Gsyscall"]):
sp = core.Address(r.Field("syscallsp").Uintptr())
pc = core.Address(r.Field("syscallpc").Uintptr())
// TODO: or should we use the osT registers?
case uint32(p.rtConstants["_Gdead"]):
return nil
// TODO: copystack, others?
default:
// Unknown state. We can't read the frames, so just bail now.
// TODO: make this switch complete and then panic here.
// TODO: or just return nil?
return g
}
for {
f, err := p.readFrame(sp, pc)
if err != nil {
fmt.Printf("warning: giving up on backtrace: %v\n", err)
break
}
if f.f.name == "runtime.goexit" {
break
}
if len(g.frames) > 0 {
g.frames[len(g.frames)-1].parent = f
}
g.frames = append(g.frames, f)
if f.f.name == "runtime.sigtrampgo" {
// Continue traceback at location where the signal
// interrupted normal execution.
ctxt := p.proc.ReadPtr(sp.Add(16)) // 3rd arg
//ctxt is a *ucontext
mctxt := ctxt.Add(5 * 8)
// mctxt is a *mcontext
sp = p.proc.ReadPtr(mctxt.Add(15 * 8))
pc = p.proc.ReadPtr(mctxt.Add(16 * 8))
// TODO: totally arch-dependent!
} else {
sp = f.max
pc = core.Address(p.proc.ReadUintptr(sp - 8)) // TODO:amd64 only
}
if pc == 0 {
// TODO: when would this happen?
break
}
if f.f.name == "runtime.systemstack" {
// switch over to goroutine stack
sched := r.Field("sched")
sp = core.Address(sched.Field("sp").Uintptr())
pc = core.Address(sched.Field("pc").Uintptr())
}
}
return g
}
func (p *Process) readFrame(sp, pc core.Address) (*Frame, error) {
f := p.funcTab.find(pc)
if f == nil {
return nil, fmt.Errorf("cannot find func for pc=%#x", pc)
}
off := pc.Sub(f.entry)
size, err := f.frameSize.find(off)
if err != nil {
return nil, fmt.Errorf("cannot read frame size at pc=%#x: %v", pc, err)
}
size += p.proc.PtrSize() // TODO: on amd64, the pushed return address
frame := &Frame{f: f, pc: pc, min: sp, max: sp.Add(size)}
// Find live ptrs in locals
live := map[core.Address]bool{}
if x := int(p.rtConstants["_FUNCDATA_LocalsPointerMaps"]); x < len(f.funcdata) {
addr := f.funcdata[x]
// TODO: Ideally we should have the same frame size check as
// runtime.getStackSize to detect errors when we are missing
// the stackmap.
if addr != 0 {
locals := region{p: p, a: addr, typ: p.findType("runtime.stackmap")}
n := locals.Field("n").Int32() // # of bitmaps
nbit := locals.Field("nbit").Int32() // # of bits per bitmap
idx, err := f.stackMap.find(off)
if err != nil {
return nil, fmt.Errorf("cannot read stack map at pc=%#x: %v", pc, err)
}
if idx < 0 {
idx = 0
}
if idx < int64(n) {
bits := locals.Field("bytedata").a.Add(int64(nbit+7) / 8 * idx)
base := frame.max.Add(-16).Add(-int64(nbit) * p.proc.PtrSize())
// TODO: -16 for amd64. Return address and parent's frame pointer
for i := int64(0); i < int64(nbit); i++ {
if p.proc.ReadUint8(bits.Add(i/8))>>uint(i&7)&1 != 0 {
live[base.Add(i*p.proc.PtrSize())] = true
}
}
}
}
}
// Same for args
if x := int(p.rtConstants["_FUNCDATA_ArgsPointerMaps"]); x < len(f.funcdata) {
addr := f.funcdata[x]
if addr != 0 {
args := region{p: p, a: addr, typ: p.findType("runtime.stackmap")}
n := args.Field("n").Int32() // # of bitmaps
nbit := args.Field("nbit").Int32() // # of bits per bitmap
idx, err := f.stackMap.find(off)
if err != nil {
return nil, fmt.Errorf("cannot read stack map at pc=%#x: %v", pc, err)
}
if idx < 0 {
idx = 0
}
if idx < int64(n) {
bits := args.Field("bytedata").a.Add(int64(nbit+7) / 8 * idx)
base := frame.max
// TODO: add to base for LR archs.
for i := int64(0); i < int64(nbit); i++ {
if p.proc.ReadUint8(bits.Add(i/8))>>uint(i&7)&1 != 0 {
live[base.Add(i*p.proc.PtrSize())] = true
}
}
}
}
}
frame.Live = live
return frame, nil
}
// A Stats struct is the node of a tree representing the entire memory
// usage of the Go program. Children of a node break its usage down
// by category.
// We maintain the invariant that, if there are children,
// Size == sum(c.Size for c in Children).
type Stats struct {
Name string
Size int64
Children []*Stats
}
func (s *Stats) Child(name string) *Stats {
for _, c := range s.Children {
if c.Name == name {
return c
}
}
return nil
}
| {
ptr := level1Table.ArrayIndex(level1)
if ptr.Address() == 0 {
continue
}
level2table := ptr.Deref()
level2size := level2table.ArrayLen()
for level2 := int64(0); level2 < level2size; level2++ {
ptr = level2table.ArrayIndex(level2)
if ptr.Address() == 0 {
continue
}
a := ptr.Deref()
min := core.Address(arenaSize*(level2+level1*level2size) - arenaBaseOffset)
max := min.Add(arenaSize)
bitmap := a.Field("bitmap")
oneBitBitmap := a.HasField("noMorePtrs") // Starting in 1.20.
spans := a.Field("spans")
arenas = append(arenas, arena{
heapMin: min,
heapMax: max,
bitmapMin: bitmap.a,
bitmapMax: bitmap.a.Add(bitmap.ArrayLen()),
spanTableMin: spans.a,
spanTableMax: spans.a.Add(spans.ArrayLen() * ptrSize),
})
// Copy out ptr/nonptr bits
n := bitmap.ArrayLen()
for i := int64(0); i < n; i++ {
if oneBitBitmap {
// The array uses 1 bit per word of heap. See mbitmap.go for
// more information.
m := bitmap.ArrayIndex(i).Uintptr()
bits := 8 * ptrSize
for j := int64(0); j < bits; j++ {
if m>>uint(j)&1 != 0 {
p.setHeapPtr(min.Add((i*bits + j) * ptrSize))
}
}
} else {
// The nth byte is composed of 4 object bits and 4 live/dead
// bits. We ignore the 4 live/dead bits, which are on the
// high order side of the byte.
//
// See mbitmap.go for more information on the format of
// the bitmap field of heapArena.
m := bitmap.ArrayIndex(i).Uint8()
for j := int64(0); j < 4; j++ {
if m>>uint(j)&1 != 0 {
p.setHeapPtr(min.Add((i*4 + j) * ptrSize))
}
}
}
}
}
} | conditional_block |
process.go | // Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package gocore
import (
"debug/dwarf"
"fmt"
"math/bits"
"strings"
"sync"
"golang.org/x/debug/internal/core"
)
// A Process represents the state of a Go process that core dumped.
type Process struct {
proc *core.Process
// data structure for fast object finding
// The key to these maps is the object address divided by
// pageTableSize * heapInfoSize.
pageTable map[core.Address]*pageTableEntry
pages []core.Address // deterministic ordering of keys of pageTable
// number of live objects
nObj int
goroutines []*Goroutine
// runtime info
rtGlobals map[string]region
rtConstants map[string]int64
// A module is a loadable unit. Most Go programs have 1, programs
// which load plugins will have more.
modules []*module
// address -> function mapping
funcTab funcTab
// map from dwarf type to *Type
dwarfMap map[dwarf.Type]*Type
// map from address of runtime._type to *Type
runtimeMap map[core.Address]*Type
// map from runtime type name to the set of *Type with that name
// Used to find candidates to put in the runtimeMap map.
runtimeNameMap map[string][]*Type
// memory usage by category
stats *Stats
buildVersion string
// This is a Go 1.17 process, or higher. This field is used for
// differences in behavior that otherwise can't be detected via the
// type system.
is117OrGreater bool
globals []*Root
// Types of each object, indexed by object index.
initTypeHeap sync.Once
types []typeInfo
// Reverse edges.
// The reverse edges for object #i are redge[ridx[i]:ridx[i+1]].
// A "reverse edge" for object #i is a location in memory where a pointer
// to object #i lives.
initReverseEdges sync.Once
redge []core.Address
ridx []int64
// Sorted list of all roots.
// Only initialized if FlagReverse is passed to Core.
rootIdx []*Root
}
// Process returns the core.Process used to construct this Process.
func (p *Process) Process() *core.Process {
return p.proc
}
func (p *Process) Goroutines() []*Goroutine {
return p.goroutines
}
// Stats returns a breakdown of the program's memory use by category.
func (p *Process) Stats() *Stats {
return p.stats
}
// BuildVersion returns the Go version that was used to build the inferior binary.
func (p *Process) BuildVersion() string {
return p.buildVersion
}
func (p *Process) Globals() []*Root {
return p.globals
}
// FindFunc returns the function which contains the code at address pc, if any.
func (p *Process) FindFunc(pc core.Address) *Func {
return p.funcTab.find(pc)
}
func (p *Process) findType(name string) *Type {
s := p.runtimeNameMap[name]
if len(s) == 0 {
panic("can't find type " + name)
}
return s[0]
}
// Core takes a loaded core file and extracts Go information from it.
func Core(proc *core.Process) (p *Process, err error) {
// Make sure we have DWARF info.
if _, err := proc.DWARF(); err != nil {
return nil, fmt.Errorf("error reading dwarf: %w", err)
}
// Guard against failures of proc.Read* routines.
/*
defer func() {
e := recover()
if e == nil {
return
}
p = nil
if x, ok := e.(error); ok {
err = x
return
}
panic(e) // Not an error, re-panic it.
}()
*/
p = &Process{
proc: proc,
runtimeMap: map[core.Address]*Type{},
dwarfMap: map[dwarf.Type]*Type{},
}
// Initialize everything that just depends on DWARF.
p.readDWARFTypes()
p.readRuntimeConstants()
p.readGlobals()
// Find runtime globals we care about. Initialize regions for them.
p.rtGlobals = map[string]region{}
for _, g := range p.globals {
if strings.HasPrefix(g.Name, "runtime.") {
p.rtGlobals[g.Name[8:]] = region{p: p, a: g.Addr, typ: g.Type}
}
}
// Read all the data that depend on runtime globals.
p.buildVersion = p.rtGlobals["buildVersion"].String()
// runtime._type varint name length encoding, and mheap curArena
// counting changed behavior in 1.17 without explicitly related type
// changes, making the difference difficult to detect. As a workaround,
// we check on the version explicitly.
//
// Go 1.17 added runtime._func.flag, so use that as a sentinal for this
// version.
p.is117OrGreater = p.findType("runtime._func").HasField("flag")
p.readModules()
p.readHeap()
p.readGs()
p.readStackVars() // needs to be after readGs.
p.markObjects() // needs to be after readGlobals, readStackVars.
return p, nil
}
type arena struct {
heapMin core.Address
heapMax core.Address
bitmapMin core.Address
bitmapMax core.Address
spanTableMin core.Address
spanTableMax core.Address
}
func (p *Process) getArenaBaseOffset() int64 {
if x, ok := p.rtConstants["arenaBaseOffsetUintptr"]; ok { // go1.15+
// arenaBaseOffset changed sign in 1.15. Callers treat this
// value as it was specified in 1.14, so we negate it here.
return -x
}
return p.rtConstants["arenaBaseOffset"]
}
func (p *Process) readHeap() {
ptrSize := p.proc.PtrSize()
logPtrSize := p.proc.LogPtrSize()
p.pageTable = map[core.Address]*pageTableEntry{}
mheap := p.rtGlobals["mheap_"]
var arenas []arena
if mheap.HasField("spans") {
// go 1.9 or 1.10. There is a single arena.
arenaStart := core.Address(mheap.Field("arena_start").Uintptr())
arenaUsed := core.Address(mheap.Field("arena_used").Uintptr())
arenaEnd := core.Address(mheap.Field("arena_end").Uintptr())
bitmapEnd := core.Address(mheap.Field("bitmap").Uintptr())
bitmapStart := bitmapEnd.Add(-int64(mheap.Field("bitmap_mapped").Uintptr()))
spanTableStart := mheap.Field("spans").SlicePtr().Address()
spanTableEnd := spanTableStart.Add(mheap.Field("spans").SliceCap() * ptrSize)
arenas = append(arenas, arena{
heapMin: arenaStart,
heapMax: arenaEnd,
bitmapMin: bitmapStart,
bitmapMax: bitmapEnd,
spanTableMin: spanTableStart,
spanTableMax: spanTableEnd,
})
// Copy pointer bits to heap info.
// Note that the pointer bits are stored backwards.
for a := arenaStart; a < arenaUsed; a = a.Add(ptrSize) {
off := a.Sub(arenaStart) >> logPtrSize
if p.proc.ReadUint8(bitmapEnd.Add(-(off>>2)-1))>>uint(off&3)&1 != 0 {
p.setHeapPtr(a)
}
}
} else {
// go 1.11+. Has multiple arenas.
arenaSize := p.rtConstants["heapArenaBytes"]
if arenaSize%heapInfoSize != 0 {
panic("arenaSize not a multiple of heapInfoSize")
}
arenaBaseOffset := p.getArenaBaseOffset()
if ptrSize == 4 && arenaBaseOffset != 0 {
panic("arenaBaseOffset must be 0 for 32-bit inferior")
}
level1Table := mheap.Field("arenas")
level1size := level1Table.ArrayLen()
for level1 := int64(0); level1 < level1size; level1++ {
ptr := level1Table.ArrayIndex(level1)
if ptr.Address() == 0 {
continue
}
level2table := ptr.Deref()
level2size := level2table.ArrayLen()
for level2 := int64(0); level2 < level2size; level2++ {
ptr = level2table.ArrayIndex(level2)
if ptr.Address() == 0 {
continue
}
a := ptr.Deref()
min := core.Address(arenaSize*(level2+level1*level2size) - arenaBaseOffset)
max := min.Add(arenaSize)
bitmap := a.Field("bitmap")
oneBitBitmap := a.HasField("noMorePtrs") // Starting in 1.20.
spans := a.Field("spans")
arenas = append(arenas, arena{
heapMin: min,
heapMax: max,
bitmapMin: bitmap.a,
bitmapMax: bitmap.a.Add(bitmap.ArrayLen()),
spanTableMin: spans.a,
spanTableMax: spans.a.Add(spans.ArrayLen() * ptrSize),
})
// Copy out ptr/nonptr bits
n := bitmap.ArrayLen()
for i := int64(0); i < n; i++ {
if oneBitBitmap {
// The array uses 1 bit per word of heap. See mbitmap.go for
// more information.
m := bitmap.ArrayIndex(i).Uintptr()
bits := 8 * ptrSize
for j := int64(0); j < bits; j++ {
if m>>uint(j)&1 != 0 {
p.setHeapPtr(min.Add((i*bits + j) * ptrSize))
}
}
} else {
// The nth byte is composed of 4 object bits and 4 live/dead
// bits. We ignore the 4 live/dead bits, which are on the
// high order side of the byte.
//
// See mbitmap.go for more information on the format of
// the bitmap field of heapArena.
m := bitmap.ArrayIndex(i).Uint8()
for j := int64(0); j < 4; j++ {
if m>>uint(j)&1 != 0 {
p.setHeapPtr(min.Add((i*4 + j) * ptrSize))
}
}
}
}
}
}
}
p.readSpans(mheap, arenas)
}
func (p *Process) readSpans(mheap region, arenas []arena) {
var all int64
var text int64
var readOnly int64
var heap int64
var spanTable int64
var bitmap int64
var data int64
var bss int64 // also includes mmap'd regions
for _, m := range p.proc.Mappings() {
size := m.Size()
all += size
switch m.Perm() {
case core.Read:
readOnly += size
case core.Read | core.Exec:
text += size
case core.Read | core.Write:
if m.CopyOnWrite() {
// Check if m.file == text's file? That could distinguish
// data segment from mmapped file.
data += size
break
}
attribute := func(x, y core.Address, p *int64) {
a := x.Max(m.Min())
b := y.Min(m.Max())
if a < b {
*p += b.Sub(a)
size -= b.Sub(a)
}
}
for _, a := range arenas {
attribute(a.heapMin, a.heapMax, &heap)
attribute(a.bitmapMin, a.bitmapMax, &bitmap)
attribute(a.spanTableMin, a.spanTableMax, &spanTable)
}
// Any other anonymous mapping is bss.
// TODO: how to distinguish original bss from anonymous mmap?
bss += size
default:
panic("weird mapping " + m.Perm().String())
}
}
if !p.is117OrGreater && mheap.HasField("curArena") {
// 1.13.3 and up have curArena. Subtract unallocated space in
// the current arena from the heap.
//
// As of 1.17, the runtime does this automatically
// (https://go.dev/cl/270537).
ca := mheap.Field("curArena")
unused := int64(ca.Field("end").Uintptr() - ca.Field("base").Uintptr())
heap -= unused
all -= unused
}
pageSize := p.rtConstants["_PageSize"]
// Span types
spanInUse := uint8(p.rtConstants["_MSpanInUse"])
spanManual := uint8(p.rtConstants["_MSpanManual"])
spanDead := uint8(p.rtConstants["_MSpanDead"])
spanFree := uint8(p.rtConstants["_MSpanFree"])
// Process spans.
if pageSize%heapInfoSize != 0 {
panic(fmt.Sprintf("page size not a multiple of %d", heapInfoSize))
}
allspans := mheap.Field("allspans")
var freeSpanSize int64
var releasedSpanSize int64
var manualSpanSize int64
var inUseSpanSize int64
var allocSize int64
var freeSize int64
var spanRoundSize int64
var manualAllocSize int64
var manualFreeSize int64
n := allspans.SliceLen()
for i := int64(0); i < n; i++ {
s := allspans.SliceIndex(i).Deref()
min := core.Address(s.Field("startAddr").Uintptr())
elemSize := int64(s.Field("elemsize").Uintptr())
nPages := int64(s.Field("npages").Uintptr())
spanSize := nPages * pageSize
max := min.Add(spanSize)
for a := min; a != max; a = a.Add(pageSize) {
if !p.proc.Readable(a) {
// Sometimes allocated but not yet touched pages or
// MADV_DONTNEEDed pages are not written
// to the core file. Don't count these pages toward
// space usage (otherwise it can look like the heap
// is larger than the total memory used).
spanSize -= pageSize
}
}
st := s.Field("state")
if st.IsStruct() && st.HasField("s") { // go1.14+
st = st.Field("s")
}
if st.IsStruct() && st.HasField("value") { // go1.20+
st = st.Field("value")
}
switch st.Uint8() {
case spanInUse:
inUseSpanSize += spanSize
n := int64(s.Field("nelems").Uintptr())
// An object is allocated if it is marked as
// allocated or it is below freeindex.
x := s.Field("allocBits").Address()
alloc := make([]bool, n)
for i := int64(0); i < n; i++ {
alloc[i] = p.proc.ReadUint8(x.Add(i/8))>>uint(i%8)&1 != 0
}
k := int64(s.Field("freeindex").Uintptr())
for i := int64(0); i < k; i++ {
alloc[i] = true
}
for i := int64(0); i < n; i++ {
if alloc[i] {
allocSize += elemSize
} else {
freeSize += elemSize
}
}
spanRoundSize += spanSize - n*elemSize
// initialize heap info records for all inuse spans.
for a := min; a < max; a += heapInfoSize {
h := p.allocHeapInfo(a)
h.base = min
h.size = elemSize
}
// Process special records.
for sp := s.Field("specials"); sp.Address() != 0; sp = sp.Field("next") {
sp = sp.Deref() // *special to special
if sp.Field("kind").Uint8() != uint8(p.rtConstants["_KindSpecialFinalizer"]) {
// All other specials (just profile records) can't point into the heap.
continue
}
obj := min.Add(int64(sp.Field("offset").Uint16()))
p.globals = append(p.globals,
&Root{
Name: fmt.Sprintf("finalizer for %x", obj),
Addr: sp.a,
Type: p.findType("runtime.specialfinalizer"),
Frame: nil,
})
// TODO: these aren't really "globals", as they
// are kept alive by the object they reference being alive.
// But we have no way of adding edges from an object to
// the corresponding finalizer data, so we punt on that thorny
// issue for now.
}
case spanFree:
freeSpanSize += spanSize
if s.HasField("npreleased") { // go 1.11 and earlier
nReleased := int64(s.Field("npreleased").Uintptr())
releasedSpanSize += nReleased * pageSize
} else { // go 1.12 and beyond
if s.Field("scavenged").Bool() {
releasedSpanSize += spanSize
}
}
case spanDead:
// These are just deallocated span descriptors. They use no heap.
case spanManual:
manualSpanSize += spanSize
manualAllocSize += spanSize
for x := core.Address(s.Field("manualFreeList").Cast("uintptr").Uintptr()); x != 0; x = p.proc.ReadPtr(x) {
manualAllocSize -= elemSize
manualFreeSize += elemSize
}
}
}
if mheap.HasField("pages") { // go1.14+
// There are no longer "free" mspans to represent unused pages.
// Instead, there are just holes in the pagemap into which we can allocate.
// Look through the page allocator and count the total free space.
// Also keep track of how much has been scavenged.
pages := mheap.Field("pages")
chunks := pages.Field("chunks")
arenaBaseOffset := p.getArenaBaseOffset()
pallocChunkBytes := p.rtConstants["pallocChunkBytes"]
pallocChunksL1Bits := p.rtConstants["pallocChunksL1Bits"]
pallocChunksL2Bits := p.rtConstants["pallocChunksL2Bits"]
inuse := pages.Field("inUse")
ranges := inuse.Field("ranges")
for i := int64(0); i < ranges.SliceLen(); i++ {
r := ranges.SliceIndex(i)
baseField := r.Field("base")
if baseField.IsStruct() { // go 1.15+
baseField = baseField.Field("a")
}
base := core.Address(baseField.Uintptr())
limitField := r.Field("limit")
if limitField.IsStruct() { // go 1.15+
limitField = limitField.Field("a")
}
limit := core.Address(limitField.Uintptr())
chunkBase := (int64(base) + arenaBaseOffset) / pallocChunkBytes
chunkLimit := (int64(limit) + arenaBaseOffset) / pallocChunkBytes
for chunkIdx := chunkBase; chunkIdx < chunkLimit; chunkIdx++ {
var l1, l2 int64
if pallocChunksL1Bits == 0 {
l2 = chunkIdx
} else {
l1 = chunkIdx >> uint(pallocChunksL2Bits)
l2 = chunkIdx & (1<<uint(pallocChunksL2Bits) - 1)
}
chunk := chunks.ArrayIndex(l1).Deref().ArrayIndex(l2)
// Count the free bits in this chunk.
alloc := chunk.Field("pallocBits")
for i := int64(0); i < pallocChunkBytes/pageSize/64; i++ {
freeSpanSize += int64(bits.OnesCount64(^alloc.ArrayIndex(i).Uint64())) * pageSize
}
// Count the scavenged bits in this chunk.
scavenged := chunk.Field("scavenged")
for i := int64(0); i < pallocChunkBytes/pageSize/64; i++ {
releasedSpanSize += int64(bits.OnesCount64(scavenged.ArrayIndex(i).Uint64())) * pageSize
}
}
}
// Also count pages in the page cache for each P.
allp := p.rtGlobals["allp"]
for i := int64(0); i < allp.SliceLen(); i++ {
pcache := allp.SliceIndex(i).Deref().Field("pcache")
freeSpanSize += int64(bits.OnesCount64(pcache.Field("cache").Uint64())) * pageSize
releasedSpanSize += int64(bits.OnesCount64(pcache.Field("scav").Uint64())) * pageSize
}
}
p.stats = &Stats{"all", all, []*Stats{
&Stats{"text", text, nil},
&Stats{"readonly", readOnly, nil},
&Stats{"data", data, nil},
&Stats{"bss", bss, nil},
&Stats{"heap", heap, []*Stats{
&Stats{"in use spans", inUseSpanSize, []*Stats{
&Stats{"alloc", allocSize, nil},
&Stats{"free", freeSize, nil},
&Stats{"round", spanRoundSize, nil},
}},
&Stats{"manual spans", manualSpanSize, []*Stats{
&Stats{"alloc", manualAllocSize, nil},
&Stats{"free", manualFreeSize, nil},
}},
&Stats{"free spans", freeSpanSize, []*Stats{
&Stats{"retained", freeSpanSize - releasedSpanSize, nil},
&Stats{"released", releasedSpanSize, nil},
}},
}},
&Stats{"ptr bitmap", bitmap, nil},
&Stats{"span table", spanTable, nil},
}}
var check func(*Stats)
check = func(s *Stats) {
if len(s.Children) == 0 {
return
}
var sum int64
for _, c := range s.Children {
sum += c.Size
}
if sum != s.Size {
panic(fmt.Sprintf("check failed for %s: %d vs %d", s.Name, s.Size, sum))
}
for _, c := range s.Children {
check(c)
}
}
check(p.stats)
}
func (p *Process) readGs() {
// TODO: figure out how to "flush" running Gs.
allgs := p.rtGlobals["allgs"]
n := allgs.SliceLen()
for i := int64(0); i < n; i++ {
r := allgs.SliceIndex(i).Deref()
g := p.readG(r)
if g == nil {
continue
}
p.goroutines = append(p.goroutines, g)
}
}
func (p *Process) readG(r region) *Goroutine {
g := &Goroutine{r: r}
stk := r.Field("stack")
g.stackSize = int64(stk.Field("hi").Uintptr() - stk.Field("lo").Uintptr())
var osT *core.Thread // os thread working on behalf of this G (if any).
mp := r.Field("m")
if mp.Address() != 0 {
m := mp.Deref()
pid := m.Field("procid").Uint64()
// TODO check that m.curg points to g?
for _, t := range p.proc.Threads() {
if t.Pid() == pid {
osT = t
}
}
}
st := r.Field("atomicstatus")
if st.IsStruct() && st.HasField("value") { // go1.20+
st = st.Field("value")
}
status := st.Uint32()
status &^= uint32(p.rtConstants["_Gscan"])
var sp, pc core.Address
switch status {
case uint32(p.rtConstants["_Gidle"]):
return g
case uint32(p.rtConstants["_Grunnable"]), uint32(p.rtConstants["_Gwaiting"]):
sched := r.Field("sched")
sp = core.Address(sched.Field("sp").Uintptr())
pc = core.Address(sched.Field("pc").Uintptr())
case uint32(p.rtConstants["_Grunning"]):
sp = osT.SP()
pc = osT.PC()
// TODO: back up to the calling frame?
case uint32(p.rtConstants["_Gsyscall"]):
sp = core.Address(r.Field("syscallsp").Uintptr())
pc = core.Address(r.Field("syscallpc").Uintptr())
// TODO: or should we use the osT registers?
case uint32(p.rtConstants["_Gdead"]):
return nil
// TODO: copystack, others?
default:
// Unknown state. We can't read the frames, so just bail now.
// TODO: make this switch complete and then panic here.
// TODO: or just return nil?
return g
}
for {
f, err := p.readFrame(sp, pc)
if err != nil {
fmt.Printf("warning: giving up on backtrace: %v\n", err)
break
}
if f.f.name == "runtime.goexit" {
break
}
if len(g.frames) > 0 {
g.frames[len(g.frames)-1].parent = f
}
g.frames = append(g.frames, f)
if f.f.name == "runtime.sigtrampgo" {
// Continue traceback at location where the signal
// interrupted normal execution.
ctxt := p.proc.ReadPtr(sp.Add(16)) // 3rd arg
//ctxt is a *ucontext
mctxt := ctxt.Add(5 * 8)
// mctxt is a *mcontext
sp = p.proc.ReadPtr(mctxt.Add(15 * 8))
pc = p.proc.ReadPtr(mctxt.Add(16 * 8))
// TODO: totally arch-dependent!
} else {
sp = f.max
pc = core.Address(p.proc.ReadUintptr(sp - 8)) // TODO:amd64 only
}
if pc == 0 {
// TODO: when would this happen?
break
}
if f.f.name == "runtime.systemstack" {
// switch over to goroutine stack
sched := r.Field("sched")
sp = core.Address(sched.Field("sp").Uintptr())
pc = core.Address(sched.Field("pc").Uintptr())
}
}
return g
}
func (p *Process) readFrame(sp, pc core.Address) (*Frame, error) {
f := p.funcTab.find(pc)
if f == nil {
return nil, fmt.Errorf("cannot find func for pc=%#x", pc)
}
off := pc.Sub(f.entry)
size, err := f.frameSize.find(off)
if err != nil {
return nil, fmt.Errorf("cannot read frame size at pc=%#x: %v", pc, err)
}
size += p.proc.PtrSize() // TODO: on amd64, the pushed return address
frame := &Frame{f: f, pc: pc, min: sp, max: sp.Add(size)}
// Find live ptrs in locals
live := map[core.Address]bool{}
if x := int(p.rtConstants["_FUNCDATA_LocalsPointerMaps"]); x < len(f.funcdata) {
addr := f.funcdata[x]
// TODO: Ideally we should have the same frame size check as
// runtime.getStackSize to detect errors when we are missing
// the stackmap.
if addr != 0 {
locals := region{p: p, a: addr, typ: p.findType("runtime.stackmap")}
n := locals.Field("n").Int32() // # of bitmaps
nbit := locals.Field("nbit").Int32() // # of bits per bitmap
idx, err := f.stackMap.find(off)
if err != nil {
return nil, fmt.Errorf("cannot read stack map at pc=%#x: %v", pc, err)
}
if idx < 0 {
idx = 0
}
if idx < int64(n) {
bits := locals.Field("bytedata").a.Add(int64(nbit+7) / 8 * idx)
base := frame.max.Add(-16).Add(-int64(nbit) * p.proc.PtrSize())
// TODO: -16 for amd64. Return address and parent's frame pointer
for i := int64(0); i < int64(nbit); i++ {
if p.proc.ReadUint8(bits.Add(i/8))>>uint(i&7)&1 != 0 {
live[base.Add(i*p.proc.PtrSize())] = true
}
}
}
}
}
// Same for args
if x := int(p.rtConstants["_FUNCDATA_ArgsPointerMaps"]); x < len(f.funcdata) {
addr := f.funcdata[x]
if addr != 0 {
args := region{p: p, a: addr, typ: p.findType("runtime.stackmap")}
n := args.Field("n").Int32() // # of bitmaps
nbit := args.Field("nbit").Int32() // # of bits per bitmap
idx, err := f.stackMap.find(off)
if err != nil {
return nil, fmt.Errorf("cannot read stack map at pc=%#x: %v", pc, err)
}
if idx < 0 {
idx = 0
}
if idx < int64(n) {
bits := args.Field("bytedata").a.Add(int64(nbit+7) / 8 * idx)
base := frame.max
// TODO: add to base for LR archs.
for i := int64(0); i < int64(nbit); i++ {
if p.proc.ReadUint8(bits.Add(i/8))>>uint(i&7)&1 != 0 {
live[base.Add(i*p.proc.PtrSize())] = true
}
}
}
}
}
frame.Live = live
return frame, nil
}
// A Stats struct is the node of a tree representing the entire memory
// usage of the Go program. Children of a node break its usage down
// by category.
// We maintain the invariant that, if there are children,
// Size == sum(c.Size for c in Children).
type Stats struct {
Name string
Size int64
Children []*Stats
}
func (s *Stats) Child(name string) *Stats | {
for _, c := range s.Children {
if c.Name == name {
return c
}
}
return nil
} | identifier_body | |
process.go | // Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package gocore
import (
"debug/dwarf"
"fmt"
"math/bits"
"strings"
"sync"
"golang.org/x/debug/internal/core"
)
// A Process represents the state of a Go process that core dumped.
type Process struct {
proc *core.Process
// data structure for fast object finding
// The key to these maps is the object address divided by
// pageTableSize * heapInfoSize.
pageTable map[core.Address]*pageTableEntry
pages []core.Address // deterministic ordering of keys of pageTable
// number of live objects
nObj int
goroutines []*Goroutine
// runtime info
rtGlobals map[string]region
rtConstants map[string]int64
// A module is a loadable unit. Most Go programs have 1, programs
// which load plugins will have more.
modules []*module
// address -> function mapping
funcTab funcTab
// map from dwarf type to *Type
dwarfMap map[dwarf.Type]*Type
// map from address of runtime._type to *Type
runtimeMap map[core.Address]*Type
// map from runtime type name to the set of *Type with that name
// Used to find candidates to put in the runtimeMap map.
runtimeNameMap map[string][]*Type
// memory usage by category
stats *Stats
buildVersion string
// This is a Go 1.17 process, or higher. This field is used for
// differences in behavior that otherwise can't be detected via the
// type system.
is117OrGreater bool
globals []*Root
// Types of each object, indexed by object index.
initTypeHeap sync.Once
types []typeInfo
// Reverse edges.
// The reverse edges for object #i are redge[ridx[i]:ridx[i+1]].
// A "reverse edge" for object #i is a location in memory where a pointer
// to object #i lives.
initReverseEdges sync.Once
redge []core.Address
ridx []int64
// Sorted list of all roots.
// Only initialized if FlagReverse is passed to Core.
rootIdx []*Root
}
// Process returns the core.Process used to construct this Process.
func (p *Process) Process() *core.Process {
return p.proc
}
func (p *Process) Goroutines() []*Goroutine {
return p.goroutines
}
// Stats returns a breakdown of the program's memory use by category.
func (p *Process) Stats() *Stats {
return p.stats
}
// BuildVersion returns the Go version that was used to build the inferior binary.
func (p *Process) BuildVersion() string {
return p.buildVersion
}
func (p *Process) Globals() []*Root {
return p.globals
}
// FindFunc returns the function which contains the code at address pc, if any.
func (p *Process) FindFunc(pc core.Address) *Func {
return p.funcTab.find(pc)
}
func (p *Process) findType(name string) *Type {
s := p.runtimeNameMap[name]
if len(s) == 0 {
panic("can't find type " + name)
}
return s[0]
}
// Core takes a loaded core file and extracts Go information from it.
func Core(proc *core.Process) (p *Process, err error) {
// Make sure we have DWARF info.
if _, err := proc.DWARF(); err != nil {
return nil, fmt.Errorf("error reading dwarf: %w", err)
}
// Guard against failures of proc.Read* routines.
/*
defer func() {
e := recover()
if e == nil {
return
}
p = nil
if x, ok := e.(error); ok {
err = x
return
}
panic(e) // Not an error, re-panic it.
}()
*/
p = &Process{
proc: proc,
runtimeMap: map[core.Address]*Type{},
dwarfMap: map[dwarf.Type]*Type{},
}
// Initialize everything that just depends on DWARF.
p.readDWARFTypes()
p.readRuntimeConstants()
p.readGlobals()
// Find runtime globals we care about. Initialize regions for them.
p.rtGlobals = map[string]region{}
for _, g := range p.globals {
if strings.HasPrefix(g.Name, "runtime.") {
p.rtGlobals[g.Name[8:]] = region{p: p, a: g.Addr, typ: g.Type}
}
}
// Read all the data that depend on runtime globals.
p.buildVersion = p.rtGlobals["buildVersion"].String()
// runtime._type varint name length encoding, and mheap curArena
// counting changed behavior in 1.17 without explicitly related type
// changes, making the difference difficult to detect. As a workaround,
// we check on the version explicitly.
//
// Go 1.17 added runtime._func.flag, so use that as a sentinal for this
// version.
p.is117OrGreater = p.findType("runtime._func").HasField("flag")
p.readModules()
p.readHeap()
p.readGs()
p.readStackVars() // needs to be after readGs.
p.markObjects() // needs to be after readGlobals, readStackVars.
return p, nil
}
type arena struct {
heapMin core.Address
heapMax core.Address
bitmapMin core.Address
bitmapMax core.Address
spanTableMin core.Address
spanTableMax core.Address
}
func (p *Process) getArenaBaseOffset() int64 {
if x, ok := p.rtConstants["arenaBaseOffsetUintptr"]; ok { // go1.15+
// arenaBaseOffset changed sign in 1.15. Callers treat this
// value as it was specified in 1.14, so we negate it here.
return -x
}
return p.rtConstants["arenaBaseOffset"]
}
func (p *Process) readHeap() {
ptrSize := p.proc.PtrSize()
logPtrSize := p.proc.LogPtrSize()
p.pageTable = map[core.Address]*pageTableEntry{}
mheap := p.rtGlobals["mheap_"]
var arenas []arena
if mheap.HasField("spans") {
// go 1.9 or 1.10. There is a single arena.
arenaStart := core.Address(mheap.Field("arena_start").Uintptr())
arenaUsed := core.Address(mheap.Field("arena_used").Uintptr())
arenaEnd := core.Address(mheap.Field("arena_end").Uintptr())
bitmapEnd := core.Address(mheap.Field("bitmap").Uintptr())
bitmapStart := bitmapEnd.Add(-int64(mheap.Field("bitmap_mapped").Uintptr()))
spanTableStart := mheap.Field("spans").SlicePtr().Address()
spanTableEnd := spanTableStart.Add(mheap.Field("spans").SliceCap() * ptrSize)
arenas = append(arenas, arena{
heapMin: arenaStart,
heapMax: arenaEnd,
bitmapMin: bitmapStart,
bitmapMax: bitmapEnd,
spanTableMin: spanTableStart,
spanTableMax: spanTableEnd,
})
// Copy pointer bits to heap info.
// Note that the pointer bits are stored backwards.
for a := arenaStart; a < arenaUsed; a = a.Add(ptrSize) {
off := a.Sub(arenaStart) >> logPtrSize
if p.proc.ReadUint8(bitmapEnd.Add(-(off>>2)-1))>>uint(off&3)&1 != 0 {
p.setHeapPtr(a)
}
}
} else {
// go 1.11+. Has multiple arenas.
arenaSize := p.rtConstants["heapArenaBytes"]
if arenaSize%heapInfoSize != 0 {
panic("arenaSize not a multiple of heapInfoSize")
}
arenaBaseOffset := p.getArenaBaseOffset()
if ptrSize == 4 && arenaBaseOffset != 0 {
panic("arenaBaseOffset must be 0 for 32-bit inferior")
}
level1Table := mheap.Field("arenas")
level1size := level1Table.ArrayLen()
for level1 := int64(0); level1 < level1size; level1++ {
ptr := level1Table.ArrayIndex(level1)
if ptr.Address() == 0 {
continue
}
level2table := ptr.Deref()
level2size := level2table.ArrayLen()
for level2 := int64(0); level2 < level2size; level2++ {
ptr = level2table.ArrayIndex(level2)
if ptr.Address() == 0 {
continue
}
a := ptr.Deref()
min := core.Address(arenaSize*(level2+level1*level2size) - arenaBaseOffset)
max := min.Add(arenaSize)
bitmap := a.Field("bitmap")
oneBitBitmap := a.HasField("noMorePtrs") // Starting in 1.20.
spans := a.Field("spans")
arenas = append(arenas, arena{
heapMin: min,
heapMax: max,
bitmapMin: bitmap.a,
bitmapMax: bitmap.a.Add(bitmap.ArrayLen()),
spanTableMin: spans.a,
spanTableMax: spans.a.Add(spans.ArrayLen() * ptrSize),
})
// Copy out ptr/nonptr bits
n := bitmap.ArrayLen()
for i := int64(0); i < n; i++ {
if oneBitBitmap {
// The array uses 1 bit per word of heap. See mbitmap.go for
// more information.
m := bitmap.ArrayIndex(i).Uintptr()
bits := 8 * ptrSize
for j := int64(0); j < bits; j++ {
if m>>uint(j)&1 != 0 {
p.setHeapPtr(min.Add((i*bits + j) * ptrSize))
}
}
} else {
// The nth byte is composed of 4 object bits and 4 live/dead
// bits. We ignore the 4 live/dead bits, which are on the
// high order side of the byte.
//
// See mbitmap.go for more information on the format of
// the bitmap field of heapArena.
m := bitmap.ArrayIndex(i).Uint8()
for j := int64(0); j < 4; j++ {
if m>>uint(j)&1 != 0 {
p.setHeapPtr(min.Add((i*4 + j) * ptrSize))
}
}
}
}
}
}
}
p.readSpans(mheap, arenas)
}
func (p *Process) readSpans(mheap region, arenas []arena) {
var all int64
var text int64
var readOnly int64
var heap int64
var spanTable int64
var bitmap int64
var data int64
var bss int64 // also includes mmap'd regions
for _, m := range p.proc.Mappings() {
size := m.Size()
all += size
switch m.Perm() {
case core.Read:
readOnly += size
case core.Read | core.Exec:
text += size
case core.Read | core.Write:
if m.CopyOnWrite() {
// Check if m.file == text's file? That could distinguish
// data segment from mmapped file.
data += size
break
}
attribute := func(x, y core.Address, p *int64) {
a := x.Max(m.Min())
b := y.Min(m.Max())
if a < b {
*p += b.Sub(a)
size -= b.Sub(a)
}
}
for _, a := range arenas {
attribute(a.heapMin, a.heapMax, &heap)
attribute(a.bitmapMin, a.bitmapMax, &bitmap)
attribute(a.spanTableMin, a.spanTableMax, &spanTable)
}
// Any other anonymous mapping is bss.
// TODO: how to distinguish original bss from anonymous mmap?
bss += size
default:
panic("weird mapping " + m.Perm().String())
}
}
if !p.is117OrGreater && mheap.HasField("curArena") {
// 1.13.3 and up have curArena. Subtract unallocated space in
// the current arena from the heap.
//
// As of 1.17, the runtime does this automatically
// (https://go.dev/cl/270537).
ca := mheap.Field("curArena")
unused := int64(ca.Field("end").Uintptr() - ca.Field("base").Uintptr())
heap -= unused
all -= unused
}
pageSize := p.rtConstants["_PageSize"]
// Span types
spanInUse := uint8(p.rtConstants["_MSpanInUse"])
spanManual := uint8(p.rtConstants["_MSpanManual"])
spanDead := uint8(p.rtConstants["_MSpanDead"])
spanFree := uint8(p.rtConstants["_MSpanFree"])
// Process spans.
if pageSize%heapInfoSize != 0 {
panic(fmt.Sprintf("page size not a multiple of %d", heapInfoSize))
}
allspans := mheap.Field("allspans")
var freeSpanSize int64
var releasedSpanSize int64
var manualSpanSize int64
var inUseSpanSize int64
var allocSize int64
var freeSize int64
var spanRoundSize int64
var manualAllocSize int64
var manualFreeSize int64
n := allspans.SliceLen()
for i := int64(0); i < n; i++ {
s := allspans.SliceIndex(i).Deref()
min := core.Address(s.Field("startAddr").Uintptr())
elemSize := int64(s.Field("elemsize").Uintptr())
nPages := int64(s.Field("npages").Uintptr())
spanSize := nPages * pageSize
max := min.Add(spanSize)
for a := min; a != max; a = a.Add(pageSize) {
if !p.proc.Readable(a) {
// Sometimes allocated but not yet touched pages or
// MADV_DONTNEEDed pages are not written
// to the core file. Don't count these pages toward
// space usage (otherwise it can look like the heap
// is larger than the total memory used).
spanSize -= pageSize
}
}
st := s.Field("state")
if st.IsStruct() && st.HasField("s") { // go1.14+
st = st.Field("s")
}
if st.IsStruct() && st.HasField("value") { // go1.20+
st = st.Field("value")
}
switch st.Uint8() {
case spanInUse:
inUseSpanSize += spanSize
n := int64(s.Field("nelems").Uintptr())
// An object is allocated if it is marked as
// allocated or it is below freeindex.
x := s.Field("allocBits").Address()
alloc := make([]bool, n)
for i := int64(0); i < n; i++ {
alloc[i] = p.proc.ReadUint8(x.Add(i/8))>>uint(i%8)&1 != 0
}
k := int64(s.Field("freeindex").Uintptr())
for i := int64(0); i < k; i++ {
alloc[i] = true
}
for i := int64(0); i < n; i++ {
if alloc[i] {
allocSize += elemSize
} else {
freeSize += elemSize
} |
// initialize heap info records for all inuse spans.
for a := min; a < max; a += heapInfoSize {
h := p.allocHeapInfo(a)
h.base = min
h.size = elemSize
}
// Process special records.
for sp := s.Field("specials"); sp.Address() != 0; sp = sp.Field("next") {
sp = sp.Deref() // *special to special
if sp.Field("kind").Uint8() != uint8(p.rtConstants["_KindSpecialFinalizer"]) {
// All other specials (just profile records) can't point into the heap.
continue
}
obj := min.Add(int64(sp.Field("offset").Uint16()))
p.globals = append(p.globals,
&Root{
Name: fmt.Sprintf("finalizer for %x", obj),
Addr: sp.a,
Type: p.findType("runtime.specialfinalizer"),
Frame: nil,
})
// TODO: these aren't really "globals", as they
// are kept alive by the object they reference being alive.
// But we have no way of adding edges from an object to
// the corresponding finalizer data, so we punt on that thorny
// issue for now.
}
case spanFree:
freeSpanSize += spanSize
if s.HasField("npreleased") { // go 1.11 and earlier
nReleased := int64(s.Field("npreleased").Uintptr())
releasedSpanSize += nReleased * pageSize
} else { // go 1.12 and beyond
if s.Field("scavenged").Bool() {
releasedSpanSize += spanSize
}
}
case spanDead:
// These are just deallocated span descriptors. They use no heap.
case spanManual:
manualSpanSize += spanSize
manualAllocSize += spanSize
for x := core.Address(s.Field("manualFreeList").Cast("uintptr").Uintptr()); x != 0; x = p.proc.ReadPtr(x) {
manualAllocSize -= elemSize
manualFreeSize += elemSize
}
}
}
if mheap.HasField("pages") { // go1.14+
// There are no longer "free" mspans to represent unused pages.
// Instead, there are just holes in the pagemap into which we can allocate.
// Look through the page allocator and count the total free space.
// Also keep track of how much has been scavenged.
pages := mheap.Field("pages")
chunks := pages.Field("chunks")
arenaBaseOffset := p.getArenaBaseOffset()
pallocChunkBytes := p.rtConstants["pallocChunkBytes"]
pallocChunksL1Bits := p.rtConstants["pallocChunksL1Bits"]
pallocChunksL2Bits := p.rtConstants["pallocChunksL2Bits"]
inuse := pages.Field("inUse")
ranges := inuse.Field("ranges")
for i := int64(0); i < ranges.SliceLen(); i++ {
r := ranges.SliceIndex(i)
baseField := r.Field("base")
if baseField.IsStruct() { // go 1.15+
baseField = baseField.Field("a")
}
base := core.Address(baseField.Uintptr())
limitField := r.Field("limit")
if limitField.IsStruct() { // go 1.15+
limitField = limitField.Field("a")
}
limit := core.Address(limitField.Uintptr())
chunkBase := (int64(base) + arenaBaseOffset) / pallocChunkBytes
chunkLimit := (int64(limit) + arenaBaseOffset) / pallocChunkBytes
for chunkIdx := chunkBase; chunkIdx < chunkLimit; chunkIdx++ {
var l1, l2 int64
if pallocChunksL1Bits == 0 {
l2 = chunkIdx
} else {
l1 = chunkIdx >> uint(pallocChunksL2Bits)
l2 = chunkIdx & (1<<uint(pallocChunksL2Bits) - 1)
}
chunk := chunks.ArrayIndex(l1).Deref().ArrayIndex(l2)
// Count the free bits in this chunk.
alloc := chunk.Field("pallocBits")
for i := int64(0); i < pallocChunkBytes/pageSize/64; i++ {
freeSpanSize += int64(bits.OnesCount64(^alloc.ArrayIndex(i).Uint64())) * pageSize
}
// Count the scavenged bits in this chunk.
scavenged := chunk.Field("scavenged")
for i := int64(0); i < pallocChunkBytes/pageSize/64; i++ {
releasedSpanSize += int64(bits.OnesCount64(scavenged.ArrayIndex(i).Uint64())) * pageSize
}
}
}
// Also count pages in the page cache for each P.
allp := p.rtGlobals["allp"]
for i := int64(0); i < allp.SliceLen(); i++ {
pcache := allp.SliceIndex(i).Deref().Field("pcache")
freeSpanSize += int64(bits.OnesCount64(pcache.Field("cache").Uint64())) * pageSize
releasedSpanSize += int64(bits.OnesCount64(pcache.Field("scav").Uint64())) * pageSize
}
}
p.stats = &Stats{"all", all, []*Stats{
&Stats{"text", text, nil},
&Stats{"readonly", readOnly, nil},
&Stats{"data", data, nil},
&Stats{"bss", bss, nil},
&Stats{"heap", heap, []*Stats{
&Stats{"in use spans", inUseSpanSize, []*Stats{
&Stats{"alloc", allocSize, nil},
&Stats{"free", freeSize, nil},
&Stats{"round", spanRoundSize, nil},
}},
&Stats{"manual spans", manualSpanSize, []*Stats{
&Stats{"alloc", manualAllocSize, nil},
&Stats{"free", manualFreeSize, nil},
}},
&Stats{"free spans", freeSpanSize, []*Stats{
&Stats{"retained", freeSpanSize - releasedSpanSize, nil},
&Stats{"released", releasedSpanSize, nil},
}},
}},
&Stats{"ptr bitmap", bitmap, nil},
&Stats{"span table", spanTable, nil},
}}
var check func(*Stats)
check = func(s *Stats) {
if len(s.Children) == 0 {
return
}
var sum int64
for _, c := range s.Children {
sum += c.Size
}
if sum != s.Size {
panic(fmt.Sprintf("check failed for %s: %d vs %d", s.Name, s.Size, sum))
}
for _, c := range s.Children {
check(c)
}
}
check(p.stats)
}
func (p *Process) readGs() {
// TODO: figure out how to "flush" running Gs.
allgs := p.rtGlobals["allgs"]
n := allgs.SliceLen()
for i := int64(0); i < n; i++ {
r := allgs.SliceIndex(i).Deref()
g := p.readG(r)
if g == nil {
continue
}
p.goroutines = append(p.goroutines, g)
}
}
func (p *Process) readG(r region) *Goroutine {
g := &Goroutine{r: r}
stk := r.Field("stack")
g.stackSize = int64(stk.Field("hi").Uintptr() - stk.Field("lo").Uintptr())
var osT *core.Thread // os thread working on behalf of this G (if any).
mp := r.Field("m")
if mp.Address() != 0 {
m := mp.Deref()
pid := m.Field("procid").Uint64()
// TODO check that m.curg points to g?
for _, t := range p.proc.Threads() {
if t.Pid() == pid {
osT = t
}
}
}
st := r.Field("atomicstatus")
if st.IsStruct() && st.HasField("value") { // go1.20+
st = st.Field("value")
}
status := st.Uint32()
status &^= uint32(p.rtConstants["_Gscan"])
var sp, pc core.Address
switch status {
case uint32(p.rtConstants["_Gidle"]):
return g
case uint32(p.rtConstants["_Grunnable"]), uint32(p.rtConstants["_Gwaiting"]):
sched := r.Field("sched")
sp = core.Address(sched.Field("sp").Uintptr())
pc = core.Address(sched.Field("pc").Uintptr())
case uint32(p.rtConstants["_Grunning"]):
sp = osT.SP()
pc = osT.PC()
// TODO: back up to the calling frame?
case uint32(p.rtConstants["_Gsyscall"]):
sp = core.Address(r.Field("syscallsp").Uintptr())
pc = core.Address(r.Field("syscallpc").Uintptr())
// TODO: or should we use the osT registers?
case uint32(p.rtConstants["_Gdead"]):
return nil
// TODO: copystack, others?
default:
// Unknown state. We can't read the frames, so just bail now.
// TODO: make this switch complete and then panic here.
// TODO: or just return nil?
return g
}
for {
f, err := p.readFrame(sp, pc)
if err != nil {
fmt.Printf("warning: giving up on backtrace: %v\n", err)
break
}
if f.f.name == "runtime.goexit" {
break
}
if len(g.frames) > 0 {
g.frames[len(g.frames)-1].parent = f
}
g.frames = append(g.frames, f)
if f.f.name == "runtime.sigtrampgo" {
// Continue traceback at location where the signal
// interrupted normal execution.
ctxt := p.proc.ReadPtr(sp.Add(16)) // 3rd arg
//ctxt is a *ucontext
mctxt := ctxt.Add(5 * 8)
// mctxt is a *mcontext
sp = p.proc.ReadPtr(mctxt.Add(15 * 8))
pc = p.proc.ReadPtr(mctxt.Add(16 * 8))
// TODO: totally arch-dependent!
} else {
sp = f.max
pc = core.Address(p.proc.ReadUintptr(sp - 8)) // TODO:amd64 only
}
if pc == 0 {
// TODO: when would this happen?
break
}
if f.f.name == "runtime.systemstack" {
// switch over to goroutine stack
sched := r.Field("sched")
sp = core.Address(sched.Field("sp").Uintptr())
pc = core.Address(sched.Field("pc").Uintptr())
}
}
return g
}
func (p *Process) readFrame(sp, pc core.Address) (*Frame, error) {
f := p.funcTab.find(pc)
if f == nil {
return nil, fmt.Errorf("cannot find func for pc=%#x", pc)
}
off := pc.Sub(f.entry)
size, err := f.frameSize.find(off)
if err != nil {
return nil, fmt.Errorf("cannot read frame size at pc=%#x: %v", pc, err)
}
size += p.proc.PtrSize() // TODO: on amd64, the pushed return address
frame := &Frame{f: f, pc: pc, min: sp, max: sp.Add(size)}
// Find live ptrs in locals
live := map[core.Address]bool{}
if x := int(p.rtConstants["_FUNCDATA_LocalsPointerMaps"]); x < len(f.funcdata) {
addr := f.funcdata[x]
// TODO: Ideally we should have the same frame size check as
// runtime.getStackSize to detect errors when we are missing
// the stackmap.
if addr != 0 {
locals := region{p: p, a: addr, typ: p.findType("runtime.stackmap")}
n := locals.Field("n").Int32() // # of bitmaps
nbit := locals.Field("nbit").Int32() // # of bits per bitmap
idx, err := f.stackMap.find(off)
if err != nil {
return nil, fmt.Errorf("cannot read stack map at pc=%#x: %v", pc, err)
}
if idx < 0 {
idx = 0
}
if idx < int64(n) {
bits := locals.Field("bytedata").a.Add(int64(nbit+7) / 8 * idx)
base := frame.max.Add(-16).Add(-int64(nbit) * p.proc.PtrSize())
// TODO: -16 for amd64. Return address and parent's frame pointer
for i := int64(0); i < int64(nbit); i++ {
if p.proc.ReadUint8(bits.Add(i/8))>>uint(i&7)&1 != 0 {
live[base.Add(i*p.proc.PtrSize())] = true
}
}
}
}
}
// Same for args
if x := int(p.rtConstants["_FUNCDATA_ArgsPointerMaps"]); x < len(f.funcdata) {
addr := f.funcdata[x]
if addr != 0 {
args := region{p: p, a: addr, typ: p.findType("runtime.stackmap")}
n := args.Field("n").Int32() // # of bitmaps
nbit := args.Field("nbit").Int32() // # of bits per bitmap
idx, err := f.stackMap.find(off)
if err != nil {
return nil, fmt.Errorf("cannot read stack map at pc=%#x: %v", pc, err)
}
if idx < 0 {
idx = 0
}
if idx < int64(n) {
bits := args.Field("bytedata").a.Add(int64(nbit+7) / 8 * idx)
base := frame.max
// TODO: add to base for LR archs.
for i := int64(0); i < int64(nbit); i++ {
if p.proc.ReadUint8(bits.Add(i/8))>>uint(i&7)&1 != 0 {
live[base.Add(i*p.proc.PtrSize())] = true
}
}
}
}
}
frame.Live = live
return frame, nil
}
// A Stats struct is the node of a tree representing the entire memory
// usage of the Go program. Children of a node break its usage down
// by category.
// We maintain the invariant that, if there are children,
// Size == sum(c.Size for c in Children).
type Stats struct {
Name string
Size int64
Children []*Stats
}
func (s *Stats) Child(name string) *Stats {
for _, c := range s.Children {
if c.Name == name {
return c
}
}
return nil
} | }
spanRoundSize += spanSize - n*elemSize | random_line_split |
server.js | var mysql= require('mysql');
var express= require('express');
var http = require('http');
var app= express();
var morgan=require('morgan');
var bodyParser = require('body-parser');
var session = require('express-session');
var bcrypt=require('bcrypt');
var setCookie = require('set-cookie');
var CookieParser = require('restify-cookies');
var translate = require('google-translate-api');
var server = app.listen(process.env.PORT || 3000);
var io = require('socket.io').listen(server); //pass a http.Server instance
var server = http.createServer(app);
// var path = require('path');
// var port=process.env.PORT || 3000;
app.use(CookieParser.parse);
app.use(bodyParser.json());
app.use(express.static(__dirname+'/front/dist/'));
app.use(bodyParser.urlencoded({ extended: false}));
app.use(morgan('dev'));
app.use(session({
secret: 'shhh, it\'s a secret',
resave:false,
saveUninitialized: true
}));
// Host: sql9.freesqldatabase.com
// Database name: sql9203547
// Database user: sql9203547
// Database password: hhldFiMrKp
// Port number: 3306
var connect = mysql.createConnection({
host: 'sql9.freesqldatabase.com',
user:'sql9203547',
password:'hhldFiMrKp',
database:'sql9203547'
});
// --------------------------Data base side----------------------------------------
// ---------------------create tables and connection--------------------------------
connect.connect(function () {
var userTable = 'CREATE TABLE IF NOT EXISTS users( \
id INT AUTO_INCREMENT PRIMARY KEY, \
username varchar(255) NOT NULL UNIQUE,\
password varchar(255),\
Nationallity varchar(60),\
Birthday varchar(60) ,\
status varchar(255) ,\
imag longtext,\
Location varchar(60))';
// check it tomorrow??
var commentTable = 'CREATE TABLE IF NOT EXISTS comments( \
id INT AUTO_INCREMENT PRIMARY KEY, \
comment varchar(255) ,\
username varchar(255) ,\
roomID int ,\
FOREIGN KEY (roomID) REFERENCES rooms(id))';
// FOREIGN KEY (usernmae) REFERENCES users(id) ,\
var roomTable = 'CREATE TABLE IF NOT EXISTS rooms(id INT AUTO_INCREMENT PRIMARY KEY,location varchar(60),image longtext,discribtion varchar(255),contactInfo varchar(100),userID int,userName varchar(60),FOREIGN KEY (userID) REFERENCES users(id))';
connect.query(userTable);
connect.query(commentTable);
connect.query(roomTable);
});
// -----------------Sign Up ----and ------Login------------------------------------
// ----------------------sign up----------------------------------------
app.post('/signup',function (req,res) {
var password='';
var username= req.body.username;
var Image=req.body.image;
bcrypt.hash(req.body.password,3,function (err,hash) {
password=hash;
})
var Nationallity=req.body.nationality;
var Birthday=req.body.birthday;
var location=req.body.location;
var signup = 'SELECT * FROM users WHERE username=\''+username+'\'';
connect.query(signup,function (err,checkeduser) {
if(checkeduser.length<1){// user not exist
var data = 'INSERT INTO users (username,password,Nationallity,Birthday,location,imag) VALUES (\''+username+'\',\''+password+'\',\''+Nationallity+'\',\''+Birthday+'\',\''+location +'\',\''+Image+'\')';
connect.query(data);
res.send('true');
}else{
res.send('false');
}
});
});
// ---------------------login-----------------------------------------
var users=[];
var flag='false';
var x;
app.post('/login',function(req,res){
var results;
connect.query('SELECT * FROM users WHERE username=\''+req.body.username+'\'', function (err,result) {
console.log('hhhh',result,req.body.username )
if(result[0]!==undefined){
results=result;
compare();
}else{
flag=false;
res.send(flag)
}
});
function compare() |
var createSession = function(req, responce, newUser) {
return req.session.regenerate(function() {
//newuser>>>> { id: 2, username: 'hananmajali', password: 'hananmajali' }
bcrypt.hash(req.body.password,3,function (err,hash) {
console.log(hash)
// x={'infog':['u',username,'p',hash]}
})
req.session.user = newUser;
users.push(req.session.user.username)
// console.log('after login ',req.session.user.username)
// console.log('true from server')
// console.log('flag is ',flag);
// console.log('hhhhh',flag)
res.send(flag)
});
};
});
//--------------------logout-----------------------------------
//Logout function destroys the open session.
app.get('/logout',function (req,res) {
users.splice(users.indexOf(req.session.user.username),1)
flag = 'false';
req.session.destroy();
res.clearCookie('info');
res.send(flag);
});
app.get('/show',function(req,res){
res.send(flag)
})
//----------------create and save inside roomtable---------------
app.post('/post',function(req,res) {
console.log('in post ',req.session.user.username,req.session.user.id)
var location = req.body.location;
var discribtion = req.body.discribtion;
var contactInfo = req.body.contactInfo;
var Image = req.body.image
var post = 'INSERT INTO rooms (location,discribtion,contactInfo,userID,userName,image) VALUES (\''+location+'\',\''+discribtion+'\',\''+contactInfo+'\',\''+req.session.user.id+'\',\''+req.session.user.username+'\',\''+Image+'\')';
connect.query(post);
res.send(req.session.user.username);
});
//-----return all roomdata to the client side in the main page for all users-------
app.get('/main',function(req,res) {
var rooms = 'SELECT rooms.id,rooms.location,rooms.image,rooms.discribtion,rooms.contactInfo,rooms.userName,users.imag FROM users INNER JOIN rooms ON rooms.userID = users.id';
connect.query(rooms,function (err,result) {
res.send(result)
})
});
//-----return all roomdata to the client side in the profile page for one user-------
app.get('/profile',function(req,res) {
var userroom = 'SELECT * FROM rooms WHERE userName=\''+req.session.user.username+'\'';
var userinfo= 'SELECT * FROM users WHERE userName=\''+req.session.user.username+'\'';
var userinformation1;
connect.query(userinfo,function(err,userinfomation){
userinfomation1=userinfomation
})
connect.query(userroom,function (err,info) {
var total=[];
var str = info
total.push(str);
total.push(userinfomation1)
res.send(total);
});
});
//-------------------clicked on specific name to take me to that profile---------
app.post('/Userprofile',function(req,res) {
var userroom = 'SELECT * FROM rooms WHERE userName=\''+req.body.username+'\'';
var userinfo= 'SELECT * FROM users WHERE userName=\''+req.body.username+'\'';
var userinformation1;
connect.query(userinfo,function(err,userinfomation){
userinfomation1=userinfomation
})
connect.query(userroom,function (err,info) {
var total=[];
var str = info
total.push(str);
total.push(userinfomation1)
res.send(total);
});
});
// -----------------delete room -----------------------------------------------
app.post('/deleteroom',function(req,res){
var roomId=req.body.id // I will recieve it from client side
var deleteroom= 'DELETE FROM rooms WHERE id=\''+roomId +'\'';
connect.query(deleteroom);
})
// --------------post comment and send all the comment-------------------------
app.post('/postcomment',function(req,res){
var roomId= req.body.roomid;
var Comment=req.body.commet;
var Comment2='INSERT INTO comments (comment,username,roomID) VALUES (\''+Comment+'\',\''+req.session.user.username+'\',\''+roomId+'\')';
connect.query(Comment2);
var allcomments='SELECT comments.username,comments.comment,users.imag FROM comments INNER JOIN users ON comments.username=users.username AND comments.roomID=\''+roomId+'\' ORDER BY comments.id';
connect.query(allcomments,function(err,allcommentss){
res.send(allcommentss)
});
});
//---------languge-----------------------------
app.post('/translate',function(req,response){
var value=req.body;
translate(req.body.text, {from:req.body.languageFrom+'', to: req.body.languageTo+'' })
.then(res => {
console.log(res.text);
//=> I speak English
//console.log(res.from.language.iso);
//=> nl
response.send(JSON.stringify(res.text))
})
.catch(err => {
console.error(err);
});
})
//------------status of the users in their profiles------------
app.put('/status',function(req,res){
var Status='UPDATE users SET status=\''+req.body.status+'\' WHERE username=\''+req.session.user.username+'\'';
connect.query(Status);
})
app.get('/Chat', function(req,res){
console.log('hanan',req.session.user.username)
res.send(req.session.user.username)
})
//_-_-_-_-_-_-_-_-_-_-_-_-_-_-----------------------------------
var numUsers = 0;
console.log('numUsers',numUsers);
var lans=[];
var lan;
io.on('connection', function (socket) {
console.log('connected');
var addedUser = false;
// when the client emits 'new message', this listens and executes
socket.on('new message', function (data) {
if(lans.indexOf(data.lan)===-1){
lans.push(data.lan+'')
console.log(data.lan)
}
// for(var i=0;i>lans.length;i++){
// if(data.lan+''!==lans[i]){
// data.lan=lans[i]
// }
// }
if(lans[0]===data.lan+''){
data.lan=lans[1];
}else{
data.lan=lans[0]
}
console.log('array ',lans);
console.log('lan ',data.lan);
translate(data.message, { to:data.lan})
.then(res => {
console.log('hanan',res.text)
// we tell the client to execute 'new message'
socket.broadcast.emit('new message', {
username: socket.username,
message: res.text,
lan:data.lan
});
console.log('hanan',res.text)
})
});
// when the client emits 'add user', this listens and executes
socket.on('add user', function (username) {
if (addedUser) return;
// we store the username in the socket session for this client
socket.username = username.username;
++numUsers;
addedUser = true;
socket.emit('login', {
numUsers: numUsers
});
// echo globally (all clients) that a person has connected
socket.broadcast.emit('user joined', {
username: socket.username,
numUsers: numUsers
});
});
// when the client emits 'typing', we broadcast it to others
socket.on('typing', function () {
socket.broadcast.emit('typing', {
username: socket.username
});
});
// when the client emits 'stop typing', we broadcast it to others
socket.on('stop typing', function () {
socket.broadcast.emit('stop typing', {
username: socket.username
});
});
// when the user disconnects.. perform this
socket.on('disconnect', function () {
if (addedUser) {
--numUsers;
// echo globally that this client has left
socket.broadcast.emit('user left', {
username: socket.username,
numUsers: numUsers
});
}
});
/////////////////////////////////////////////////////////////////////////////
// socket.on('stop speaking' || 'stop typing',function (data) {
// console.log('stop speaking ',data)
// translate(data.text, {from:'en', to: 'en'})
// .then(res => {
// console.log('stop skeaoking tranzlate console here is ',res.text);
// socket.emit('speaked',res.text);
// //=> I speak English
// //console.log(res.from.language.iso);
// //=> nl
// // response.send(JSON.stringify(res.text))
// })
// .catch(err => {
// console.error(err);
// });
// })
});
// -------------------------------------------------------------------------------
// app.listen(port,function(){
// });
| {
bcrypt.compare(req.body.password,results[0].password,function (err,match) {
if(err){
console.log(err)
}
if(match){
console.log('this user is correct')
flag = 'true';
console.log('flag now is true')
createSession(req,res,results[0]);
}else{
console.log('this user is very bad')
console.log('flag now is false in else')
flag='false';
res.send(flag)
}
})
} | identifier_body |
server.js | var mysql= require('mysql');
var express= require('express');
var http = require('http');
var app= express();
var morgan=require('morgan');
var bodyParser = require('body-parser');
var session = require('express-session');
var bcrypt=require('bcrypt');
var setCookie = require('set-cookie');
var CookieParser = require('restify-cookies');
var translate = require('google-translate-api');
var server = app.listen(process.env.PORT || 3000);
var io = require('socket.io').listen(server); //pass a http.Server instance
var server = http.createServer(app);
// var path = require('path');
// var port=process.env.PORT || 3000;
app.use(CookieParser.parse);
app.use(bodyParser.json());
app.use(express.static(__dirname+'/front/dist/'));
app.use(bodyParser.urlencoded({ extended: false}));
app.use(morgan('dev'));
app.use(session({
secret: 'shhh, it\'s a secret',
resave:false,
saveUninitialized: true
}));
// Host: sql9.freesqldatabase.com
// Database name: sql9203547
// Database user: sql9203547
// Database password: hhldFiMrKp
// Port number: 3306
var connect = mysql.createConnection({
host: 'sql9.freesqldatabase.com',
user:'sql9203547',
password:'hhldFiMrKp',
database:'sql9203547'
});
// --------------------------Data base side----------------------------------------
// ---------------------create tables and connection--------------------------------
connect.connect(function () {
var userTable = 'CREATE TABLE IF NOT EXISTS users( \
id INT AUTO_INCREMENT PRIMARY KEY, \
username varchar(255) NOT NULL UNIQUE,\
password varchar(255),\
Nationallity varchar(60),\
Birthday varchar(60) ,\
status varchar(255) ,\
imag longtext,\
Location varchar(60))';
// check it tomorrow??
var commentTable = 'CREATE TABLE IF NOT EXISTS comments( \
id INT AUTO_INCREMENT PRIMARY KEY, \
comment varchar(255) ,\
username varchar(255) ,\
roomID int ,\
FOREIGN KEY (roomID) REFERENCES rooms(id))';
// FOREIGN KEY (usernmae) REFERENCES users(id) ,\
var roomTable = 'CREATE TABLE IF NOT EXISTS rooms(id INT AUTO_INCREMENT PRIMARY KEY,location varchar(60),image longtext,discribtion varchar(255),contactInfo varchar(100),userID int,userName varchar(60),FOREIGN KEY (userID) REFERENCES users(id))';
connect.query(userTable);
connect.query(commentTable);
connect.query(roomTable);
});
// -----------------Sign Up ----and ------Login------------------------------------
// ----------------------sign up----------------------------------------
app.post('/signup',function (req,res) {
var password='';
var username= req.body.username;
var Image=req.body.image;
bcrypt.hash(req.body.password,3,function (err,hash) {
password=hash;
})
var Nationallity=req.body.nationality;
var Birthday=req.body.birthday;
var location=req.body.location;
var signup = 'SELECT * FROM users WHERE username=\''+username+'\'';
connect.query(signup,function (err,checkeduser) {
if(checkeduser.length<1){// user not exist
var data = 'INSERT INTO users (username,password,Nationallity,Birthday,location,imag) VALUES (\''+username+'\',\''+password+'\',\''+Nationallity+'\',\''+Birthday+'\',\''+location +'\',\''+Image+'\')';
connect.query(data);
res.send('true');
}else{
res.send('false');
}
});
});
// ---------------------login-----------------------------------------
var users=[];
var flag='false';
var x;
app.post('/login',function(req,res){
var results;
connect.query('SELECT * FROM users WHERE username=\''+req.body.username+'\'', function (err,result) {
console.log('hhhh',result,req.body.username )
if(result[0]!==undefined){
results=result;
compare();
}else{
flag=false;
res.send(flag)
}
});
function compare() {
bcrypt.compare(req.body.password,results[0].password,function (err,match) {
if(err){
console.log(err)
}
if(match){
console.log('this user is correct')
flag = 'true';
console.log('flag now is true')
createSession(req,res,results[0]);
}else{
console.log('this user is very bad')
console.log('flag now is false in else')
flag='false';
res.send(flag)
}
})
}
var createSession = function(req, responce, newUser) {
return req.session.regenerate(function() {
//newuser>>>> { id: 2, username: 'hananmajali', password: 'hananmajali' }
bcrypt.hash(req.body.password,3,function (err,hash) {
console.log(hash)
// x={'infog':['u',username,'p',hash]}
})
req.session.user = newUser;
users.push(req.session.user.username)
// console.log('after login ',req.session.user.username)
// console.log('true from server')
// console.log('flag is ',flag);
// console.log('hhhhh',flag)
res.send(flag)
});
};
});
//--------------------logout-----------------------------------
//Logout function destroys the open session.
app.get('/logout',function (req,res) {
users.splice(users.indexOf(req.session.user.username),1)
flag = 'false';
req.session.destroy();
res.clearCookie('info');
res.send(flag);
});
app.get('/show',function(req,res){
res.send(flag)
})
//----------------create and save inside roomtable---------------
app.post('/post',function(req,res) {
console.log('in post ',req.session.user.username,req.session.user.id)
var location = req.body.location;
var discribtion = req.body.discribtion;
var contactInfo = req.body.contactInfo;
var Image = req.body.image
var post = 'INSERT INTO rooms (location,discribtion,contactInfo,userID,userName,image) VALUES (\''+location+'\',\''+discribtion+'\',\''+contactInfo+'\',\''+req.session.user.id+'\',\''+req.session.user.username+'\',\''+Image+'\')';
connect.query(post);
res.send(req.session.user.username);
});
//-----return all roomdata to the client side in the main page for all users-------
app.get('/main',function(req,res) {
var rooms = 'SELECT rooms.id,rooms.location,rooms.image,rooms.discribtion,rooms.contactInfo,rooms.userName,users.imag FROM users INNER JOIN rooms ON rooms.userID = users.id';
connect.query(rooms,function (err,result) {
res.send(result)
})
});
//-----return all roomdata to the client side in the profile page for one user-------
app.get('/profile',function(req,res) {
var userroom = 'SELECT * FROM rooms WHERE userName=\''+req.session.user.username+'\'';
var userinfo= 'SELECT * FROM users WHERE userName=\''+req.session.user.username+'\'';
var userinformation1;
connect.query(userinfo,function(err,userinfomation){
userinfomation1=userinfomation
})
connect.query(userroom,function (err,info) {
var total=[];
var str = info
total.push(str);
total.push(userinfomation1)
res.send(total);
});
});
//-------------------clicked on specific name to take me to that profile---------
app.post('/Userprofile',function(req,res) {
var userroom = 'SELECT * FROM rooms WHERE userName=\''+req.body.username+'\'';
var userinfo= 'SELECT * FROM users WHERE userName=\''+req.body.username+'\'';
var userinformation1;
connect.query(userinfo,function(err,userinfomation){
userinfomation1=userinfomation
})
connect.query(userroom,function (err,info) {
var total=[];
var str = info
total.push(str);
total.push(userinfomation1)
res.send(total);
});
});
// -----------------delete room -----------------------------------------------
app.post('/deleteroom',function(req,res){
var roomId=req.body.id // I will recieve it from client side
var deleteroom= 'DELETE FROM rooms WHERE id=\''+roomId +'\'';
connect.query(deleteroom);
})
// --------------post comment and send all the comment-------------------------
app.post('/postcomment',function(req,res){
var roomId= req.body.roomid;
var Comment=req.body.commet;
var Comment2='INSERT INTO comments (comment,username,roomID) VALUES (\''+Comment+'\',\''+req.session.user.username+'\',\''+roomId+'\')';
connect.query(Comment2);
var allcomments='SELECT comments.username,comments.comment,users.imag FROM comments INNER JOIN users ON comments.username=users.username AND comments.roomID=\''+roomId+'\' ORDER BY comments.id';
connect.query(allcomments,function(err,allcommentss){
res.send(allcommentss)
}); |
});
//---------languge-----------------------------
app.post('/translate',function(req,response){
var value=req.body;
translate(req.body.text, {from:req.body.languageFrom+'', to: req.body.languageTo+'' })
.then(res => {
console.log(res.text);
//=> I speak English
//console.log(res.from.language.iso);
//=> nl
response.send(JSON.stringify(res.text))
})
.catch(err => {
console.error(err);
});
})
//------------status of the users in their profiles------------
app.put('/status',function(req,res){
var Status='UPDATE users SET status=\''+req.body.status+'\' WHERE username=\''+req.session.user.username+'\'';
connect.query(Status);
})
app.get('/Chat', function(req,res){
console.log('hanan',req.session.user.username)
res.send(req.session.user.username)
})
//_-_-_-_-_-_-_-_-_-_-_-_-_-_-----------------------------------
var numUsers = 0;
console.log('numUsers',numUsers);
var lans=[];
var lan;
io.on('connection', function (socket) {
console.log('connected');
var addedUser = false;
// when the client emits 'new message', this listens and executes
socket.on('new message', function (data) {
if(lans.indexOf(data.lan)===-1){
lans.push(data.lan+'')
console.log(data.lan)
}
// for(var i=0;i>lans.length;i++){
// if(data.lan+''!==lans[i]){
// data.lan=lans[i]
// }
// }
if(lans[0]===data.lan+''){
data.lan=lans[1];
}else{
data.lan=lans[0]
}
console.log('array ',lans);
console.log('lan ',data.lan);
translate(data.message, { to:data.lan})
.then(res => {
console.log('hanan',res.text)
// we tell the client to execute 'new message'
socket.broadcast.emit('new message', {
username: socket.username,
message: res.text,
lan:data.lan
});
console.log('hanan',res.text)
})
});
// when the client emits 'add user', this listens and executes
socket.on('add user', function (username) {
if (addedUser) return;
// we store the username in the socket session for this client
socket.username = username.username;
++numUsers;
addedUser = true;
socket.emit('login', {
numUsers: numUsers
});
// echo globally (all clients) that a person has connected
socket.broadcast.emit('user joined', {
username: socket.username,
numUsers: numUsers
});
});
// when the client emits 'typing', we broadcast it to others
socket.on('typing', function () {
socket.broadcast.emit('typing', {
username: socket.username
});
});
// when the client emits 'stop typing', we broadcast it to others
socket.on('stop typing', function () {
socket.broadcast.emit('stop typing', {
username: socket.username
});
});
// when the user disconnects.. perform this
socket.on('disconnect', function () {
if (addedUser) {
--numUsers;
// echo globally that this client has left
socket.broadcast.emit('user left', {
username: socket.username,
numUsers: numUsers
});
}
});
/////////////////////////////////////////////////////////////////////////////
// socket.on('stop speaking' || 'stop typing',function (data) {
// console.log('stop speaking ',data)
// translate(data.text, {from:'en', to: 'en'})
// .then(res => {
// console.log('stop skeaoking tranzlate console here is ',res.text);
// socket.emit('speaked',res.text);
// //=> I speak English
// //console.log(res.from.language.iso);
// //=> nl
// // response.send(JSON.stringify(res.text))
// })
// .catch(err => {
// console.error(err);
// });
// })
});
// -------------------------------------------------------------------------------
// app.listen(port,function(){
// }); | random_line_split | |
server.js | var mysql= require('mysql');
var express= require('express');
var http = require('http');
var app= express();
var morgan=require('morgan');
var bodyParser = require('body-parser');
var session = require('express-session');
var bcrypt=require('bcrypt');
var setCookie = require('set-cookie');
var CookieParser = require('restify-cookies');
var translate = require('google-translate-api');
var server = app.listen(process.env.PORT || 3000);
var io = require('socket.io').listen(server); //pass a http.Server instance
var server = http.createServer(app);
// var path = require('path');
// var port=process.env.PORT || 3000;
app.use(CookieParser.parse);
app.use(bodyParser.json());
app.use(express.static(__dirname+'/front/dist/'));
app.use(bodyParser.urlencoded({ extended: false}));
app.use(morgan('dev'));
app.use(session({
secret: 'shhh, it\'s a secret',
resave:false,
saveUninitialized: true
}));
// Host: sql9.freesqldatabase.com
// Database name: sql9203547
// Database user: sql9203547
// Database password: hhldFiMrKp
// Port number: 3306
var connect = mysql.createConnection({
host: 'sql9.freesqldatabase.com',
user:'sql9203547',
password:'hhldFiMrKp',
database:'sql9203547'
});
// --------------------------Data base side----------------------------------------
// ---------------------create tables and connection--------------------------------
connect.connect(function () {
var userTable = 'CREATE TABLE IF NOT EXISTS users( \
id INT AUTO_INCREMENT PRIMARY KEY, \
username varchar(255) NOT NULL UNIQUE,\
password varchar(255),\
Nationallity varchar(60),\
Birthday varchar(60) ,\
status varchar(255) ,\
imag longtext,\
Location varchar(60))';
// check it tomorrow??
var commentTable = 'CREATE TABLE IF NOT EXISTS comments( \
id INT AUTO_INCREMENT PRIMARY KEY, \
comment varchar(255) ,\
username varchar(255) ,\
roomID int ,\
FOREIGN KEY (roomID) REFERENCES rooms(id))';
// FOREIGN KEY (usernmae) REFERENCES users(id) ,\
var roomTable = 'CREATE TABLE IF NOT EXISTS rooms(id INT AUTO_INCREMENT PRIMARY KEY,location varchar(60),image longtext,discribtion varchar(255),contactInfo varchar(100),userID int,userName varchar(60),FOREIGN KEY (userID) REFERENCES users(id))';
connect.query(userTable);
connect.query(commentTable);
connect.query(roomTable);
});
// -----------------Sign Up ----and ------Login------------------------------------
// ----------------------sign up----------------------------------------
app.post('/signup',function (req,res) {
var password='';
var username= req.body.username;
var Image=req.body.image;
bcrypt.hash(req.body.password,3,function (err,hash) {
password=hash;
})
var Nationallity=req.body.nationality;
var Birthday=req.body.birthday;
var location=req.body.location;
var signup = 'SELECT * FROM users WHERE username=\''+username+'\'';
connect.query(signup,function (err,checkeduser) {
if(checkeduser.length<1){// user not exist
var data = 'INSERT INTO users (username,password,Nationallity,Birthday,location,imag) VALUES (\''+username+'\',\''+password+'\',\''+Nationallity+'\',\''+Birthday+'\',\''+location +'\',\''+Image+'\')';
connect.query(data);
res.send('true');
}else{
res.send('false');
}
});
});
// ---------------------login-----------------------------------------
var users=[];
var flag='false';
var x;
app.post('/login',function(req,res){
var results;
connect.query('SELECT * FROM users WHERE username=\''+req.body.username+'\'', function (err,result) {
console.log('hhhh',result,req.body.username )
if(result[0]!==undefined){
results=result;
compare();
}else{
flag=false;
res.send(flag)
}
});
function compare() {
bcrypt.compare(req.body.password,results[0].password,function (err,match) {
if(err){
console.log(err)
}
if(match){
console.log('this user is correct')
flag = 'true';
console.log('flag now is true')
createSession(req,res,results[0]);
}else{
console.log('this user is very bad')
console.log('flag now is false in else')
flag='false';
res.send(flag)
}
})
}
var createSession = function(req, responce, newUser) {
return req.session.regenerate(function() {
//newuser>>>> { id: 2, username: 'hananmajali', password: 'hananmajali' }
bcrypt.hash(req.body.password,3,function (err,hash) {
console.log(hash)
// x={'infog':['u',username,'p',hash]}
})
req.session.user = newUser;
users.push(req.session.user.username)
// console.log('after login ',req.session.user.username)
// console.log('true from server')
// console.log('flag is ',flag);
// console.log('hhhhh',flag)
res.send(flag)
});
};
});
//--------------------logout-----------------------------------
//Logout function destroys the open session.
app.get('/logout',function (req,res) {
users.splice(users.indexOf(req.session.user.username),1)
flag = 'false';
req.session.destroy();
res.clearCookie('info');
res.send(flag);
});
app.get('/show',function(req,res){
res.send(flag)
})
//----------------create and save inside roomtable---------------
app.post('/post',function(req,res) {
console.log('in post ',req.session.user.username,req.session.user.id)
var location = req.body.location;
var discribtion = req.body.discribtion;
var contactInfo = req.body.contactInfo;
var Image = req.body.image
var post = 'INSERT INTO rooms (location,discribtion,contactInfo,userID,userName,image) VALUES (\''+location+'\',\''+discribtion+'\',\''+contactInfo+'\',\''+req.session.user.id+'\',\''+req.session.user.username+'\',\''+Image+'\')';
connect.query(post);
res.send(req.session.user.username);
});
//-----return all roomdata to the client side in the main page for all users-------
app.get('/main',function(req,res) {
var rooms = 'SELECT rooms.id,rooms.location,rooms.image,rooms.discribtion,rooms.contactInfo,rooms.userName,users.imag FROM users INNER JOIN rooms ON rooms.userID = users.id';
connect.query(rooms,function (err,result) {
res.send(result)
})
});
//-----return all roomdata to the client side in the profile page for one user-------
app.get('/profile',function(req,res) {
var userroom = 'SELECT * FROM rooms WHERE userName=\''+req.session.user.username+'\'';
var userinfo= 'SELECT * FROM users WHERE userName=\''+req.session.user.username+'\'';
var userinformation1;
connect.query(userinfo,function(err,userinfomation){
userinfomation1=userinfomation
})
connect.query(userroom,function (err,info) {
var total=[];
var str = info
total.push(str);
total.push(userinfomation1)
res.send(total);
});
});
//-------------------clicked on specific name to take me to that profile---------
app.post('/Userprofile',function(req,res) {
var userroom = 'SELECT * FROM rooms WHERE userName=\''+req.body.username+'\'';
var userinfo= 'SELECT * FROM users WHERE userName=\''+req.body.username+'\'';
var userinformation1;
connect.query(userinfo,function(err,userinfomation){
userinfomation1=userinfomation
})
connect.query(userroom,function (err,info) {
var total=[];
var str = info
total.push(str);
total.push(userinfomation1)
res.send(total);
});
});
// -----------------delete room -----------------------------------------------
app.post('/deleteroom',function(req,res){
var roomId=req.body.id // I will recieve it from client side
var deleteroom= 'DELETE FROM rooms WHERE id=\''+roomId +'\'';
connect.query(deleteroom);
})
// --------------post comment and send all the comment-------------------------
app.post('/postcomment',function(req,res){
var roomId= req.body.roomid;
var Comment=req.body.commet;
var Comment2='INSERT INTO comments (comment,username,roomID) VALUES (\''+Comment+'\',\''+req.session.user.username+'\',\''+roomId+'\')';
connect.query(Comment2);
var allcomments='SELECT comments.username,comments.comment,users.imag FROM comments INNER JOIN users ON comments.username=users.username AND comments.roomID=\''+roomId+'\' ORDER BY comments.id';
connect.query(allcomments,function(err,allcommentss){
res.send(allcommentss)
});
});
//---------languge-----------------------------
app.post('/translate',function(req,response){
var value=req.body;
translate(req.body.text, {from:req.body.languageFrom+'', to: req.body.languageTo+'' })
.then(res => {
console.log(res.text);
//=> I speak English
//console.log(res.from.language.iso);
//=> nl
response.send(JSON.stringify(res.text))
})
.catch(err => {
console.error(err);
});
})
//------------status of the users in their profiles------------
app.put('/status',function(req,res){
var Status='UPDATE users SET status=\''+req.body.status+'\' WHERE username=\''+req.session.user.username+'\'';
connect.query(Status);
})
app.get('/Chat', function(req,res){
console.log('hanan',req.session.user.username)
res.send(req.session.user.username)
})
//_-_-_-_-_-_-_-_-_-_-_-_-_-_-----------------------------------
var numUsers = 0;
console.log('numUsers',numUsers);
var lans=[];
var lan;
io.on('connection', function (socket) {
console.log('connected');
var addedUser = false;
// when the client emits 'new message', this listens and executes
socket.on('new message', function (data) {
if(lans.indexOf(data.lan)===-1){
lans.push(data.lan+'')
console.log(data.lan)
}
// for(var i=0;i>lans.length;i++){
// if(data.lan+''!==lans[i]){
// data.lan=lans[i]
// }
// }
if(lans[0]===data.lan+''){
data.lan=lans[1];
}else{
data.lan=lans[0]
}
console.log('array ',lans);
console.log('lan ',data.lan);
translate(data.message, { to:data.lan})
.then(res => {
console.log('hanan',res.text)
// we tell the client to execute 'new message'
socket.broadcast.emit('new message', {
username: socket.username,
message: res.text,
lan:data.lan
});
console.log('hanan',res.text)
})
});
// when the client emits 'add user', this listens and executes
socket.on('add user', function (username) {
if (addedUser) return;
// we store the username in the socket session for this client
socket.username = username.username;
++numUsers;
addedUser = true;
socket.emit('login', {
numUsers: numUsers
});
// echo globally (all clients) that a person has connected
socket.broadcast.emit('user joined', {
username: socket.username,
numUsers: numUsers
});
});
// when the client emits 'typing', we broadcast it to others
socket.on('typing', function () {
socket.broadcast.emit('typing', {
username: socket.username
});
});
// when the client emits 'stop typing', we broadcast it to others
socket.on('stop typing', function () {
socket.broadcast.emit('stop typing', {
username: socket.username
});
});
// when the user disconnects.. perform this
socket.on('disconnect', function () {
if (addedUser) |
});
/////////////////////////////////////////////////////////////////////////////
// socket.on('stop speaking' || 'stop typing',function (data) {
// console.log('stop speaking ',data)
// translate(data.text, {from:'en', to: 'en'})
// .then(res => {
// console.log('stop skeaoking tranzlate console here is ',res.text);
// socket.emit('speaked',res.text);
// //=> I speak English
// //console.log(res.from.language.iso);
// //=> nl
// // response.send(JSON.stringify(res.text))
// })
// .catch(err => {
// console.error(err);
// });
// })
});
// -------------------------------------------------------------------------------
// app.listen(port,function(){
// });
| {
--numUsers;
// echo globally that this client has left
socket.broadcast.emit('user left', {
username: socket.username,
numUsers: numUsers
});
} | conditional_block |
server.js | var mysql= require('mysql');
var express= require('express');
var http = require('http');
var app= express();
var morgan=require('morgan');
var bodyParser = require('body-parser');
var session = require('express-session');
var bcrypt=require('bcrypt');
var setCookie = require('set-cookie');
var CookieParser = require('restify-cookies');
var translate = require('google-translate-api');
var server = app.listen(process.env.PORT || 3000);
var io = require('socket.io').listen(server); //pass a http.Server instance
var server = http.createServer(app);
// var path = require('path');
// var port=process.env.PORT || 3000;
app.use(CookieParser.parse);
app.use(bodyParser.json());
app.use(express.static(__dirname+'/front/dist/'));
app.use(bodyParser.urlencoded({ extended: false}));
app.use(morgan('dev'));
app.use(session({
secret: 'shhh, it\'s a secret',
resave:false,
saveUninitialized: true
}));
// Host: sql9.freesqldatabase.com
// Database name: sql9203547
// Database user: sql9203547
// Database password: hhldFiMrKp
// Port number: 3306
var connect = mysql.createConnection({
host: 'sql9.freesqldatabase.com',
user:'sql9203547',
password:'hhldFiMrKp',
database:'sql9203547'
});
// --------------------------Data base side----------------------------------------
// ---------------------create tables and connection--------------------------------
connect.connect(function () {
var userTable = 'CREATE TABLE IF NOT EXISTS users( \
id INT AUTO_INCREMENT PRIMARY KEY, \
username varchar(255) NOT NULL UNIQUE,\
password varchar(255),\
Nationallity varchar(60),\
Birthday varchar(60) ,\
status varchar(255) ,\
imag longtext,\
Location varchar(60))';
// check it tomorrow??
var commentTable = 'CREATE TABLE IF NOT EXISTS comments( \
id INT AUTO_INCREMENT PRIMARY KEY, \
comment varchar(255) ,\
username varchar(255) ,\
roomID int ,\
FOREIGN KEY (roomID) REFERENCES rooms(id))';
// FOREIGN KEY (usernmae) REFERENCES users(id) ,\
var roomTable = 'CREATE TABLE IF NOT EXISTS rooms(id INT AUTO_INCREMENT PRIMARY KEY,location varchar(60),image longtext,discribtion varchar(255),contactInfo varchar(100),userID int,userName varchar(60),FOREIGN KEY (userID) REFERENCES users(id))';
connect.query(userTable);
connect.query(commentTable);
connect.query(roomTable);
});
// -----------------Sign Up ----and ------Login------------------------------------
// ----------------------sign up----------------------------------------
app.post('/signup',function (req,res) {
var password='';
var username= req.body.username;
var Image=req.body.image;
bcrypt.hash(req.body.password,3,function (err,hash) {
password=hash;
})
var Nationallity=req.body.nationality;
var Birthday=req.body.birthday;
var location=req.body.location;
var signup = 'SELECT * FROM users WHERE username=\''+username+'\'';
connect.query(signup,function (err,checkeduser) {
if(checkeduser.length<1){// user not exist
var data = 'INSERT INTO users (username,password,Nationallity,Birthday,location,imag) VALUES (\''+username+'\',\''+password+'\',\''+Nationallity+'\',\''+Birthday+'\',\''+location +'\',\''+Image+'\')';
connect.query(data);
res.send('true');
}else{
res.send('false');
}
});
});
// ---------------------login-----------------------------------------
var users=[];
var flag='false';
var x;
app.post('/login',function(req,res){
var results;
connect.query('SELECT * FROM users WHERE username=\''+req.body.username+'\'', function (err,result) {
console.log('hhhh',result,req.body.username )
if(result[0]!==undefined){
results=result;
compare();
}else{
flag=false;
res.send(flag)
}
});
function | () {
bcrypt.compare(req.body.password,results[0].password,function (err,match) {
if(err){
console.log(err)
}
if(match){
console.log('this user is correct')
flag = 'true';
console.log('flag now is true')
createSession(req,res,results[0]);
}else{
console.log('this user is very bad')
console.log('flag now is false in else')
flag='false';
res.send(flag)
}
})
}
var createSession = function(req, responce, newUser) {
return req.session.regenerate(function() {
//newuser>>>> { id: 2, username: 'hananmajali', password: 'hananmajali' }
bcrypt.hash(req.body.password,3,function (err,hash) {
console.log(hash)
// x={'infog':['u',username,'p',hash]}
})
req.session.user = newUser;
users.push(req.session.user.username)
// console.log('after login ',req.session.user.username)
// console.log('true from server')
// console.log('flag is ',flag);
// console.log('hhhhh',flag)
res.send(flag)
});
};
});
//--------------------logout-----------------------------------
//Logout function destroys the open session.
app.get('/logout',function (req,res) {
users.splice(users.indexOf(req.session.user.username),1)
flag = 'false';
req.session.destroy();
res.clearCookie('info');
res.send(flag);
});
app.get('/show',function(req,res){
res.send(flag)
})
//----------------create and save inside roomtable---------------
app.post('/post',function(req,res) {
console.log('in post ',req.session.user.username,req.session.user.id)
var location = req.body.location;
var discribtion = req.body.discribtion;
var contactInfo = req.body.contactInfo;
var Image = req.body.image
var post = 'INSERT INTO rooms (location,discribtion,contactInfo,userID,userName,image) VALUES (\''+location+'\',\''+discribtion+'\',\''+contactInfo+'\',\''+req.session.user.id+'\',\''+req.session.user.username+'\',\''+Image+'\')';
connect.query(post);
res.send(req.session.user.username);
});
//-----return all roomdata to the client side in the main page for all users-------
app.get('/main',function(req,res) {
var rooms = 'SELECT rooms.id,rooms.location,rooms.image,rooms.discribtion,rooms.contactInfo,rooms.userName,users.imag FROM users INNER JOIN rooms ON rooms.userID = users.id';
connect.query(rooms,function (err,result) {
res.send(result)
})
});
//-----return all roomdata to the client side in the profile page for one user-------
app.get('/profile',function(req,res) {
var userroom = 'SELECT * FROM rooms WHERE userName=\''+req.session.user.username+'\'';
var userinfo= 'SELECT * FROM users WHERE userName=\''+req.session.user.username+'\'';
var userinformation1;
connect.query(userinfo,function(err,userinfomation){
userinfomation1=userinfomation
})
connect.query(userroom,function (err,info) {
var total=[];
var str = info
total.push(str);
total.push(userinfomation1)
res.send(total);
});
});
//-------------------clicked on specific name to take me to that profile---------
app.post('/Userprofile',function(req,res) {
var userroom = 'SELECT * FROM rooms WHERE userName=\''+req.body.username+'\'';
var userinfo= 'SELECT * FROM users WHERE userName=\''+req.body.username+'\'';
var userinformation1;
connect.query(userinfo,function(err,userinfomation){
userinfomation1=userinfomation
})
connect.query(userroom,function (err,info) {
var total=[];
var str = info
total.push(str);
total.push(userinfomation1)
res.send(total);
});
});
// -----------------delete room -----------------------------------------------
app.post('/deleteroom',function(req,res){
var roomId=req.body.id // I will recieve it from client side
var deleteroom= 'DELETE FROM rooms WHERE id=\''+roomId +'\'';
connect.query(deleteroom);
})
// --------------post comment and send all the comment-------------------------
app.post('/postcomment',function(req,res){
var roomId= req.body.roomid;
var Comment=req.body.commet;
var Comment2='INSERT INTO comments (comment,username,roomID) VALUES (\''+Comment+'\',\''+req.session.user.username+'\',\''+roomId+'\')';
connect.query(Comment2);
var allcomments='SELECT comments.username,comments.comment,users.imag FROM comments INNER JOIN users ON comments.username=users.username AND comments.roomID=\''+roomId+'\' ORDER BY comments.id';
connect.query(allcomments,function(err,allcommentss){
res.send(allcommentss)
});
});
//---------languge-----------------------------
app.post('/translate',function(req,response){
var value=req.body;
translate(req.body.text, {from:req.body.languageFrom+'', to: req.body.languageTo+'' })
.then(res => {
console.log(res.text);
//=> I speak English
//console.log(res.from.language.iso);
//=> nl
response.send(JSON.stringify(res.text))
})
.catch(err => {
console.error(err);
});
})
//------------status of the users in their profiles------------
app.put('/status',function(req,res){
var Status='UPDATE users SET status=\''+req.body.status+'\' WHERE username=\''+req.session.user.username+'\'';
connect.query(Status);
})
app.get('/Chat', function(req,res){
console.log('hanan',req.session.user.username)
res.send(req.session.user.username)
})
//_-_-_-_-_-_-_-_-_-_-_-_-_-_-----------------------------------
var numUsers = 0;
console.log('numUsers',numUsers);
var lans=[];
var lan;
io.on('connection', function (socket) {
console.log('connected');
var addedUser = false;
// when the client emits 'new message', this listens and executes
socket.on('new message', function (data) {
if(lans.indexOf(data.lan)===-1){
lans.push(data.lan+'')
console.log(data.lan)
}
// for(var i=0;i>lans.length;i++){
// if(data.lan+''!==lans[i]){
// data.lan=lans[i]
// }
// }
if(lans[0]===data.lan+''){
data.lan=lans[1];
}else{
data.lan=lans[0]
}
console.log('array ',lans);
console.log('lan ',data.lan);
translate(data.message, { to:data.lan})
.then(res => {
console.log('hanan',res.text)
// we tell the client to execute 'new message'
socket.broadcast.emit('new message', {
username: socket.username,
message: res.text,
lan:data.lan
});
console.log('hanan',res.text)
})
});
// when the client emits 'add user', this listens and executes
socket.on('add user', function (username) {
if (addedUser) return;
// we store the username in the socket session for this client
socket.username = username.username;
++numUsers;
addedUser = true;
socket.emit('login', {
numUsers: numUsers
});
// echo globally (all clients) that a person has connected
socket.broadcast.emit('user joined', {
username: socket.username,
numUsers: numUsers
});
});
// when the client emits 'typing', we broadcast it to others
socket.on('typing', function () {
socket.broadcast.emit('typing', {
username: socket.username
});
});
// when the client emits 'stop typing', we broadcast it to others
socket.on('stop typing', function () {
socket.broadcast.emit('stop typing', {
username: socket.username
});
});
// when the user disconnects.. perform this
socket.on('disconnect', function () {
if (addedUser) {
--numUsers;
// echo globally that this client has left
socket.broadcast.emit('user left', {
username: socket.username,
numUsers: numUsers
});
}
});
/////////////////////////////////////////////////////////////////////////////
// socket.on('stop speaking' || 'stop typing',function (data) {
// console.log('stop speaking ',data)
// translate(data.text, {from:'en', to: 'en'})
// .then(res => {
// console.log('stop skeaoking tranzlate console here is ',res.text);
// socket.emit('speaked',res.text);
// //=> I speak English
// //console.log(res.from.language.iso);
// //=> nl
// // response.send(JSON.stringify(res.text))
// })
// .catch(err => {
// console.error(err);
// });
// })
});
// -------------------------------------------------------------------------------
// app.listen(port,function(){
// });
| compare | identifier_name |
msc_chart.py | from datetime import datetime, timedelta
from enum import Enum
from time import sleep
from typing import Dict, List, Optional, Tuple
from github import Github
from github.GithubException import RateLimitExceededException
from github.Issue import Issue
from github.IssueEvent import IssueEvent
from github.Label import Label
from plotly import graph_objects as go
from progress.bar import Bar
class ChartType(Enum):
PIE = 1
STACKED_AREA = 2
class MSCState(Enum):
NEW = 1
FCP = 2
MERGED = 3
POSTPONED = 4
CLOSED = 5
class MSCChart(object):
"""A chart representing Matrix Spec Changes
Args:
pygithub: A pygithub Github object for the library to use. If not set, github_token
must be set instead
github_token: A github auth token to perform API queries with
print_progress: Whether to print progress of chart generation to stdout
"""
def __init__(
self,
pygithub: Optional[Github] = None,
github_token: Optional[str] = None,
print_progress: bool = True,
):
self.print_progress = print_progress
if pygithub: | elif github_token:
g = Github(github_token)
else:
raise Exception(
"Either pygithub or github_token must be set when initializing MSCChart"
)
# Create a Github instance. The token only needs read:public_repo
self.repository = g.get_repo("matrix-org/matrix-doc")
def generate(self, type: ChartType, filepath: str):
"""Generate the chart
Args:
type: The type of chart to generate
filepath: Where to place the generated chart
"""
# Choose which chart type to generate
if type == ChartType.PIE:
self._generate_msc_pie_chart(filepath)
elif type == ChartType.STACKED_AREA:
self._generate_stacked_area_chart(filepath)
def _generate_stacked_area_chart(self, filepath: str):
"""Generates a historical stacked area chart of msc status"""
# Get time of the earliest issue
mscs = list(
self.repository.get_issues(
sort="created", state="all", direction="asc", labels=["proposal"],
)
)
# There are some MSCs that date all the way back to 2014. These skew the chart a bit,
# so lop those off
outlier_threshold = datetime.fromisoformat("2018-04-29T00:00:00")
# Generate list of weeks since the first msc
weeks = []
t = mscs[0].created_at
while t < datetime.now():
if t > outlier_threshold:
# Add t to our list of weeks
weeks.append(t)
# Move forward by three weeks
t = t + timedelta(weeks=1)
# And calculate it for today
weeks.append(datetime.now())
# Extract MSC event data beforehand so we don't do so again every week
msc_events = []
bar = Bar("Grabbing list of events for each MSC...", max=len(mscs))
for msc in mscs:
# TODO: We could theoretically optimize this by saving a list of events per
# MSC in a DB between runs. If the count of events for a given MSC number
# hasn't changed, then don't update the events
# This would prevent us from needing to fetch the label for each event
# Also try the GraphQL API
# Loop until we succeeded in getting the events for this MSC
while True:
try:
# Pre-request the event labels. This apparently takes another API call
event_label_tuples = []
for event in msc.get_events():
event_label_tuples.append(
(event, event.label if event.event == "labeled" else None)
)
# Events retrieved, break out of the inner loop
msc_events.append(event_label_tuples)
break
except RateLimitExceededException:
# Wait a bit and retry
if self.print_progress:
print("\nHit Ratelimit. Waiting 1 minute...")
sleep(60)
if self.print_progress:
bar.next()
if self.print_progress:
bar.finish()
if self.print_progress:
print("Got", sum((len(events) for events in msc_events)), "total events")
# Get the count of each MSC type at a given week
new_mscs = []
fcp_mscs = []
closed_mscs = []
merged_mscs = []
bar = Bar("Processing MSC state snapshots...", max=len(weeks))
for week in weeks:
new_msc_count = 0
fcp_msc_count = 0
closed_msc_count = 0
merged_msc_count = 0
for index, msc in enumerate(mscs):
msc_state = self._get_msc_state_at_time(msc, msc_events[index], week)
if msc_state == MSCState.NEW:
new_msc_count += 1
elif msc_state == MSCState.FCP:
fcp_msc_count += 1
elif msc_state == MSCState.CLOSED:
closed_msc_count += 1
elif msc_state == MSCState.MERGED:
merged_msc_count += 1
# Note down all counts for this week
new_mscs.append(new_msc_count)
fcp_mscs.append(fcp_msc_count)
closed_mscs.append(closed_msc_count)
merged_mscs.append(merged_msc_count)
if self.print_progress:
bar.next()
if self.print_progress:
bar.finish()
str_weeks = [dt.strftime("%d-%m-%Y") for dt in weeks]
fig = go.Figure()
fig.add_trace(
go.Scatter(
x=str_weeks,
y=merged_mscs,
hoverinfo="x+y",
mode="lines",
name="Merged",
line=dict(width=0.5, color="#6f42c1"),
stackgroup="one",
)
)
fig.add_trace(
go.Scatter(
x=str_weeks,
y=closed_mscs,
hoverinfo="x+y",
mode="lines",
name="Closed",
line=dict(width=0.5, color="#ce303d"),
stackgroup="one",
)
)
fig.add_trace(
go.Scatter(
x=str_weeks,
y=fcp_mscs,
hoverinfo="x+y",
mode="lines",
name="FCP",
line=dict(width=0.5, color="yellow"),
stackgroup="one",
)
)
fig.add_trace(
go.Scatter(
x=str_weeks,
y=new_mscs,
hoverinfo="x+y",
mode="lines",
name="New",
line=dict(width=0.5, color="#28a745"),
stackgroup="one",
)
)
# Add a nice title
fig.update_layout(
title={
"text": "Matrix Spec Change Proposals",
"y": 0.95,
"x": 0.5,
"xanchor": "center",
"yanchor": "top",
},
font=dict(family="Arial", size=18, color="#222222",),
)
fig.write_image(filepath)
def _get_msc_state_at_time(
self,
msc: Issue,
msc_events: List[Tuple[IssueEvent, Optional[Label]]],
dt: datetime,
) -> MSCState:
"""Given a datetime, get the state of an MSC at that time
Args:
msc: The MSC to target,
msc_events: A cached List of github issue events to process, as well as cached label
information if the issue event relates to labels. We cache all this information
up front as pass it is as otherwise we'd have to do separate API requests for
each of them.
dt: The threshold at which to stop processing issue events, thus giving you the
state of an MSC at this given time.
"""
# Iterate through MSC events and calculate the current state of the issue at a given
# time
# Initially assume it doesn't exist. Change the state as we iterate through events
state = {
"prev_state": None,
"state": None,
} # type: Dict[str, Optional[MSCState]]
finished_fcp = False
def update_state(new_state: MSCState):
state["prev_state"] = state["state"]
state["state"] = new_state
disposition_state = None
is_closed = False
has_label_merged = False
rejected_or_abandoned = False
for event, label in msc_events:
if event.created_at > dt:
# We've reached our datetime threshold
break
# Classify the event
if label:
label_name = label.name
# This is a label event
if label_name == "proposal":
update_state(MSCState.NEW)
elif label_name == "final-comment-period":
update_state(MSCState.FCP)
elif label_name == "disposition-merge":
disposition_state = MSCState.MERGED
elif label_name == "disposition-close":
disposition_state = MSCState.CLOSED
elif label_name == "disposition-postpone":
disposition_state = MSCState.POSTPONED
# Some issues have this silly label
# i.e https://github.com/matrix-org/matrix-doc/issues/1466
elif label_name == "merged":
update_state(MSCState.MERGED)
has_label_merged = True
elif label_name == "finished-final-comment-period":
# Prevent issues which have finished FCP but associated PRs have not
# merged yet to not get stuck in FCP state forever.
# i.e https://github.com/matrix-org/matrix-doc/issues/1219
update_state(
disposition_state if disposition_state else MSCState.NEW
)
finished_fcp = True
elif label_name == "abandoned" or label_name == "rejected":
update_state(MSCState.CLOSED)
elif event.event == "reopened":
# TODO: What does mscbot-python do in this case? New or previous state?
update_state(state["prev_state"])
is_closed = False
elif event.event == "closed":
# The MSC was closed
if msc.pull_request:
if state != MSCState.MERGED:
update_state(MSCState.CLOSED)
# Issues that are closed count as closed MSCs
else:
if has_label_merged:
update_state(MSCState.MERGED)
else:
update_state(MSCState.CLOSED)
elif event.event == "merged":
# The MSC was merged
if finished_fcp:
update_state(MSCState.MERGED)
if is_closed and rejected_or_abandoned:
update_state(MSCState.CLOSED)
return state["state"]
def _generate_msc_pie_chart(self, filepath: str):
# Get total number of {closed, open, merged, postponed, fcp} MSCs
fcp_mscs = self.repository.get_issues(
state="open", labels=["proposal", "final-comment-period"],
).totalCount
open_mscs = (
self.repository.get_issues(state="open", labels=["proposal"]).totalCount
- fcp_mscs
)
closed_mscs = self.repository.get_issues(
state="closed", labels=["proposal", "rejected"],
).totalCount
postponed_mscs = self.repository.get_issues(
state="open",
labels=[
"proposal",
"finished-final-comment-period",
"disposition-postpone",
],
).totalCount
merged_mscs = (
self.repository.get_issues(state="closed", labels=["proposal"],).totalCount
- closed_mscs
- postponed_mscs
)
# Create the pie chart
labels = ["Open", "Merged", "Closed", "FCP", "Postponed"]
colors = ["#28a745", "#6f42c1", "#ce303d", "yellow", "grey"]
values = [open_mscs, merged_mscs, closed_mscs, fcp_mscs, postponed_mscs]
# Add the respective count to each label
for idx, label in enumerate(labels):
labels[idx] = f"{label} ({values[idx]})"
fig = go.Figure(
data=[
go.Pie(
labels=labels,
values=values,
sort=False, # Use order of lists above instead of sorting by size
)
],
)
# Make a nice title
fig.update_layout(
title={
"text": "Matrix Spec Change Proposals",
"y": 0.95,
"x": 0.5,
"xanchor": "center",
"yanchor": "top",
},
font=dict(family="Arial", size=18, color="#222222",),
)
fig.update_traces(
hoverinfo="label+percent",
textinfo="value",
textfont_size=20,
marker=dict(colors=colors, line=dict(color="#000000", width=2)),
)
fig.write_image(filepath) | g = pygithub | random_line_split |
msc_chart.py | from datetime import datetime, timedelta
from enum import Enum
from time import sleep
from typing import Dict, List, Optional, Tuple
from github import Github
from github.GithubException import RateLimitExceededException
from github.Issue import Issue
from github.IssueEvent import IssueEvent
from github.Label import Label
from plotly import graph_objects as go
from progress.bar import Bar
class ChartType(Enum):
PIE = 1
STACKED_AREA = 2
class MSCState(Enum):
NEW = 1
FCP = 2
MERGED = 3
POSTPONED = 4
CLOSED = 5
class MSCChart(object):
"""A chart representing Matrix Spec Changes
Args:
pygithub: A pygithub Github object for the library to use. If not set, github_token
must be set instead
github_token: A github auth token to perform API queries with
print_progress: Whether to print progress of chart generation to stdout
"""
def __init__(
self,
pygithub: Optional[Github] = None,
github_token: Optional[str] = None,
print_progress: bool = True,
):
self.print_progress = print_progress
if pygithub:
g = pygithub
elif github_token:
g = Github(github_token)
else:
raise Exception(
"Either pygithub or github_token must be set when initializing MSCChart"
)
# Create a Github instance. The token only needs read:public_repo
self.repository = g.get_repo("matrix-org/matrix-doc")
def generate(self, type: ChartType, filepath: str):
"""Generate the chart
Args:
type: The type of chart to generate
filepath: Where to place the generated chart
"""
# Choose which chart type to generate
if type == ChartType.PIE:
self._generate_msc_pie_chart(filepath)
elif type == ChartType.STACKED_AREA:
self._generate_stacked_area_chart(filepath)
def _generate_stacked_area_chart(self, filepath: str):
"""Generates a historical stacked area chart of msc status"""
# Get time of the earliest issue
mscs = list(
self.repository.get_issues(
sort="created", state="all", direction="asc", labels=["proposal"],
)
)
# There are some MSCs that date all the way back to 2014. These skew the chart a bit,
# so lop those off
outlier_threshold = datetime.fromisoformat("2018-04-29T00:00:00")
# Generate list of weeks since the first msc
weeks = []
t = mscs[0].created_at
while t < datetime.now():
if t > outlier_threshold:
# Add t to our list of weeks
weeks.append(t)
# Move forward by three weeks
t = t + timedelta(weeks=1)
# And calculate it for today
weeks.append(datetime.now())
# Extract MSC event data beforehand so we don't do so again every week
msc_events = []
bar = Bar("Grabbing list of events for each MSC...", max=len(mscs))
for msc in mscs:
# TODO: We could theoretically optimize this by saving a list of events per
# MSC in a DB between runs. If the count of events for a given MSC number
# hasn't changed, then don't update the events
# This would prevent us from needing to fetch the label for each event
# Also try the GraphQL API
# Loop until we succeeded in getting the events for this MSC
while True:
try:
# Pre-request the event labels. This apparently takes another API call
event_label_tuples = []
for event in msc.get_events():
event_label_tuples.append(
(event, event.label if event.event == "labeled" else None)
)
# Events retrieved, break out of the inner loop
msc_events.append(event_label_tuples)
break
except RateLimitExceededException:
# Wait a bit and retry
if self.print_progress:
print("\nHit Ratelimit. Waiting 1 minute...")
sleep(60)
if self.print_progress:
bar.next()
if self.print_progress:
bar.finish()
if self.print_progress:
print("Got", sum((len(events) for events in msc_events)), "total events")
# Get the count of each MSC type at a given week
new_mscs = []
fcp_mscs = []
closed_mscs = []
merged_mscs = []
bar = Bar("Processing MSC state snapshots...", max=len(weeks))
for week in weeks:
new_msc_count = 0
fcp_msc_count = 0
closed_msc_count = 0
merged_msc_count = 0
for index, msc in enumerate(mscs):
msc_state = self._get_msc_state_at_time(msc, msc_events[index], week)
if msc_state == MSCState.NEW:
new_msc_count += 1
elif msc_state == MSCState.FCP:
fcp_msc_count += 1
elif msc_state == MSCState.CLOSED:
closed_msc_count += 1
elif msc_state == MSCState.MERGED:
merged_msc_count += 1
# Note down all counts for this week
new_mscs.append(new_msc_count)
fcp_mscs.append(fcp_msc_count)
closed_mscs.append(closed_msc_count)
merged_mscs.append(merged_msc_count)
if self.print_progress:
bar.next()
if self.print_progress:
bar.finish()
str_weeks = [dt.strftime("%d-%m-%Y") for dt in weeks]
fig = go.Figure()
fig.add_trace(
go.Scatter(
x=str_weeks,
y=merged_mscs,
hoverinfo="x+y",
mode="lines",
name="Merged",
line=dict(width=0.5, color="#6f42c1"),
stackgroup="one",
)
)
fig.add_trace(
go.Scatter(
x=str_weeks,
y=closed_mscs,
hoverinfo="x+y",
mode="lines",
name="Closed",
line=dict(width=0.5, color="#ce303d"),
stackgroup="one",
)
)
fig.add_trace(
go.Scatter(
x=str_weeks,
y=fcp_mscs,
hoverinfo="x+y",
mode="lines",
name="FCP",
line=dict(width=0.5, color="yellow"),
stackgroup="one",
)
)
fig.add_trace(
go.Scatter(
x=str_weeks,
y=new_mscs,
hoverinfo="x+y",
mode="lines",
name="New",
line=dict(width=0.5, color="#28a745"),
stackgroup="one",
)
)
# Add a nice title
fig.update_layout(
title={
"text": "Matrix Spec Change Proposals",
"y": 0.95,
"x": 0.5,
"xanchor": "center",
"yanchor": "top",
},
font=dict(family="Arial", size=18, color="#222222",),
)
fig.write_image(filepath)
def | (
self,
msc: Issue,
msc_events: List[Tuple[IssueEvent, Optional[Label]]],
dt: datetime,
) -> MSCState:
"""Given a datetime, get the state of an MSC at that time
Args:
msc: The MSC to target,
msc_events: A cached List of github issue events to process, as well as cached label
information if the issue event relates to labels. We cache all this information
up front as pass it is as otherwise we'd have to do separate API requests for
each of them.
dt: The threshold at which to stop processing issue events, thus giving you the
state of an MSC at this given time.
"""
# Iterate through MSC events and calculate the current state of the issue at a given
# time
# Initially assume it doesn't exist. Change the state as we iterate through events
state = {
"prev_state": None,
"state": None,
} # type: Dict[str, Optional[MSCState]]
finished_fcp = False
def update_state(new_state: MSCState):
state["prev_state"] = state["state"]
state["state"] = new_state
disposition_state = None
is_closed = False
has_label_merged = False
rejected_or_abandoned = False
for event, label in msc_events:
if event.created_at > dt:
# We've reached our datetime threshold
break
# Classify the event
if label:
label_name = label.name
# This is a label event
if label_name == "proposal":
update_state(MSCState.NEW)
elif label_name == "final-comment-period":
update_state(MSCState.FCP)
elif label_name == "disposition-merge":
disposition_state = MSCState.MERGED
elif label_name == "disposition-close":
disposition_state = MSCState.CLOSED
elif label_name == "disposition-postpone":
disposition_state = MSCState.POSTPONED
# Some issues have this silly label
# i.e https://github.com/matrix-org/matrix-doc/issues/1466
elif label_name == "merged":
update_state(MSCState.MERGED)
has_label_merged = True
elif label_name == "finished-final-comment-period":
# Prevent issues which have finished FCP but associated PRs have not
# merged yet to not get stuck in FCP state forever.
# i.e https://github.com/matrix-org/matrix-doc/issues/1219
update_state(
disposition_state if disposition_state else MSCState.NEW
)
finished_fcp = True
elif label_name == "abandoned" or label_name == "rejected":
update_state(MSCState.CLOSED)
elif event.event == "reopened":
# TODO: What does mscbot-python do in this case? New or previous state?
update_state(state["prev_state"])
is_closed = False
elif event.event == "closed":
# The MSC was closed
if msc.pull_request:
if state != MSCState.MERGED:
update_state(MSCState.CLOSED)
# Issues that are closed count as closed MSCs
else:
if has_label_merged:
update_state(MSCState.MERGED)
else:
update_state(MSCState.CLOSED)
elif event.event == "merged":
# The MSC was merged
if finished_fcp:
update_state(MSCState.MERGED)
if is_closed and rejected_or_abandoned:
update_state(MSCState.CLOSED)
return state["state"]
def _generate_msc_pie_chart(self, filepath: str):
# Get total number of {closed, open, merged, postponed, fcp} MSCs
fcp_mscs = self.repository.get_issues(
state="open", labels=["proposal", "final-comment-period"],
).totalCount
open_mscs = (
self.repository.get_issues(state="open", labels=["proposal"]).totalCount
- fcp_mscs
)
closed_mscs = self.repository.get_issues(
state="closed", labels=["proposal", "rejected"],
).totalCount
postponed_mscs = self.repository.get_issues(
state="open",
labels=[
"proposal",
"finished-final-comment-period",
"disposition-postpone",
],
).totalCount
merged_mscs = (
self.repository.get_issues(state="closed", labels=["proposal"],).totalCount
- closed_mscs
- postponed_mscs
)
# Create the pie chart
labels = ["Open", "Merged", "Closed", "FCP", "Postponed"]
colors = ["#28a745", "#6f42c1", "#ce303d", "yellow", "grey"]
values = [open_mscs, merged_mscs, closed_mscs, fcp_mscs, postponed_mscs]
# Add the respective count to each label
for idx, label in enumerate(labels):
labels[idx] = f"{label} ({values[idx]})"
fig = go.Figure(
data=[
go.Pie(
labels=labels,
values=values,
sort=False, # Use order of lists above instead of sorting by size
)
],
)
# Make a nice title
fig.update_layout(
title={
"text": "Matrix Spec Change Proposals",
"y": 0.95,
"x": 0.5,
"xanchor": "center",
"yanchor": "top",
},
font=dict(family="Arial", size=18, color="#222222",),
)
fig.update_traces(
hoverinfo="label+percent",
textinfo="value",
textfont_size=20,
marker=dict(colors=colors, line=dict(color="#000000", width=2)),
)
fig.write_image(filepath)
| _get_msc_state_at_time | identifier_name |
msc_chart.py | from datetime import datetime, timedelta
from enum import Enum
from time import sleep
from typing import Dict, List, Optional, Tuple
from github import Github
from github.GithubException import RateLimitExceededException
from github.Issue import Issue
from github.IssueEvent import IssueEvent
from github.Label import Label
from plotly import graph_objects as go
from progress.bar import Bar
class ChartType(Enum):
PIE = 1
STACKED_AREA = 2
class MSCState(Enum):
NEW = 1
FCP = 2
MERGED = 3
POSTPONED = 4
CLOSED = 5
class MSCChart(object):
"""A chart representing Matrix Spec Changes
Args:
pygithub: A pygithub Github object for the library to use. If not set, github_token
must be set instead
github_token: A github auth token to perform API queries with
print_progress: Whether to print progress of chart generation to stdout
"""
def __init__(
self,
pygithub: Optional[Github] = None,
github_token: Optional[str] = None,
print_progress: bool = True,
):
self.print_progress = print_progress
if pygithub:
g = pygithub
elif github_token:
g = Github(github_token)
else:
raise Exception(
"Either pygithub or github_token must be set when initializing MSCChart"
)
# Create a Github instance. The token only needs read:public_repo
self.repository = g.get_repo("matrix-org/matrix-doc")
def generate(self, type: ChartType, filepath: str):
"""Generate the chart
Args:
type: The type of chart to generate
filepath: Where to place the generated chart
"""
# Choose which chart type to generate
if type == ChartType.PIE:
self._generate_msc_pie_chart(filepath)
elif type == ChartType.STACKED_AREA:
self._generate_stacked_area_chart(filepath)
def _generate_stacked_area_chart(self, filepath: str):
"""Generates a historical stacked area chart of msc status"""
# Get time of the earliest issue
mscs = list(
self.repository.get_issues(
sort="created", state="all", direction="asc", labels=["proposal"],
)
)
# There are some MSCs that date all the way back to 2014. These skew the chart a bit,
# so lop those off
outlier_threshold = datetime.fromisoformat("2018-04-29T00:00:00")
# Generate list of weeks since the first msc
weeks = []
t = mscs[0].created_at
while t < datetime.now():
if t > outlier_threshold:
# Add t to our list of weeks
weeks.append(t)
# Move forward by three weeks
t = t + timedelta(weeks=1)
# And calculate it for today
weeks.append(datetime.now())
# Extract MSC event data beforehand so we don't do so again every week
msc_events = []
bar = Bar("Grabbing list of events for each MSC...", max=len(mscs))
for msc in mscs:
# TODO: We could theoretically optimize this by saving a list of events per
# MSC in a DB between runs. If the count of events for a given MSC number
# hasn't changed, then don't update the events
# This would prevent us from needing to fetch the label for each event
# Also try the GraphQL API
# Loop until we succeeded in getting the events for this MSC
while True:
try:
# Pre-request the event labels. This apparently takes another API call
event_label_tuples = []
for event in msc.get_events():
event_label_tuples.append(
(event, event.label if event.event == "labeled" else None)
)
# Events retrieved, break out of the inner loop
msc_events.append(event_label_tuples)
break
except RateLimitExceededException:
# Wait a bit and retry
if self.print_progress:
print("\nHit Ratelimit. Waiting 1 minute...")
sleep(60)
if self.print_progress:
bar.next()
if self.print_progress:
bar.finish()
if self.print_progress:
print("Got", sum((len(events) for events in msc_events)), "total events")
# Get the count of each MSC type at a given week
new_mscs = []
fcp_mscs = []
closed_mscs = []
merged_mscs = []
bar = Bar("Processing MSC state snapshots...", max=len(weeks))
for week in weeks:
new_msc_count = 0
fcp_msc_count = 0
closed_msc_count = 0
merged_msc_count = 0
for index, msc in enumerate(mscs):
msc_state = self._get_msc_state_at_time(msc, msc_events[index], week)
if msc_state == MSCState.NEW:
new_msc_count += 1
elif msc_state == MSCState.FCP:
fcp_msc_count += 1
elif msc_state == MSCState.CLOSED:
closed_msc_count += 1
elif msc_state == MSCState.MERGED:
merged_msc_count += 1
# Note down all counts for this week
new_mscs.append(new_msc_count)
fcp_mscs.append(fcp_msc_count)
closed_mscs.append(closed_msc_count)
merged_mscs.append(merged_msc_count)
if self.print_progress:
bar.next()
if self.print_progress:
bar.finish()
str_weeks = [dt.strftime("%d-%m-%Y") for dt in weeks]
fig = go.Figure()
fig.add_trace(
go.Scatter(
x=str_weeks,
y=merged_mscs,
hoverinfo="x+y",
mode="lines",
name="Merged",
line=dict(width=0.5, color="#6f42c1"),
stackgroup="one",
)
)
fig.add_trace(
go.Scatter(
x=str_weeks,
y=closed_mscs,
hoverinfo="x+y",
mode="lines",
name="Closed",
line=dict(width=0.5, color="#ce303d"),
stackgroup="one",
)
)
fig.add_trace(
go.Scatter(
x=str_weeks,
y=fcp_mscs,
hoverinfo="x+y",
mode="lines",
name="FCP",
line=dict(width=0.5, color="yellow"),
stackgroup="one",
)
)
fig.add_trace(
go.Scatter(
x=str_weeks,
y=new_mscs,
hoverinfo="x+y",
mode="lines",
name="New",
line=dict(width=0.5, color="#28a745"),
stackgroup="one",
)
)
# Add a nice title
fig.update_layout(
title={
"text": "Matrix Spec Change Proposals",
"y": 0.95,
"x": 0.5,
"xanchor": "center",
"yanchor": "top",
},
font=dict(family="Arial", size=18, color="#222222",),
)
fig.write_image(filepath)
def _get_msc_state_at_time(
self,
msc: Issue,
msc_events: List[Tuple[IssueEvent, Optional[Label]]],
dt: datetime,
) -> MSCState:
"""Given a datetime, get the state of an MSC at that time
Args:
msc: The MSC to target,
msc_events: A cached List of github issue events to process, as well as cached label
information if the issue event relates to labels. We cache all this information
up front as pass it is as otherwise we'd have to do separate API requests for
each of them.
dt: The threshold at which to stop processing issue events, thus giving you the
state of an MSC at this given time.
"""
# Iterate through MSC events and calculate the current state of the issue at a given
# time
# Initially assume it doesn't exist. Change the state as we iterate through events
state = {
"prev_state": None,
"state": None,
} # type: Dict[str, Optional[MSCState]]
finished_fcp = False
def update_state(new_state: MSCState):
state["prev_state"] = state["state"]
state["state"] = new_state
disposition_state = None
is_closed = False
has_label_merged = False
rejected_or_abandoned = False
for event, label in msc_events:
if event.created_at > dt:
# We've reached our datetime threshold
break
# Classify the event
if label:
label_name = label.name
# This is a label event
if label_name == "proposal":
update_state(MSCState.NEW)
elif label_name == "final-comment-period":
update_state(MSCState.FCP)
elif label_name == "disposition-merge":
disposition_state = MSCState.MERGED
elif label_name == "disposition-close":
disposition_state = MSCState.CLOSED
elif label_name == "disposition-postpone":
disposition_state = MSCState.POSTPONED
# Some issues have this silly label
# i.e https://github.com/matrix-org/matrix-doc/issues/1466
elif label_name == "merged":
update_state(MSCState.MERGED)
has_label_merged = True
elif label_name == "finished-final-comment-period":
# Prevent issues which have finished FCP but associated PRs have not
# merged yet to not get stuck in FCP state forever.
# i.e https://github.com/matrix-org/matrix-doc/issues/1219
update_state(
disposition_state if disposition_state else MSCState.NEW
)
finished_fcp = True
elif label_name == "abandoned" or label_name == "rejected":
update_state(MSCState.CLOSED)
elif event.event == "reopened":
# TODO: What does mscbot-python do in this case? New or previous state?
update_state(state["prev_state"])
is_closed = False
elif event.event == "closed":
# The MSC was closed
if msc.pull_request:
if state != MSCState.MERGED:
update_state(MSCState.CLOSED)
# Issues that are closed count as closed MSCs
else:
if has_label_merged:
update_state(MSCState.MERGED)
else:
update_state(MSCState.CLOSED)
elif event.event == "merged":
# The MSC was merged
if finished_fcp:
update_state(MSCState.MERGED)
if is_closed and rejected_or_abandoned:
update_state(MSCState.CLOSED)
return state["state"]
def _generate_msc_pie_chart(self, filepath: str):
# Get total number of {closed, open, merged, postponed, fcp} MSCs
fcp_mscs = self.repository.get_issues(
state="open", labels=["proposal", "final-comment-period"],
).totalCount
open_mscs = (
self.repository.get_issues(state="open", labels=["proposal"]).totalCount
- fcp_mscs
)
closed_mscs = self.repository.get_issues(
state="closed", labels=["proposal", "rejected"],
).totalCount
postponed_mscs = self.repository.get_issues(
state="open",
labels=[
"proposal",
"finished-final-comment-period",
"disposition-postpone",
],
).totalCount
merged_mscs = (
self.repository.get_issues(state="closed", labels=["proposal"],).totalCount
- closed_mscs
- postponed_mscs
)
# Create the pie chart
labels = ["Open", "Merged", "Closed", "FCP", "Postponed"]
colors = ["#28a745", "#6f42c1", "#ce303d", "yellow", "grey"]
values = [open_mscs, merged_mscs, closed_mscs, fcp_mscs, postponed_mscs]
# Add the respective count to each label
for idx, label in enumerate(labels):
|
fig = go.Figure(
data=[
go.Pie(
labels=labels,
values=values,
sort=False, # Use order of lists above instead of sorting by size
)
],
)
# Make a nice title
fig.update_layout(
title={
"text": "Matrix Spec Change Proposals",
"y": 0.95,
"x": 0.5,
"xanchor": "center",
"yanchor": "top",
},
font=dict(family="Arial", size=18, color="#222222",),
)
fig.update_traces(
hoverinfo="label+percent",
textinfo="value",
textfont_size=20,
marker=dict(colors=colors, line=dict(color="#000000", width=2)),
)
fig.write_image(filepath)
| labels[idx] = f"{label} ({values[idx]})" | conditional_block |
msc_chart.py | from datetime import datetime, timedelta
from enum import Enum
from time import sleep
from typing import Dict, List, Optional, Tuple
from github import Github
from github.GithubException import RateLimitExceededException
from github.Issue import Issue
from github.IssueEvent import IssueEvent
from github.Label import Label
from plotly import graph_objects as go
from progress.bar import Bar
class ChartType(Enum):
PIE = 1
STACKED_AREA = 2
class MSCState(Enum):
NEW = 1
FCP = 2
MERGED = 3
POSTPONED = 4
CLOSED = 5
class MSCChart(object):
"""A chart representing Matrix Spec Changes
Args:
pygithub: A pygithub Github object for the library to use. If not set, github_token
must be set instead
github_token: A github auth token to perform API queries with
print_progress: Whether to print progress of chart generation to stdout
"""
def __init__(
self,
pygithub: Optional[Github] = None,
github_token: Optional[str] = None,
print_progress: bool = True,
):
self.print_progress = print_progress
if pygithub:
g = pygithub
elif github_token:
g = Github(github_token)
else:
raise Exception(
"Either pygithub or github_token must be set when initializing MSCChart"
)
# Create a Github instance. The token only needs read:public_repo
self.repository = g.get_repo("matrix-org/matrix-doc")
def generate(self, type: ChartType, filepath: str):
|
def _generate_stacked_area_chart(self, filepath: str):
"""Generates a historical stacked area chart of msc status"""
# Get time of the earliest issue
mscs = list(
self.repository.get_issues(
sort="created", state="all", direction="asc", labels=["proposal"],
)
)
# There are some MSCs that date all the way back to 2014. These skew the chart a bit,
# so lop those off
outlier_threshold = datetime.fromisoformat("2018-04-29T00:00:00")
# Generate list of weeks since the first msc
weeks = []
t = mscs[0].created_at
while t < datetime.now():
if t > outlier_threshold:
# Add t to our list of weeks
weeks.append(t)
# Move forward by three weeks
t = t + timedelta(weeks=1)
# And calculate it for today
weeks.append(datetime.now())
# Extract MSC event data beforehand so we don't do so again every week
msc_events = []
bar = Bar("Grabbing list of events for each MSC...", max=len(mscs))
for msc in mscs:
# TODO: We could theoretically optimize this by saving a list of events per
# MSC in a DB between runs. If the count of events for a given MSC number
# hasn't changed, then don't update the events
# This would prevent us from needing to fetch the label for each event
# Also try the GraphQL API
# Loop until we succeeded in getting the events for this MSC
while True:
try:
# Pre-request the event labels. This apparently takes another API call
event_label_tuples = []
for event in msc.get_events():
event_label_tuples.append(
(event, event.label if event.event == "labeled" else None)
)
# Events retrieved, break out of the inner loop
msc_events.append(event_label_tuples)
break
except RateLimitExceededException:
# Wait a bit and retry
if self.print_progress:
print("\nHit Ratelimit. Waiting 1 minute...")
sleep(60)
if self.print_progress:
bar.next()
if self.print_progress:
bar.finish()
if self.print_progress:
print("Got", sum((len(events) for events in msc_events)), "total events")
# Get the count of each MSC type at a given week
new_mscs = []
fcp_mscs = []
closed_mscs = []
merged_mscs = []
bar = Bar("Processing MSC state snapshots...", max=len(weeks))
for week in weeks:
new_msc_count = 0
fcp_msc_count = 0
closed_msc_count = 0
merged_msc_count = 0
for index, msc in enumerate(mscs):
msc_state = self._get_msc_state_at_time(msc, msc_events[index], week)
if msc_state == MSCState.NEW:
new_msc_count += 1
elif msc_state == MSCState.FCP:
fcp_msc_count += 1
elif msc_state == MSCState.CLOSED:
closed_msc_count += 1
elif msc_state == MSCState.MERGED:
merged_msc_count += 1
# Note down all counts for this week
new_mscs.append(new_msc_count)
fcp_mscs.append(fcp_msc_count)
closed_mscs.append(closed_msc_count)
merged_mscs.append(merged_msc_count)
if self.print_progress:
bar.next()
if self.print_progress:
bar.finish()
str_weeks = [dt.strftime("%d-%m-%Y") for dt in weeks]
fig = go.Figure()
fig.add_trace(
go.Scatter(
x=str_weeks,
y=merged_mscs,
hoverinfo="x+y",
mode="lines",
name="Merged",
line=dict(width=0.5, color="#6f42c1"),
stackgroup="one",
)
)
fig.add_trace(
go.Scatter(
x=str_weeks,
y=closed_mscs,
hoverinfo="x+y",
mode="lines",
name="Closed",
line=dict(width=0.5, color="#ce303d"),
stackgroup="one",
)
)
fig.add_trace(
go.Scatter(
x=str_weeks,
y=fcp_mscs,
hoverinfo="x+y",
mode="lines",
name="FCP",
line=dict(width=0.5, color="yellow"),
stackgroup="one",
)
)
fig.add_trace(
go.Scatter(
x=str_weeks,
y=new_mscs,
hoverinfo="x+y",
mode="lines",
name="New",
line=dict(width=0.5, color="#28a745"),
stackgroup="one",
)
)
# Add a nice title
fig.update_layout(
title={
"text": "Matrix Spec Change Proposals",
"y": 0.95,
"x": 0.5,
"xanchor": "center",
"yanchor": "top",
},
font=dict(family="Arial", size=18, color="#222222",),
)
fig.write_image(filepath)
def _get_msc_state_at_time(
self,
msc: Issue,
msc_events: List[Tuple[IssueEvent, Optional[Label]]],
dt: datetime,
) -> MSCState:
"""Given a datetime, get the state of an MSC at that time
Args:
msc: The MSC to target,
msc_events: A cached List of github issue events to process, as well as cached label
information if the issue event relates to labels. We cache all this information
up front as pass it is as otherwise we'd have to do separate API requests for
each of them.
dt: The threshold at which to stop processing issue events, thus giving you the
state of an MSC at this given time.
"""
# Iterate through MSC events and calculate the current state of the issue at a given
# time
# Initially assume it doesn't exist. Change the state as we iterate through events
state = {
"prev_state": None,
"state": None,
} # type: Dict[str, Optional[MSCState]]
finished_fcp = False
def update_state(new_state: MSCState):
state["prev_state"] = state["state"]
state["state"] = new_state
disposition_state = None
is_closed = False
has_label_merged = False
rejected_or_abandoned = False
for event, label in msc_events:
if event.created_at > dt:
# We've reached our datetime threshold
break
# Classify the event
if label:
label_name = label.name
# This is a label event
if label_name == "proposal":
update_state(MSCState.NEW)
elif label_name == "final-comment-period":
update_state(MSCState.FCP)
elif label_name == "disposition-merge":
disposition_state = MSCState.MERGED
elif label_name == "disposition-close":
disposition_state = MSCState.CLOSED
elif label_name == "disposition-postpone":
disposition_state = MSCState.POSTPONED
# Some issues have this silly label
# i.e https://github.com/matrix-org/matrix-doc/issues/1466
elif label_name == "merged":
update_state(MSCState.MERGED)
has_label_merged = True
elif label_name == "finished-final-comment-period":
# Prevent issues which have finished FCP but associated PRs have not
# merged yet to not get stuck in FCP state forever.
# i.e https://github.com/matrix-org/matrix-doc/issues/1219
update_state(
disposition_state if disposition_state else MSCState.NEW
)
finished_fcp = True
elif label_name == "abandoned" or label_name == "rejected":
update_state(MSCState.CLOSED)
elif event.event == "reopened":
# TODO: What does mscbot-python do in this case? New or previous state?
update_state(state["prev_state"])
is_closed = False
elif event.event == "closed":
# The MSC was closed
if msc.pull_request:
if state != MSCState.MERGED:
update_state(MSCState.CLOSED)
# Issues that are closed count as closed MSCs
else:
if has_label_merged:
update_state(MSCState.MERGED)
else:
update_state(MSCState.CLOSED)
elif event.event == "merged":
# The MSC was merged
if finished_fcp:
update_state(MSCState.MERGED)
if is_closed and rejected_or_abandoned:
update_state(MSCState.CLOSED)
return state["state"]
def _generate_msc_pie_chart(self, filepath: str):
# Get total number of {closed, open, merged, postponed, fcp} MSCs
fcp_mscs = self.repository.get_issues(
state="open", labels=["proposal", "final-comment-period"],
).totalCount
open_mscs = (
self.repository.get_issues(state="open", labels=["proposal"]).totalCount
- fcp_mscs
)
closed_mscs = self.repository.get_issues(
state="closed", labels=["proposal", "rejected"],
).totalCount
postponed_mscs = self.repository.get_issues(
state="open",
labels=[
"proposal",
"finished-final-comment-period",
"disposition-postpone",
],
).totalCount
merged_mscs = (
self.repository.get_issues(state="closed", labels=["proposal"],).totalCount
- closed_mscs
- postponed_mscs
)
# Create the pie chart
labels = ["Open", "Merged", "Closed", "FCP", "Postponed"]
colors = ["#28a745", "#6f42c1", "#ce303d", "yellow", "grey"]
values = [open_mscs, merged_mscs, closed_mscs, fcp_mscs, postponed_mscs]
# Add the respective count to each label
for idx, label in enumerate(labels):
labels[idx] = f"{label} ({values[idx]})"
fig = go.Figure(
data=[
go.Pie(
labels=labels,
values=values,
sort=False, # Use order of lists above instead of sorting by size
)
],
)
# Make a nice title
fig.update_layout(
title={
"text": "Matrix Spec Change Proposals",
"y": 0.95,
"x": 0.5,
"xanchor": "center",
"yanchor": "top",
},
font=dict(family="Arial", size=18, color="#222222",),
)
fig.update_traces(
hoverinfo="label+percent",
textinfo="value",
textfont_size=20,
marker=dict(colors=colors, line=dict(color="#000000", width=2)),
)
fig.write_image(filepath)
| """Generate the chart
Args:
type: The type of chart to generate
filepath: Where to place the generated chart
"""
# Choose which chart type to generate
if type == ChartType.PIE:
self._generate_msc_pie_chart(filepath)
elif type == ChartType.STACKED_AREA:
self._generate_stacked_area_chart(filepath) | identifier_body |
main.rs | extern crate dotenv;
extern crate iron;
extern crate handlebars;
extern crate handlebars_iron as hbs;
#[macro_use]
extern crate router;
#[cfg(not(feature = "serde_type"))]
extern crate rustc_serialize;
extern crate mount;
extern crate staticfile;
extern crate reqwest;
extern crate serde_json;
extern crate iron_sessionstorage;
extern crate urlencoded;
use iron::prelude::*;
use iron::headers::ContentType;
use iron::modifiers::Redirect;
use iron::{Url, status};
use hbs::{Template, HandlebarsEngine, DirectorySource};
use rustc_serialize::json::{Json};
use staticfile::Static;
use mount::Mount;
use serde_json::Value;
use iron_sessionstorage::traits::*;
use iron_sessionstorage::SessionStorage;
use iron_sessionstorage::backends::SignedCookieBackend;
use urlencoded::UrlEncodedQuery;
use dotenv::dotenv;
use std::env;
use std::io::Read;
use std::collections::BTreeMap;
use std::path::Path;
use std::collections::HashMap;
static INSTAGRAM_OAUTH_URI: &'static str = "https://api.instagram.com/oauth/authorize/";
static GRANT_TYPE: &'static str = "authorization_code";
fn value_to_json(x: Value) -> Json {
match x {
Value::Number(ref x) if x.is_i64() => Json::I64(x.as_i64().unwrap()),
Value::Number(ref x) if x.is_u64() => Json::U64(x.as_u64().unwrap()),
Value::Number(ref x) if x.is_f64() => Json::F64(x.as_f64().unwrap()),
Value::String(x) => Json::String(x),
Value::Array(x) => Json::Array(x
.into_iter()
.map(|x| value_to_json(x))
.collect::<Vec<Json>>()
),
Value::Object(x) => {
let mut buf = BTreeMap::<String, Json>::new();
for (key, value) in x.into_iter() {
buf.insert(key, value_to_json(value));
}
Json::Object(buf)
},
Value::Bool(x) => Json::Boolean(x),
_ => Json::Null,
}
}
#[derive(Debug)]
struct AccessToken(String);
impl iron_sessionstorage::Value for AccessToken {
fn get_key() -> &'static str { "access_token" }
fn into_raw(self) -> String { self.0 }
fn from_raw(value: String) -> Option<Self> {
Some(AccessToken(value))
}
}
fn main() {
dotenv().ok();
let port = match env::var("PORT") {
Ok(p) => p,
Err(_) => "3000".to_string(),
};
let redirect_url = env::var("REDIRECT_URL").expect("lack of redirect url.");
let client_id = env::var("INSTAGRAM_CLIENT_ID").expect("lack of instagram client id.");
let client_secret = env::var("INSTAGRAM_CLIENT_SECRET").expect("lack of instagram client secret.");
let authorization_uri = format!("{}?client_id={}&redirect_uri={}&response_type=code&scope={}",
INSTAGRAM_OAUTH_URI,
client_id,
redirect_url,
"public_content".to_string());
let router = router!(
index: get "/" => move |req: &mut Request| {
match req.url.clone().query() {
Some(query) => {
let code = query.split("=").last().expect("query parsing is failed").to_string();
let params = [
("client_id", client_id.clone()),
("client_secret", client_secret.clone()),
("grant_type", GRANT_TYPE.clone().to_string()),
("redirect_uri", redirect_url.clone()),
("code", code.to_string())
];
let http_client = reqwest::Client::new().expect("Create HTTP client is failed");
let mut result = http_client.post("https://api.instagram.com/oauth/access_token")
.form(¶ms)
.send()
.expect("send Request failed");
let result_json = result.json::<HashMap<String, Value>>().expect("Parse JSON failed");
let data = match result_json.get("access_token") {
Some(at) => {
let access_token = at.as_str().unwrap();
req.session().set(AccessToken(access_token.to_string())).unwrap();
let url = format!("https://api.instagram.com/v1/tags/nofilter/media/recent?access_token={}", access_token);
http_client
.get(url.as_str())
.send()
.expect("send Request failed")
.json::<HashMap<String, Value>>()
.expect("Parse JSON failed")
.into_iter()
.fold(HashMap::<String, Json>::new(), |mut acc, (key, value)| {
acc.insert(key, value_to_json(value));
acc | let mut resp = Response::new();
resp.set_mut(Template::new("index", data)).set_mut(status::Ok);
Ok(Response::with((status::Found, Redirect(
Url::parse(redirect_url.as_str()).expect("parse url failed")
))))
},
None => {
let mut resp = Response::new();
let data = BTreeMap::<String, Json>::new();
resp.set_mut(Template::new("index", data)).set_mut(status::Ok);
Ok(resp)
},
}
},
oauth: get "/oauth" => move |_: &mut Request| {
Ok(Response::with((status::Found, Redirect(
Url::parse(authorization_uri.as_str()).expect(format!("authorization_uri is invalid => {}", authorization_uri).as_str())
))))
},
api_username: get "/api/username" => move |req: &mut Request| {
let username = match req.url.clone().query() {
Some(query) => query.split("=").last().expect("query parsing is failed"),
_ => ""
}.to_string();
let access_token = match try!(req.session().get::<AccessToken>()) {
Some(y) => y.0,
None => "Access token is Not Found".to_string(),
};
if access_token.len() == 0 {
return Ok(Response::with((ContentType::json().0, status::Ok, "{}")))
};
let url = format!("https://api.instagram.com/v1/users/search?q={}&access_token={}", username, access_token.to_string());
let http_client = reqwest::Client::new().expect("Create HTTP client is failed");
let mut buffer = String::new();
http_client
.get(url.as_str())
.send()
.expect("send Request failed")
.read_to_string(&mut buffer)
.expect("read JSON string failed")
;
Ok(Response::with((ContentType::json().0, status::Ok, buffer)))
},
api_hashtag: get "/api/hashtag" => move |req: &mut Request| {
fn get_query(x: Option<&Vec<String>>) -> &str {
match x {
Some(y) => match y.first() {
Some(z) => z.as_str(),
None => "",
},
None => "",
}
}
let access_token = match try!(req.session().get::<AccessToken>()) {
Some(y) => y.0,
None => "Access token is Not Found".to_string(),
};
let (user_id, hashtag) = match req.get_ref::<UrlEncodedQuery>() {
Ok(queries) => (get_query(queries.get("user_id")), get_query(queries.get("hashtag"))),
_ => ("", "")
};
let url = format!(
"https://api.instagram.com/v1/users/{}/media/recent/?access_token={}",
user_id.to_string(),
access_token.to_string()
);
let http_client = reqwest::Client::new().expect("Create HTTP client is failed");
let response = http_client
.get(url.as_str())
.send()
.expect("send Request failed")
.json::<HashMap<String, Value>>()
.expect("Parse JSON failed")
.into_iter()
.filter(|x| { (&x.0).as_str() == "data" })
.map(|x| {
match x.1 {
Value::Array(ys) => {
ys
.into_iter()
.filter(|media| {
if let &Value::Object(ref m) = media {
if let &Value::Array(ref tags) = m.get("tags").unwrap() {
tags.contains(&Value::String(hashtag.to_string()))
} else { false }
} else { false }
})
.map(value_to_json)
.collect::<Vec<Json>>()
},
_ => vec![],
}
})
.fold(vec![], |mut acc, mut xs| {
acc.append(&mut xs);
acc
})
;
Ok(Response::with((ContentType::json().0, status::Ok, Json::Array(response).to_string())))
}
);
let mut hbse = HandlebarsEngine::new();
hbse.add(Box::new(DirectorySource::new("./templates/", ".hbs")));
hbse.reload().expect("template can't reload collectory.");
let mut mount = Mount::new();
mount
.mount("/css", Static::new(Path::new("assets/css")))
.mount("/js", Static::new(Path::new("assets/js")))
.mount("/", router);
let mut chain = Chain::new(mount);
let session = SessionStorage::new(SignedCookieBackend::new(b"my_cookie_secret".to_vec()));
chain.link_around(session);
chain.link_after(hbse);
println!("Server start on {}", port);
Iron::new(chain).http(format!("0.0.0.0:{}", port)).expect("Server start process is failed.");
} | })
},
None => HashMap::<String, Json>::new(),
};
| random_line_split |
main.rs | extern crate dotenv;
extern crate iron;
extern crate handlebars;
extern crate handlebars_iron as hbs;
#[macro_use]
extern crate router;
#[cfg(not(feature = "serde_type"))]
extern crate rustc_serialize;
extern crate mount;
extern crate staticfile;
extern crate reqwest;
extern crate serde_json;
extern crate iron_sessionstorage;
extern crate urlencoded;
use iron::prelude::*;
use iron::headers::ContentType;
use iron::modifiers::Redirect;
use iron::{Url, status};
use hbs::{Template, HandlebarsEngine, DirectorySource};
use rustc_serialize::json::{Json};
use staticfile::Static;
use mount::Mount;
use serde_json::Value;
use iron_sessionstorage::traits::*;
use iron_sessionstorage::SessionStorage;
use iron_sessionstorage::backends::SignedCookieBackend;
use urlencoded::UrlEncodedQuery;
use dotenv::dotenv;
use std::env;
use std::io::Read;
use std::collections::BTreeMap;
use std::path::Path;
use std::collections::HashMap;
static INSTAGRAM_OAUTH_URI: &'static str = "https://api.instagram.com/oauth/authorize/";
static GRANT_TYPE: &'static str = "authorization_code";
fn value_to_json(x: Value) -> Json |
#[derive(Debug)]
struct AccessToken(String);
impl iron_sessionstorage::Value for AccessToken {
fn get_key() -> &'static str { "access_token" }
fn into_raw(self) -> String { self.0 }
fn from_raw(value: String) -> Option<Self> {
Some(AccessToken(value))
}
}
fn main() {
dotenv().ok();
let port = match env::var("PORT") {
Ok(p) => p,
Err(_) => "3000".to_string(),
};
let redirect_url = env::var("REDIRECT_URL").expect("lack of redirect url.");
let client_id = env::var("INSTAGRAM_CLIENT_ID").expect("lack of instagram client id.");
let client_secret = env::var("INSTAGRAM_CLIENT_SECRET").expect("lack of instagram client secret.");
let authorization_uri = format!("{}?client_id={}&redirect_uri={}&response_type=code&scope={}",
INSTAGRAM_OAUTH_URI,
client_id,
redirect_url,
"public_content".to_string());
let router = router!(
index: get "/" => move |req: &mut Request| {
match req.url.clone().query() {
Some(query) => {
let code = query.split("=").last().expect("query parsing is failed").to_string();
let params = [
("client_id", client_id.clone()),
("client_secret", client_secret.clone()),
("grant_type", GRANT_TYPE.clone().to_string()),
("redirect_uri", redirect_url.clone()),
("code", code.to_string())
];
let http_client = reqwest::Client::new().expect("Create HTTP client is failed");
let mut result = http_client.post("https://api.instagram.com/oauth/access_token")
.form(¶ms)
.send()
.expect("send Request failed");
let result_json = result.json::<HashMap<String, Value>>().expect("Parse JSON failed");
let data = match result_json.get("access_token") {
Some(at) => {
let access_token = at.as_str().unwrap();
req.session().set(AccessToken(access_token.to_string())).unwrap();
let url = format!("https://api.instagram.com/v1/tags/nofilter/media/recent?access_token={}", access_token);
http_client
.get(url.as_str())
.send()
.expect("send Request failed")
.json::<HashMap<String, Value>>()
.expect("Parse JSON failed")
.into_iter()
.fold(HashMap::<String, Json>::new(), |mut acc, (key, value)| {
acc.insert(key, value_to_json(value));
acc
})
},
None => HashMap::<String, Json>::new(),
};
let mut resp = Response::new();
resp.set_mut(Template::new("index", data)).set_mut(status::Ok);
Ok(Response::with((status::Found, Redirect(
Url::parse(redirect_url.as_str()).expect("parse url failed")
))))
},
None => {
let mut resp = Response::new();
let data = BTreeMap::<String, Json>::new();
resp.set_mut(Template::new("index", data)).set_mut(status::Ok);
Ok(resp)
},
}
},
oauth: get "/oauth" => move |_: &mut Request| {
Ok(Response::with((status::Found, Redirect(
Url::parse(authorization_uri.as_str()).expect(format!("authorization_uri is invalid => {}", authorization_uri).as_str())
))))
},
api_username: get "/api/username" => move |req: &mut Request| {
let username = match req.url.clone().query() {
Some(query) => query.split("=").last().expect("query parsing is failed"),
_ => ""
}.to_string();
let access_token = match try!(req.session().get::<AccessToken>()) {
Some(y) => y.0,
None => "Access token is Not Found".to_string(),
};
if access_token.len() == 0 {
return Ok(Response::with((ContentType::json().0, status::Ok, "{}")))
};
let url = format!("https://api.instagram.com/v1/users/search?q={}&access_token={}", username, access_token.to_string());
let http_client = reqwest::Client::new().expect("Create HTTP client is failed");
let mut buffer = String::new();
http_client
.get(url.as_str())
.send()
.expect("send Request failed")
.read_to_string(&mut buffer)
.expect("read JSON string failed")
;
Ok(Response::with((ContentType::json().0, status::Ok, buffer)))
},
api_hashtag: get "/api/hashtag" => move |req: &mut Request| {
fn get_query(x: Option<&Vec<String>>) -> &str {
match x {
Some(y) => match y.first() {
Some(z) => z.as_str(),
None => "",
},
None => "",
}
}
let access_token = match try!(req.session().get::<AccessToken>()) {
Some(y) => y.0,
None => "Access token is Not Found".to_string(),
};
let (user_id, hashtag) = match req.get_ref::<UrlEncodedQuery>() {
Ok(queries) => (get_query(queries.get("user_id")), get_query(queries.get("hashtag"))),
_ => ("", "")
};
let url = format!(
"https://api.instagram.com/v1/users/{}/media/recent/?access_token={}",
user_id.to_string(),
access_token.to_string()
);
let http_client = reqwest::Client::new().expect("Create HTTP client is failed");
let response = http_client
.get(url.as_str())
.send()
.expect("send Request failed")
.json::<HashMap<String, Value>>()
.expect("Parse JSON failed")
.into_iter()
.filter(|x| { (&x.0).as_str() == "data" })
.map(|x| {
match x.1 {
Value::Array(ys) => {
ys
.into_iter()
.filter(|media| {
if let &Value::Object(ref m) = media {
if let &Value::Array(ref tags) = m.get("tags").unwrap() {
tags.contains(&Value::String(hashtag.to_string()))
} else { false }
} else { false }
})
.map(value_to_json)
.collect::<Vec<Json>>()
},
_ => vec![],
}
})
.fold(vec![], |mut acc, mut xs| {
acc.append(&mut xs);
acc
})
;
Ok(Response::with((ContentType::json().0, status::Ok, Json::Array(response).to_string())))
}
);
let mut hbse = HandlebarsEngine::new();
hbse.add(Box::new(DirectorySource::new("./templates/", ".hbs")));
hbse.reload().expect("template can't reload collectory.");
let mut mount = Mount::new();
mount
.mount("/css", Static::new(Path::new("assets/css")))
.mount("/js", Static::new(Path::new("assets/js")))
.mount("/", router);
let mut chain = Chain::new(mount);
let session = SessionStorage::new(SignedCookieBackend::new(b"my_cookie_secret".to_vec()));
chain.link_around(session);
chain.link_after(hbse);
println!("Server start on {}", port);
Iron::new(chain).http(format!("0.0.0.0:{}", port)).expect("Server start process is failed.");
}
| {
match x {
Value::Number(ref x) if x.is_i64() => Json::I64(x.as_i64().unwrap()),
Value::Number(ref x) if x.is_u64() => Json::U64(x.as_u64().unwrap()),
Value::Number(ref x) if x.is_f64() => Json::F64(x.as_f64().unwrap()),
Value::String(x) => Json::String(x),
Value::Array(x) => Json::Array(x
.into_iter()
.map(|x| value_to_json(x))
.collect::<Vec<Json>>()
),
Value::Object(x) => {
let mut buf = BTreeMap::<String, Json>::new();
for (key, value) in x.into_iter() {
buf.insert(key, value_to_json(value));
}
Json::Object(buf)
},
Value::Bool(x) => Json::Boolean(x),
_ => Json::Null,
}
} | identifier_body |
main.rs | extern crate dotenv;
extern crate iron;
extern crate handlebars;
extern crate handlebars_iron as hbs;
#[macro_use]
extern crate router;
#[cfg(not(feature = "serde_type"))]
extern crate rustc_serialize;
extern crate mount;
extern crate staticfile;
extern crate reqwest;
extern crate serde_json;
extern crate iron_sessionstorage;
extern crate urlencoded;
use iron::prelude::*;
use iron::headers::ContentType;
use iron::modifiers::Redirect;
use iron::{Url, status};
use hbs::{Template, HandlebarsEngine, DirectorySource};
use rustc_serialize::json::{Json};
use staticfile::Static;
use mount::Mount;
use serde_json::Value;
use iron_sessionstorage::traits::*;
use iron_sessionstorage::SessionStorage;
use iron_sessionstorage::backends::SignedCookieBackend;
use urlencoded::UrlEncodedQuery;
use dotenv::dotenv;
use std::env;
use std::io::Read;
use std::collections::BTreeMap;
use std::path::Path;
use std::collections::HashMap;
static INSTAGRAM_OAUTH_URI: &'static str = "https://api.instagram.com/oauth/authorize/";
static GRANT_TYPE: &'static str = "authorization_code";
fn value_to_json(x: Value) -> Json {
match x {
Value::Number(ref x) if x.is_i64() => Json::I64(x.as_i64().unwrap()),
Value::Number(ref x) if x.is_u64() => Json::U64(x.as_u64().unwrap()),
Value::Number(ref x) if x.is_f64() => Json::F64(x.as_f64().unwrap()),
Value::String(x) => Json::String(x),
Value::Array(x) => Json::Array(x
.into_iter()
.map(|x| value_to_json(x))
.collect::<Vec<Json>>()
),
Value::Object(x) => {
let mut buf = BTreeMap::<String, Json>::new();
for (key, value) in x.into_iter() {
buf.insert(key, value_to_json(value));
}
Json::Object(buf)
},
Value::Bool(x) => Json::Boolean(x),
_ => Json::Null,
}
}
#[derive(Debug)]
struct | (String);
impl iron_sessionstorage::Value for AccessToken {
fn get_key() -> &'static str { "access_token" }
fn into_raw(self) -> String { self.0 }
fn from_raw(value: String) -> Option<Self> {
Some(AccessToken(value))
}
}
fn main() {
dotenv().ok();
let port = match env::var("PORT") {
Ok(p) => p,
Err(_) => "3000".to_string(),
};
let redirect_url = env::var("REDIRECT_URL").expect("lack of redirect url.");
let client_id = env::var("INSTAGRAM_CLIENT_ID").expect("lack of instagram client id.");
let client_secret = env::var("INSTAGRAM_CLIENT_SECRET").expect("lack of instagram client secret.");
let authorization_uri = format!("{}?client_id={}&redirect_uri={}&response_type=code&scope={}",
INSTAGRAM_OAUTH_URI,
client_id,
redirect_url,
"public_content".to_string());
let router = router!(
index: get "/" => move |req: &mut Request| {
match req.url.clone().query() {
Some(query) => {
let code = query.split("=").last().expect("query parsing is failed").to_string();
let params = [
("client_id", client_id.clone()),
("client_secret", client_secret.clone()),
("grant_type", GRANT_TYPE.clone().to_string()),
("redirect_uri", redirect_url.clone()),
("code", code.to_string())
];
let http_client = reqwest::Client::new().expect("Create HTTP client is failed");
let mut result = http_client.post("https://api.instagram.com/oauth/access_token")
.form(¶ms)
.send()
.expect("send Request failed");
let result_json = result.json::<HashMap<String, Value>>().expect("Parse JSON failed");
let data = match result_json.get("access_token") {
Some(at) => {
let access_token = at.as_str().unwrap();
req.session().set(AccessToken(access_token.to_string())).unwrap();
let url = format!("https://api.instagram.com/v1/tags/nofilter/media/recent?access_token={}", access_token);
http_client
.get(url.as_str())
.send()
.expect("send Request failed")
.json::<HashMap<String, Value>>()
.expect("Parse JSON failed")
.into_iter()
.fold(HashMap::<String, Json>::new(), |mut acc, (key, value)| {
acc.insert(key, value_to_json(value));
acc
})
},
None => HashMap::<String, Json>::new(),
};
let mut resp = Response::new();
resp.set_mut(Template::new("index", data)).set_mut(status::Ok);
Ok(Response::with((status::Found, Redirect(
Url::parse(redirect_url.as_str()).expect("parse url failed")
))))
},
None => {
let mut resp = Response::new();
let data = BTreeMap::<String, Json>::new();
resp.set_mut(Template::new("index", data)).set_mut(status::Ok);
Ok(resp)
},
}
},
oauth: get "/oauth" => move |_: &mut Request| {
Ok(Response::with((status::Found, Redirect(
Url::parse(authorization_uri.as_str()).expect(format!("authorization_uri is invalid => {}", authorization_uri).as_str())
))))
},
api_username: get "/api/username" => move |req: &mut Request| {
let username = match req.url.clone().query() {
Some(query) => query.split("=").last().expect("query parsing is failed"),
_ => ""
}.to_string();
let access_token = match try!(req.session().get::<AccessToken>()) {
Some(y) => y.0,
None => "Access token is Not Found".to_string(),
};
if access_token.len() == 0 {
return Ok(Response::with((ContentType::json().0, status::Ok, "{}")))
};
let url = format!("https://api.instagram.com/v1/users/search?q={}&access_token={}", username, access_token.to_string());
let http_client = reqwest::Client::new().expect("Create HTTP client is failed");
let mut buffer = String::new();
http_client
.get(url.as_str())
.send()
.expect("send Request failed")
.read_to_string(&mut buffer)
.expect("read JSON string failed")
;
Ok(Response::with((ContentType::json().0, status::Ok, buffer)))
},
api_hashtag: get "/api/hashtag" => move |req: &mut Request| {
fn get_query(x: Option<&Vec<String>>) -> &str {
match x {
Some(y) => match y.first() {
Some(z) => z.as_str(),
None => "",
},
None => "",
}
}
let access_token = match try!(req.session().get::<AccessToken>()) {
Some(y) => y.0,
None => "Access token is Not Found".to_string(),
};
let (user_id, hashtag) = match req.get_ref::<UrlEncodedQuery>() {
Ok(queries) => (get_query(queries.get("user_id")), get_query(queries.get("hashtag"))),
_ => ("", "")
};
let url = format!(
"https://api.instagram.com/v1/users/{}/media/recent/?access_token={}",
user_id.to_string(),
access_token.to_string()
);
let http_client = reqwest::Client::new().expect("Create HTTP client is failed");
let response = http_client
.get(url.as_str())
.send()
.expect("send Request failed")
.json::<HashMap<String, Value>>()
.expect("Parse JSON failed")
.into_iter()
.filter(|x| { (&x.0).as_str() == "data" })
.map(|x| {
match x.1 {
Value::Array(ys) => {
ys
.into_iter()
.filter(|media| {
if let &Value::Object(ref m) = media {
if let &Value::Array(ref tags) = m.get("tags").unwrap() {
tags.contains(&Value::String(hashtag.to_string()))
} else { false }
} else { false }
})
.map(value_to_json)
.collect::<Vec<Json>>()
},
_ => vec![],
}
})
.fold(vec![], |mut acc, mut xs| {
acc.append(&mut xs);
acc
})
;
Ok(Response::with((ContentType::json().0, status::Ok, Json::Array(response).to_string())))
}
);
let mut hbse = HandlebarsEngine::new();
hbse.add(Box::new(DirectorySource::new("./templates/", ".hbs")));
hbse.reload().expect("template can't reload collectory.");
let mut mount = Mount::new();
mount
.mount("/css", Static::new(Path::new("assets/css")))
.mount("/js", Static::new(Path::new("assets/js")))
.mount("/", router);
let mut chain = Chain::new(mount);
let session = SessionStorage::new(SignedCookieBackend::new(b"my_cookie_secret".to_vec()));
chain.link_around(session);
chain.link_after(hbse);
println!("Server start on {}", port);
Iron::new(chain).http(format!("0.0.0.0:{}", port)).expect("Server start process is failed.");
}
| AccessToken | identifier_name |
MySVN.py | #!/usr/bin/env python
"""
svn2svn.py
Replicate changesets from one SVN repository to another,
includes diffs, comments, and Dates of each revision.
It's also possible to retain the Author info if the Target SVN URL
is in a local filesystem (ie, running svn2svn.py on Target SVN server),
or if Target SVN URL is managed through ssh tunnel.
In later case, please run 'ssh-add' (adds RSA or DSA identities to
the authentication agent) before invoking svn2svn.py.
For example (in Unix environment):
$ exec /usr/bin/ssh-agent $SHELL
$ /usr/bin/ssh-add
Enter passphrase for /home/user/.ssh/id_dsa:
Identity added: /home/user/.ssh/id_dsa (/home/user/.ssh/id_dsa)
$ python ./svn2svn.py -a SOURCE TARGET
Written and used on Ubuntu 7.04 (Feisty Fawn).
Provided as-is and absolutely no warranty - aka Don't bet your life on it.
This tool re-used some modules from svnclient.py on project hgsvn
(a tool can create Mercurial repository from SVN repository):
http://cheeseshop.python.org/pypi/hgsvn
License: GPLv2, the same as hgsvn.
version 0.1.1; Jul 31, 2007; simford dot dong at gmail dot com
"""
import os
import sys
import time
import locale
import shutil
import select
import calendar
import traceback
from optparse import OptionParser
from subprocess import Popen, PIPE
from datetime import datetime
try:
from xml.etree import cElementTree as ET
except ImportError:
try:
from xml.etree import ElementTree as ET
except ImportError:
try:
import cElementTree as ET
except ImportError:
from elementtree import ElementTree as ET
svn_log_args = ['log', '--xml', '-v']
svn_info_args = ['info', '--xml']
svn_checkout_args = ['checkout', '-q']
svn_status_args = ['status', '--xml', '-v', '--ignore-externals']
# define exception class
class ExternalCommandFailed(RuntimeError):
"""
An external command failed.
"""
class ParameterError(RuntimeError):
"""
An external command failed.
"""
def | (message, raise_exception = True):
"""
Display error message, then terminate.
"""
print "Error:", message
print
if raise_exception:
raise ExternalCommandFailed
else:
sys.exit(1)
# Windows compatibility code by Bill Baxter
if os.name == "nt":
def find_program(name):
"""
Find the name of the program for Popen.
Windows is finnicky about having the complete file name. Popen
won't search the %PATH% for you automatically.
(Adapted from ctypes.find_library)
"""
# See MSDN for the REAL search order.
base, ext = os.path.splitext(name)
if ext:
exts = [ext]
else:
exts = ['.bat', '.exe']
for directory in os.environ['PATH'].split(os.pathsep):
for e in exts:
fname = os.path.join(directory, base + e)
if os.path.exists(fname):
return fname
return None
else:
def find_program(name):
"""
Find the name of the program for Popen.
On Unix, popen isn't picky about having absolute paths.
"""
return name
def shell_quote(s):
if os.name == "nt":
q = '"'
else:
q = "'"
return q + s.replace('\\', '\\\\').replace("'", "'\"'\"'") + q
locale_encoding = locale.getpreferredencoding()
def run_svn(args, fail_if_stderr=False, encoding="utf-8"):
"""
Run svn cmd in PIPE
exit if svn cmd failed
"""
def _transform_arg(a):
if isinstance(a, unicode):
a = a.encode(encoding or locale_encoding)
elif not isinstance(a, str):
a = str(a)
return a
t_args = map(_transform_arg, args)
cmd = find_program("svn")
cmd_string = str(" ".join(map(shell_quote, [cmd] + t_args)))
print "*", cmd_string
pipe = Popen([cmd] + t_args, executable=cmd, stdout=PIPE, stderr=PIPE)
out, err = pipe.communicate()
if pipe.returncode != 0 or (fail_if_stderr and err.strip()):
display_error("External program failed (return code %d): %s\n%s"
% (pipe.returncode, cmd_string, err))
return out
def svn_date_to_timestamp(svn_date):
"""
Parse an SVN date as read from the XML output and
return the corresponding timestamp.
"""
# Strip microseconds and timezone (always UTC, hopefully)
# XXX there are various ISO datetime parsing routines out there,
# cf. http://seehuhn.de/comp/pdate
date = svn_date.split('.', 2)[0]
time_tuple = time.strptime(date, "%Y-%m-%dT%H:%M:%S")
return calendar.timegm(time_tuple)
def parse_svn_info_xml(xml_string):
"""
Parse the XML output from an "svn info" command and extract
useful information as a dict.
"""
d = {}
tree = ET.fromstring(xml_string)
entry = tree.find('.//entry')
if entry:
d['url'] = entry.find('url').text
d['revision'] = int(entry.get('revision'))
d['repos_url'] = tree.find('.//repository/root').text
d['last_changed_rev'] = int(tree.find('.//commit').get('revision'))
d['kind'] = entry.get('kind')
return d
def parse_svn_log_xml(xml_string):
"""
Parse the XML output from an "svn log" command and extract
useful information as a list of dicts (one per log changeset).
"""
l = []
tree = ET.fromstring(xml_string)
for entry in tree.findall('logentry'):
d = {}
d['revision'] = int(entry.get('revision'))
# Some revisions don't have authors, most notably
# the first revision in a repository.
author = entry.find('author')
d['author'] = author is not None and author.text or None
d['date'] = svn_date_to_timestamp(entry.find('date').text)
# Some revisions may have empty commit message
message = entry.find('msg')
message = message is not None and message.text is not None \
and message.text.strip() or ""
# Replace DOS return '\r\n' and MacOS return '\r' with unix return '\n'
d['message'] = message.replace('\r\n', '\n').replace('\n\r', '\n'). \
replace('\r', '\n')
paths = d['changed_paths'] = []
for path in entry.findall('.//path'):
copyfrom_rev = path.get('copyfrom-rev')
if copyfrom_rev:
copyfrom_rev = int(copyfrom_rev)
paths.append({
'path': path.text,
'action': path.get('action'),
'copyfrom_path': path.get('copyfrom-path'),
'copyfrom_revision': copyfrom_rev,
})
l.append(d)
return l
def parse_svn_status_xml(xml_string, base_dir=None):
"""
Parse the XML output from an "svn status" command and extract
useful info as a list of dicts (one per status entry).
"""
l = []
tree = ET.fromstring(xml_string)
for entry in tree.findall('.//entry'):
d = {}
path = entry.get('path')
if base_dir is not None:
assert path.startswith(base_dir)
path = path[len(base_dir):].lstrip('/\\')
d['path'] = path
wc_status = entry.find('wc-status')
if wc_status.get('item') == 'external':
d['type'] = 'external'
elif wc_status.get('revision') is not None:
d['type'] = 'normal'
else:
d['type'] = 'unversioned'
l.append(d)
return l
def get_svn_info(svn_url_or_wc, rev_number=None):
"""
Get SVN information for the given URL or working copy,
with an optionally specified revision number.
Returns a dict as created by parse_svn_info_xml().
"""
if rev_number is not None:
args = [svn_url_or_wc + "@" + str(rev_number)]
else:
args = [svn_url_or_wc]
xml_string = run_svn(svn_info_args + args,
fail_if_stderr=True)
return parse_svn_info_xml(xml_string)
def svn_checkout(svn_url, checkout_dir, rev_number=None):
"""
Checkout the given URL at an optional revision number.
"""
args = []
if rev_number is not None:
args += ['-r', rev_number]
args += [svn_url, checkout_dir]
return run_svn(svn_checkout_args + args)
def run_svn_log(svn_url_or_wc, rev_start, rev_end, limit, stop_on_copy=False):
"""
Fetch up to 'limit' SVN log entries between the given revisions.
"""
if stop_on_copy:
args = ['--stop-on-copy']
else:
args = []
args += ['-r', '%s:%s' % (rev_start, rev_end), '--limit',
str(limit), svn_url_or_wc]
xml_string = run_svn(svn_log_args + args)
return parse_svn_log_xml(xml_string)
def get_svn_status(svn_wc):
"""
Get SVN status information about the given working copy.
"""
# Ensure proper stripping by canonicalizing the path
svn_wc = os.path.abspath(svn_wc)
args = [svn_wc]
xml_string = run_svn(svn_status_args + args)
return parse_svn_status_xml(xml_string, svn_wc)
def get_one_svn_log_entry(svn_url, rev_start, rev_end, stop_on_copy=False):
"""
Get the first SVN log entry in the requested revision range.
"""
entries = run_svn_log(svn_url, rev_start, rev_end, 1, stop_on_copy)
if not entries:
display_error("No SVN log for %s between revisions %s and %s" %
(svn_url, rev_start, rev_end))
return entries[0]
def get_first_svn_log_entry(svn_url, rev_start, rev_end):
"""
Get the first log entry after/at the given revision number in an SVN branch.
By default the revision number is set to 0, which will give you the log
entry corresponding to the branch creaction.
NOTE: to know whether the branch creation corresponds to an SVN import or
a copy from another branch, inspect elements of the 'changed_paths' entry
in the returned dictionary.
"""
return get_one_svn_log_entry(svn_url, rev_start, rev_end, stop_on_copy=True)
def get_last_svn_log_entry(svn_url, rev_start, rev_end):
"""
Get the last log entry before/at the given revision number in an SVN branch.
By default the revision number is set to HEAD, which will give you the log
entry corresponding to the latest commit in branch.
"""
return get_one_svn_log_entry(svn_url, rev_end, rev_start, stop_on_copy=True)
log_duration_threshold = 10.0
log_min_chunk_length = 10
def iter_svn_log_entries(svn_url, first_rev, last_rev):
"""
Iterate over SVN log entries between first_rev and last_rev.
This function features chunked log fetching so that it isn't too nasty
to the SVN server if many entries are requested.
"""
cur_rev = first_rev
chunk_length = log_min_chunk_length
chunk_interval_factor = 1.0
while last_rev == "HEAD" or cur_rev <= last_rev:
start_t = time.time()
stop_rev = min(last_rev, cur_rev + int(chunk_length * chunk_interval_factor))
entries = run_svn_log(svn_url, cur_rev, stop_rev, chunk_length)
duration = time.time() - start_t
if not entries:
if stop_rev == last_rev:
break
cur_rev = stop_rev + 1
chunk_interval_factor *= 2.0
continue
for e in entries:
yield e
cur_rev = e['revision'] + 1
# Adapt chunk length based on measured request duration
if duration < log_duration_threshold:
chunk_length = int(chunk_length * 2.0)
elif duration > log_duration_threshold * 2:
chunk_length = max(log_min_chunk_length, int(chunk_length / 2.0))
def commit_from_svn_log_entry(entry, files=None, keep_author=False):
"""
Given an SVN log entry and an optional sequence of files, do an svn commit.
"""
# This will use the local timezone for displaying commit times
timestamp = int(entry['date'])
svn_date = str(datetime.fromtimestamp(timestamp))
# Uncomment this one one if you prefer UTC commit times
#svn_date = "%d 0" % timestamp
if keep_author:
options = ["ci", "--force-log", "-m", entry['message'] + "\nDate: " + svn_date, "--username", entry['author']]
else:
options = ["ci", "--force-log", "-m", entry['message'] + "\nDate: " + svn_date + "\nAuthor: " + entry['author']]
if files:
options += list(files)
run_svn(options)
def svn_add_dir(p):
# set p = "." when p = ""
#p = p.strip() or "."
if p.strip() and not os.path.exists(p + os.sep + ".svn"):
svn_add_dir(os.path.dirname(p))
if not os.path.exists(p):
os.makedirs(p)
run_svn(["add", p])
def pull_svn_rev(log_entry, svn_url, target_url, svn_path, original_wc, keep_author=False):
"""
Pull SVN changes from the given log entry.
Returns the new SVN revision.
If an exception occurs, it will rollback to revision 'svn_rev - 1'.
"""
svn_rev = log_entry['revision']
run_svn(["up", "--ignore-externals", "-r", svn_rev, original_wc])
removed_paths = []
merged_paths = []
unrelated_paths = []
commit_paths = []
for d in log_entry['changed_paths']:
# e.g. u'/branches/xmpp/twisted/words/test/test.py'
p = d['path']
if not p.startswith(svn_path + "/"):
# Ignore changed files that are not part of this subdir
if p != svn_path:
unrelated_paths.append(p)
continue
# e.g. u'twisted/words/test/test.py'
p = p[len(svn_path):].strip("/")
# Record for commit
action = d['action']
if action not in 'MARD':
display_error("In SVN rev. %d: action '%s' not supported. \
Please report a bug!" % (svn_rev, action))
if len (commit_paths) < 100:
commit_paths.append(p)
# Detect special cases
old_p = d['copyfrom_path']
if old_p and old_p.startswith(svn_path + "/"):
old_p = old_p[len(svn_path):].strip("/")
# Both paths can be identical if copied from an old rev.
# We treat like it a normal change.
if old_p != p:
if not os.path.exists(p + os.sep + '.svn'):
svn_add_dir(os.path.dirname(p))
run_svn(["up", old_p])
run_svn(["copy", old_p, p])
if os.path.isfile(p):
shutil.copy(original_wc + os.sep + p, p)
if action == 'R':
removed_paths.append(old_p)
if len (commit_paths) < 100:
commit_paths.append(old_p)
continue
if action == 'A':
if os.path.isdir(original_wc + os.sep + p):
svn_add_dir(p)
else:
p_path = os.path.dirname(p).strip() or '.'
svn_add_dir(p_path)
shutil.copy(original_wc + os.sep + p, p)
run_svn(["add", p])
elif action == 'D':
removed_paths.append(p)
else: # action == 'M'
merged_paths.append(p)
if removed_paths:
for r in removed_paths:
run_svn(["up", r])
run_svn(["remove", "--force", r])
if merged_paths:
for m in merged_paths:
run_svn(["up", m])
m_url = svn_url + "/" + m
out = run_svn(["merge", "-c", str(svn_rev), "--non-recursive",
m_url+"@"+str(svn_rev), m])
# if conflicts, use the copy from original_wc
if out and out.split()[0] == 'C':
print "\n### Conflicts ignored: %s, in revision: %s\n" \
% (m, svn_rev)
run_svn(["revert", "--recursive", m])
if os.path.isfile(m):
shutil.copy(original_wc + os.sep + m, m)
if unrelated_paths:
print "Unrelated paths: "
print "*", unrelated_paths
## too many files
if len (commit_paths) > 99:
commit_paths = []
try:
commit_from_svn_log_entry(log_entry, commit_paths,
keep_author=keep_author)
except ExternalCommandFailed:
# try to ignore the Properties conflicts on files and dirs
# use the copy from original_wc
has_Conflict = False
for d in log_entry['changed_paths']:
p = d['path']
p = p[len(svn_path):].strip("/")
if os.path.isfile(p):
if os.path.isfile(p + ".prej"):
has_Conflict = True
shutil.copy(original_wc + os.sep + p, p)
p2=os.sep + p.replace('_', '__').replace('/', '_') \
+ ".prej-" + str(svn_rev)
shutil.move(p + ".prej", os.path.dirname(original_wc) + p2)
w="\n### Properties conflicts ignored:"
print "%s %s, in revision: %s\n" % (w, p, svn_rev)
elif os.path.isdir(p):
if os.path.isfile(p + os.sep + "dir_conflicts.prej"):
has_Conflict = True
p2=os.sep + p.replace('_', '__').replace('/', '_') \
+ "_dir__conflicts.prej-" + str(svn_rev)
shutil.move(p + os.sep + "dir_conflicts.prej",
os.path.dirname(original_wc) + p2)
w="\n### Properties conflicts ignored:"
print "%s %s, in revision: %s\n" % (w, p, svn_rev)
out = run_svn(["propget", "svn:ignore",
original_wc + os.sep + p])
if out:
run_svn(["propset", "svn:ignore", out.strip(), p])
out = run_svn(["propget", "svn:externel",
original_wc + os.sep + p])
if out:
run_svn(["propset", "svn:external", out.strip(), p])
# try again
if has_Conflict:
commit_from_svn_log_entry(log_entry, commit_paths,
keep_author=keep_author)
else:
raise ExternalCommandFailed
def main():
usage = "Usage: %prog [-a] [-c] [-r SVN rev] <Source SVN URL> <Target SVN URL>"
parser = OptionParser(usage)
parser.add_option("-a", "--keep-author", action="store_true",
dest="keep_author", help="Keep revision Author or not")
parser.add_option("-c", "--continue-from-break", action="store_true",
dest="cont_from_break",
help="Continue from previous break")
parser.add_option("-r", "--svn-rev", type="int", dest="svn_rev",
help="SVN revision to checkout from")
(options, args) = parser.parse_args()
if len(args) != 2:
display_error("incorrect number of arguments\n\nTry: svn2svn.py --help",
False)
source_url = args.pop(0).rstrip("/")
target_url = args.pop(0).rstrip("/")
if options.keep_author:
keep_author = True
else:
keep_author = False
# Find the greatest_rev
# don't use 'svn info' to get greatest_rev, it doesn't work sometimes
svn_log = get_one_svn_log_entry(source_url, "HEAD", "HEAD")
greatest_rev = svn_log['revision']
original_wc = "_original_wc"
dup_wc = "_dup_wc"
## old working copy does not exist, disable continue mode
if not os.path.exists(dup_wc):
options.cont_from_break = False
if not options.cont_from_break:
# Warn if Target SVN URL existed
cmd = find_program("svn")
pipe = Popen([cmd] + ["list"] + [target_url], executable=cmd,
stdout=PIPE, stderr=PIPE)
out, err = pipe.communicate()
if pipe.returncode == 0:
print "Target SVN URL: %s existed!" % target_url
if out:
print out
print "Press 'Enter' to Continue, 'Ctrl + C' to Cancel..."
print "(Timeout in 5 seconds)"
rfds, wfds, efds = select.select([sys.stdin], [], [], 5)
# Get log entry for the SVN revision we will check out
if options.svn_rev:
# If specify a rev, get log entry just before or at rev
svn_start_log = get_last_svn_log_entry(source_url, 1,
options.svn_rev)
else:
# Otherwise, get log entry of branch creation
svn_start_log = get_first_svn_log_entry(source_url, 1,
greatest_rev)
# This is the revision we will checkout from
svn_rev = svn_start_log['revision']
# Check out first revision (changeset) from Source SVN URL
if os.path.exists(original_wc):
shutil.rmtree(original_wc)
svn_checkout(source_url, original_wc, svn_rev)
# Import first revision (changeset) into Target SVN URL
timestamp = int(svn_start_log['date'])
svn_date = str(datetime.fromtimestamp(timestamp))
if keep_author:
run_svn(["import", original_wc, target_url, "-m",
svn_start_log['message'] + "\nDate: " + svn_date,
"--username", svn_start_log['author']])
else:
run_svn(["import", original_wc, target_url, "-m",
svn_start_log['message'] + "\nDate: " + svn_date +
"\nAuthor: " + svn_start_log['author']])
# Check out a working copy
if os.path.exists(dup_wc):
shutil.rmtree(dup_wc)
svn_checkout(target_url, dup_wc)
original_wc = os.path.abspath(original_wc)
dup_wc = os.path.abspath(dup_wc)
os.chdir(dup_wc)
# Get SVN info
svn_info = get_svn_info(original_wc)
# e.g. u'svn://svn.twistedmatrix.com/svn/Twisted'
repos_url = svn_info['repos_url']
# e.g. u'svn://svn.twistedmatrix.com/svn/Twisted/branches/xmpp'
svn_url = svn_info['url']
assert svn_url.startswith(repos_url)
# e.g. u'/branches/xmpp'
svn_path = svn_url[len(repos_url):]
# e.g. 'xmpp'
svn_branch = svn_url.split("/")[-1]
if options.cont_from_break:
svn_rev = svn_info['revision'] - 1
if svn_rev < 1:
svn_rev = 1
# Load SVN log starting from svn_rev + 1
it_log_entries = iter_svn_log_entries(svn_url, svn_rev + 1, greatest_rev)
try:
for log_entry in it_log_entries:
pull_svn_rev(log_entry, svn_url, target_url, svn_path,
original_wc, keep_author)
except KeyboardInterrupt:
print "\nStopped by user."
run_svn(["cleanup"])
run_svn(["revert", "--recursive", "."])
except:
print "\nCommand failed with following error:\n"
traceback.print_exc()
run_svn(["cleanup"])
run_svn(["revert", "--recursive", "."])
finally:
run_svn(["up"])
print "\nFinished!"
if __name__ == "__main__":
main()
| display_error | identifier_name |
MySVN.py | #!/usr/bin/env python
"""
svn2svn.py
Replicate changesets from one SVN repository to another,
includes diffs, comments, and Dates of each revision.
It's also possible to retain the Author info if the Target SVN URL
is in a local filesystem (ie, running svn2svn.py on Target SVN server),
or if Target SVN URL is managed through ssh tunnel.
In later case, please run 'ssh-add' (adds RSA or DSA identities to
the authentication agent) before invoking svn2svn.py.
For example (in Unix environment):
$ exec /usr/bin/ssh-agent $SHELL
$ /usr/bin/ssh-add
Enter passphrase for /home/user/.ssh/id_dsa:
Identity added: /home/user/.ssh/id_dsa (/home/user/.ssh/id_dsa)
$ python ./svn2svn.py -a SOURCE TARGET
Written and used on Ubuntu 7.04 (Feisty Fawn).
Provided as-is and absolutely no warranty - aka Don't bet your life on it.
This tool re-used some modules from svnclient.py on project hgsvn
(a tool can create Mercurial repository from SVN repository):
http://cheeseshop.python.org/pypi/hgsvn
License: GPLv2, the same as hgsvn.
version 0.1.1; Jul 31, 2007; simford dot dong at gmail dot com
"""
import os
import sys
import time
import locale
import shutil
import select
import calendar
import traceback
from optparse import OptionParser
from subprocess import Popen, PIPE
from datetime import datetime
try:
from xml.etree import cElementTree as ET
except ImportError:
try:
from xml.etree import ElementTree as ET
except ImportError:
try:
import cElementTree as ET
except ImportError:
from elementtree import ElementTree as ET
svn_log_args = ['log', '--xml', '-v']
svn_info_args = ['info', '--xml']
svn_checkout_args = ['checkout', '-q']
svn_status_args = ['status', '--xml', '-v', '--ignore-externals']
# define exception class
class ExternalCommandFailed(RuntimeError):
"""
An external command failed.
"""
class ParameterError(RuntimeError):
"""
An external command failed.
"""
def display_error(message, raise_exception = True):
"""
Display error message, then terminate.
"""
print "Error:", message
print
if raise_exception:
raise ExternalCommandFailed
else:
sys.exit(1)
# Windows compatibility code by Bill Baxter
if os.name == "nt":
def find_program(name):
"""
Find the name of the program for Popen.
Windows is finnicky about having the complete file name. Popen
won't search the %PATH% for you automatically.
(Adapted from ctypes.find_library)
"""
# See MSDN for the REAL search order.
base, ext = os.path.splitext(name)
if ext:
exts = [ext]
else:
exts = ['.bat', '.exe']
for directory in os.environ['PATH'].split(os.pathsep):
for e in exts:
fname = os.path.join(directory, base + e)
if os.path.exists(fname):
return fname
return None
else:
def find_program(name):
"""
Find the name of the program for Popen.
On Unix, popen isn't picky about having absolute paths.
"""
return name
def shell_quote(s):
if os.name == "nt":
q = '"'
else:
q = "'"
return q + s.replace('\\', '\\\\').replace("'", "'\"'\"'") + q
locale_encoding = locale.getpreferredencoding()
def run_svn(args, fail_if_stderr=False, encoding="utf-8"):
"""
Run svn cmd in PIPE
exit if svn cmd failed
"""
def _transform_arg(a):
if isinstance(a, unicode):
a = a.encode(encoding or locale_encoding)
elif not isinstance(a, str):
a = str(a)
return a
t_args = map(_transform_arg, args)
cmd = find_program("svn")
cmd_string = str(" ".join(map(shell_quote, [cmd] + t_args)))
print "*", cmd_string
pipe = Popen([cmd] + t_args, executable=cmd, stdout=PIPE, stderr=PIPE)
out, err = pipe.communicate()
if pipe.returncode != 0 or (fail_if_stderr and err.strip()):
display_error("External program failed (return code %d): %s\n%s"
% (pipe.returncode, cmd_string, err))
return out
def svn_date_to_timestamp(svn_date):
"""
Parse an SVN date as read from the XML output and
return the corresponding timestamp.
"""
# Strip microseconds and timezone (always UTC, hopefully)
# XXX there are various ISO datetime parsing routines out there,
# cf. http://seehuhn.de/comp/pdate
date = svn_date.split('.', 2)[0]
time_tuple = time.strptime(date, "%Y-%m-%dT%H:%M:%S")
return calendar.timegm(time_tuple)
def parse_svn_info_xml(xml_string):
"""
Parse the XML output from an "svn info" command and extract
useful information as a dict.
"""
d = {}
tree = ET.fromstring(xml_string)
entry = tree.find('.//entry')
if entry:
d['url'] = entry.find('url').text
d['revision'] = int(entry.get('revision'))
d['repos_url'] = tree.find('.//repository/root').text
d['last_changed_rev'] = int(tree.find('.//commit').get('revision'))
d['kind'] = entry.get('kind')
return d
def parse_svn_log_xml(xml_string):
"""
Parse the XML output from an "svn log" command and extract
useful information as a list of dicts (one per log changeset).
"""
l = []
tree = ET.fromstring(xml_string)
for entry in tree.findall('logentry'):
d = {}
d['revision'] = int(entry.get('revision'))
# Some revisions don't have authors, most notably
# the first revision in a repository.
author = entry.find('author')
d['author'] = author is not None and author.text or None
d['date'] = svn_date_to_timestamp(entry.find('date').text)
# Some revisions may have empty commit message
message = entry.find('msg')
message = message is not None and message.text is not None \
and message.text.strip() or ""
# Replace DOS return '\r\n' and MacOS return '\r' with unix return '\n'
d['message'] = message.replace('\r\n', '\n').replace('\n\r', '\n'). \
replace('\r', '\n')
paths = d['changed_paths'] = []
for path in entry.findall('.//path'):
copyfrom_rev = path.get('copyfrom-rev')
if copyfrom_rev:
copyfrom_rev = int(copyfrom_rev)
paths.append({
'path': path.text,
'action': path.get('action'),
'copyfrom_path': path.get('copyfrom-path'),
'copyfrom_revision': copyfrom_rev,
})
l.append(d)
return l
def parse_svn_status_xml(xml_string, base_dir=None):
"""
Parse the XML output from an "svn status" command and extract
useful info as a list of dicts (one per status entry).
"""
l = []
tree = ET.fromstring(xml_string)
for entry in tree.findall('.//entry'):
d = {}
path = entry.get('path')
if base_dir is not None:
assert path.startswith(base_dir)
path = path[len(base_dir):].lstrip('/\\')
d['path'] = path
wc_status = entry.find('wc-status')
if wc_status.get('item') == 'external':
d['type'] = 'external'
elif wc_status.get('revision') is not None:
d['type'] = 'normal'
else:
d['type'] = 'unversioned'
l.append(d)
return l
def get_svn_info(svn_url_or_wc, rev_number=None):
"""
Get SVN information for the given URL or working copy,
with an optionally specified revision number.
Returns a dict as created by parse_svn_info_xml().
"""
if rev_number is not None:
args = [svn_url_or_wc + "@" + str(rev_number)]
else:
args = [svn_url_or_wc]
xml_string = run_svn(svn_info_args + args,
fail_if_stderr=True)
return parse_svn_info_xml(xml_string)
def svn_checkout(svn_url, checkout_dir, rev_number=None):
"""
Checkout the given URL at an optional revision number.
"""
args = []
if rev_number is not None:
args += ['-r', rev_number]
args += [svn_url, checkout_dir]
return run_svn(svn_checkout_args + args)
def run_svn_log(svn_url_or_wc, rev_start, rev_end, limit, stop_on_copy=False):
"""
Fetch up to 'limit' SVN log entries between the given revisions.
"""
if stop_on_copy:
args = ['--stop-on-copy']
else:
args = []
args += ['-r', '%s:%s' % (rev_start, rev_end), '--limit',
str(limit), svn_url_or_wc]
xml_string = run_svn(svn_log_args + args)
return parse_svn_log_xml(xml_string)
def get_svn_status(svn_wc):
"""
Get SVN status information about the given working copy.
"""
# Ensure proper stripping by canonicalizing the path
svn_wc = os.path.abspath(svn_wc)
args = [svn_wc]
xml_string = run_svn(svn_status_args + args)
return parse_svn_status_xml(xml_string, svn_wc)
def get_one_svn_log_entry(svn_url, rev_start, rev_end, stop_on_copy=False):
"""
Get the first SVN log entry in the requested revision range.
"""
entries = run_svn_log(svn_url, rev_start, rev_end, 1, stop_on_copy)
if not entries:
display_error("No SVN log for %s between revisions %s and %s" %
(svn_url, rev_start, rev_end))
return entries[0]
def get_first_svn_log_entry(svn_url, rev_start, rev_end):
"""
Get the first log entry after/at the given revision number in an SVN branch.
By default the revision number is set to 0, which will give you the log
entry corresponding to the branch creaction.
NOTE: to know whether the branch creation corresponds to an SVN import or
a copy from another branch, inspect elements of the 'changed_paths' entry
in the returned dictionary.
"""
return get_one_svn_log_entry(svn_url, rev_start, rev_end, stop_on_copy=True)
def get_last_svn_log_entry(svn_url, rev_start, rev_end):
"""
Get the last log entry before/at the given revision number in an SVN branch.
By default the revision number is set to HEAD, which will give you the log
entry corresponding to the latest commit in branch.
"""
return get_one_svn_log_entry(svn_url, rev_end, rev_start, stop_on_copy=True)
log_duration_threshold = 10.0
log_min_chunk_length = 10
def iter_svn_log_entries(svn_url, first_rev, last_rev):
"""
Iterate over SVN log entries between first_rev and last_rev.
This function features chunked log fetching so that it isn't too nasty
to the SVN server if many entries are requested.
"""
cur_rev = first_rev
chunk_length = log_min_chunk_length
chunk_interval_factor = 1.0
while last_rev == "HEAD" or cur_rev <= last_rev:
start_t = time.time()
stop_rev = min(last_rev, cur_rev + int(chunk_length * chunk_interval_factor))
entries = run_svn_log(svn_url, cur_rev, stop_rev, chunk_length)
duration = time.time() - start_t
if not entries:
if stop_rev == last_rev:
break
cur_rev = stop_rev + 1
chunk_interval_factor *= 2.0
continue
for e in entries:
yield e
cur_rev = e['revision'] + 1
# Adapt chunk length based on measured request duration
if duration < log_duration_threshold:
chunk_length = int(chunk_length * 2.0)
elif duration > log_duration_threshold * 2:
chunk_length = max(log_min_chunk_length, int(chunk_length / 2.0))
def commit_from_svn_log_entry(entry, files=None, keep_author=False):
"""
Given an SVN log entry and an optional sequence of files, do an svn commit.
"""
# This will use the local timezone for displaying commit times
timestamp = int(entry['date'])
svn_date = str(datetime.fromtimestamp(timestamp))
# Uncomment this one one if you prefer UTC commit times
#svn_date = "%d 0" % timestamp
if keep_author:
options = ["ci", "--force-log", "-m", entry['message'] + "\nDate: " + svn_date, "--username", entry['author']]
else:
options = ["ci", "--force-log", "-m", entry['message'] + "\nDate: " + svn_date + "\nAuthor: " + entry['author']]
if files:
options += list(files)
run_svn(options)
def svn_add_dir(p):
# set p = "." when p = ""
#p = p.strip() or "."
if p.strip() and not os.path.exists(p + os.sep + ".svn"):
svn_add_dir(os.path.dirname(p))
if not os.path.exists(p):
os.makedirs(p)
run_svn(["add", p])
def pull_svn_rev(log_entry, svn_url, target_url, svn_path, original_wc, keep_author=False):
"""
Pull SVN changes from the given log entry.
Returns the new SVN revision.
If an exception occurs, it will rollback to revision 'svn_rev - 1'.
"""
svn_rev = log_entry['revision']
run_svn(["up", "--ignore-externals", "-r", svn_rev, original_wc])
removed_paths = []
merged_paths = []
unrelated_paths = []
commit_paths = []
for d in log_entry['changed_paths']:
# e.g. u'/branches/xmpp/twisted/words/test/test.py'
p = d['path']
if not p.startswith(svn_path + "/"):
# Ignore changed files that are not part of this subdir
if p != svn_path:
unrelated_paths.append(p)
continue
# e.g. u'twisted/words/test/test.py'
p = p[len(svn_path):].strip("/")
# Record for commit
action = d['action']
if action not in 'MARD':
display_error("In SVN rev. %d: action '%s' not supported. \
Please report a bug!" % (svn_rev, action))
if len (commit_paths) < 100:
commit_paths.append(p)
# Detect special cases
old_p = d['copyfrom_path']
if old_p and old_p.startswith(svn_path + "/"):
old_p = old_p[len(svn_path):].strip("/")
# Both paths can be identical if copied from an old rev.
# We treat like it a normal change.
if old_p != p:
if not os.path.exists(p + os.sep + '.svn'):
svn_add_dir(os.path.dirname(p))
run_svn(["up", old_p])
run_svn(["copy", old_p, p])
if os.path.isfile(p):
shutil.copy(original_wc + os.sep + p, p)
if action == 'R':
removed_paths.append(old_p)
if len (commit_paths) < 100:
commit_paths.append(old_p)
continue
if action == 'A':
if os.path.isdir(original_wc + os.sep + p):
svn_add_dir(p)
else:
p_path = os.path.dirname(p).strip() or '.'
svn_add_dir(p_path)
shutil.copy(original_wc + os.sep + p, p)
run_svn(["add", p])
elif action == 'D':
removed_paths.append(p)
else: # action == 'M'
merged_paths.append(p)
if removed_paths:
for r in removed_paths:
run_svn(["up", r])
run_svn(["remove", "--force", r])
if merged_paths:
for m in merged_paths:
run_svn(["up", m])
m_url = svn_url + "/" + m
out = run_svn(["merge", "-c", str(svn_rev), "--non-recursive",
m_url+"@"+str(svn_rev), m])
# if conflicts, use the copy from original_wc
if out and out.split()[0] == 'C':
print "\n### Conflicts ignored: %s, in revision: %s\n" \
% (m, svn_rev)
run_svn(["revert", "--recursive", m])
if os.path.isfile(m):
shutil.copy(original_wc + os.sep + m, m)
if unrelated_paths:
print "Unrelated paths: "
print "*", unrelated_paths
## too many files
if len (commit_paths) > 99:
commit_paths = []
try:
commit_from_svn_log_entry(log_entry, commit_paths,
keep_author=keep_author)
except ExternalCommandFailed:
# try to ignore the Properties conflicts on files and dirs
# use the copy from original_wc
has_Conflict = False
for d in log_entry['changed_paths']:
p = d['path']
p = p[len(svn_path):].strip("/")
if os.path.isfile(p):
if os.path.isfile(p + ".prej"):
has_Conflict = True
shutil.copy(original_wc + os.sep + p, p)
p2=os.sep + p.replace('_', '__').replace('/', '_') \
+ ".prej-" + str(svn_rev)
shutil.move(p + ".prej", os.path.dirname(original_wc) + p2)
w="\n### Properties conflicts ignored:"
print "%s %s, in revision: %s\n" % (w, p, svn_rev)
elif os.path.isdir(p):
if os.path.isfile(p + os.sep + "dir_conflicts.prej"):
has_Conflict = True
p2=os.sep + p.replace('_', '__').replace('/', '_') \
+ "_dir__conflicts.prej-" + str(svn_rev)
shutil.move(p + os.sep + "dir_conflicts.prej",
os.path.dirname(original_wc) + p2)
w="\n### Properties conflicts ignored:"
print "%s %s, in revision: %s\n" % (w, p, svn_rev)
out = run_svn(["propget", "svn:ignore",
original_wc + os.sep + p])
if out:
run_svn(["propset", "svn:ignore", out.strip(), p])
out = run_svn(["propget", "svn:externel",
original_wc + os.sep + p])
if out:
run_svn(["propset", "svn:external", out.strip(), p])
# try again
if has_Conflict:
commit_from_svn_log_entry(log_entry, commit_paths,
keep_author=keep_author)
else:
raise ExternalCommandFailed
def main():
usage = "Usage: %prog [-a] [-c] [-r SVN rev] <Source SVN URL> <Target SVN URL>"
parser = OptionParser(usage)
parser.add_option("-a", "--keep-author", action="store_true",
dest="keep_author", help="Keep revision Author or not")
parser.add_option("-c", "--continue-from-break", action="store_true",
dest="cont_from_break",
help="Continue from previous break")
parser.add_option("-r", "--svn-rev", type="int", dest="svn_rev",
help="SVN revision to checkout from")
(options, args) = parser.parse_args()
if len(args) != 2:
display_error("incorrect number of arguments\n\nTry: svn2svn.py --help",
False)
source_url = args.pop(0).rstrip("/")
target_url = args.pop(0).rstrip("/")
if options.keep_author:
keep_author = True
else:
keep_author = False
# Find the greatest_rev
# don't use 'svn info' to get greatest_rev, it doesn't work sometimes
svn_log = get_one_svn_log_entry(source_url, "HEAD", "HEAD")
greatest_rev = svn_log['revision']
original_wc = "_original_wc"
dup_wc = "_dup_wc"
## old working copy does not exist, disable continue mode
if not os.path.exists(dup_wc):
options.cont_from_break = False
if not options.cont_from_break:
# Warn if Target SVN URL existed
cmd = find_program("svn")
pipe = Popen([cmd] + ["list"] + [target_url], executable=cmd,
stdout=PIPE, stderr=PIPE)
out, err = pipe.communicate()
if pipe.returncode == 0:
print "Target SVN URL: %s existed!" % target_url
if out:
print out
print "Press 'Enter' to Continue, 'Ctrl + C' to Cancel..."
print "(Timeout in 5 seconds)"
rfds, wfds, efds = select.select([sys.stdin], [], [], 5)
# Get log entry for the SVN revision we will check out
if options.svn_rev:
# If specify a rev, get log entry just before or at rev
svn_start_log = get_last_svn_log_entry(source_url, 1,
options.svn_rev)
else:
# Otherwise, get log entry of branch creation
svn_start_log = get_first_svn_log_entry(source_url, 1,
greatest_rev)
# This is the revision we will checkout from
svn_rev = svn_start_log['revision']
# Check out first revision (changeset) from Source SVN URL
if os.path.exists(original_wc):
shutil.rmtree(original_wc)
svn_checkout(source_url, original_wc, svn_rev)
# Import first revision (changeset) into Target SVN URL
timestamp = int(svn_start_log['date'])
svn_date = str(datetime.fromtimestamp(timestamp))
if keep_author:
run_svn(["import", original_wc, target_url, "-m",
svn_start_log['message'] + "\nDate: " + svn_date,
"--username", svn_start_log['author']])
else:
run_svn(["import", original_wc, target_url, "-m",
svn_start_log['message'] + "\nDate: " + svn_date +
"\nAuthor: " + svn_start_log['author']])
# Check out a working copy
if os.path.exists(dup_wc):
shutil.rmtree(dup_wc)
svn_checkout(target_url, dup_wc)
original_wc = os.path.abspath(original_wc)
dup_wc = os.path.abspath(dup_wc)
os.chdir(dup_wc)
# Get SVN info
svn_info = get_svn_info(original_wc)
# e.g. u'svn://svn.twistedmatrix.com/svn/Twisted'
repos_url = svn_info['repos_url']
# e.g. u'svn://svn.twistedmatrix.com/svn/Twisted/branches/xmpp'
svn_url = svn_info['url']
assert svn_url.startswith(repos_url)
# e.g. u'/branches/xmpp'
svn_path = svn_url[len(repos_url):]
# e.g. 'xmpp'
svn_branch = svn_url.split("/")[-1]
if options.cont_from_break:
svn_rev = svn_info['revision'] - 1
if svn_rev < 1:
svn_rev = 1
# Load SVN log starting from svn_rev + 1
it_log_entries = iter_svn_log_entries(svn_url, svn_rev + 1, greatest_rev)
try:
for log_entry in it_log_entries:
|
except KeyboardInterrupt:
print "\nStopped by user."
run_svn(["cleanup"])
run_svn(["revert", "--recursive", "."])
except:
print "\nCommand failed with following error:\n"
traceback.print_exc()
run_svn(["cleanup"])
run_svn(["revert", "--recursive", "."])
finally:
run_svn(["up"])
print "\nFinished!"
if __name__ == "__main__":
main()
| pull_svn_rev(log_entry, svn_url, target_url, svn_path,
original_wc, keep_author) | conditional_block |
MySVN.py | #!/usr/bin/env python
"""
svn2svn.py
Replicate changesets from one SVN repository to another,
includes diffs, comments, and Dates of each revision.
It's also possible to retain the Author info if the Target SVN URL
is in a local filesystem (ie, running svn2svn.py on Target SVN server),
or if Target SVN URL is managed through ssh tunnel.
In later case, please run 'ssh-add' (adds RSA or DSA identities to
the authentication agent) before invoking svn2svn.py.
For example (in Unix environment):
$ exec /usr/bin/ssh-agent $SHELL
$ /usr/bin/ssh-add
Enter passphrase for /home/user/.ssh/id_dsa:
Identity added: /home/user/.ssh/id_dsa (/home/user/.ssh/id_dsa)
$ python ./svn2svn.py -a SOURCE TARGET
Written and used on Ubuntu 7.04 (Feisty Fawn).
Provided as-is and absolutely no warranty - aka Don't bet your life on it.
This tool re-used some modules from svnclient.py on project hgsvn
(a tool can create Mercurial repository from SVN repository):
http://cheeseshop.python.org/pypi/hgsvn
License: GPLv2, the same as hgsvn.
version 0.1.1; Jul 31, 2007; simford dot dong at gmail dot com
"""
import os
import sys
import time
import locale
import shutil
import select
import calendar
import traceback
from optparse import OptionParser
from subprocess import Popen, PIPE
from datetime import datetime
try:
from xml.etree import cElementTree as ET
except ImportError:
try:
from xml.etree import ElementTree as ET
except ImportError:
try:
import cElementTree as ET
except ImportError:
from elementtree import ElementTree as ET
svn_log_args = ['log', '--xml', '-v']
svn_info_args = ['info', '--xml']
svn_checkout_args = ['checkout', '-q']
svn_status_args = ['status', '--xml', '-v', '--ignore-externals']
# define exception class
class ExternalCommandFailed(RuntimeError):
"""
An external command failed.
"""
class ParameterError(RuntimeError):
"""
An external command failed.
"""
def display_error(message, raise_exception = True):
"""
Display error message, then terminate.
"""
print "Error:", message
print
if raise_exception:
raise ExternalCommandFailed
else:
sys.exit(1)
# Windows compatibility code by Bill Baxter
if os.name == "nt":
def find_program(name):
"""
Find the name of the program for Popen.
Windows is finnicky about having the complete file name. Popen
won't search the %PATH% for you automatically.
(Adapted from ctypes.find_library)
"""
# See MSDN for the REAL search order.
base, ext = os.path.splitext(name)
if ext:
exts = [ext]
else:
exts = ['.bat', '.exe']
for directory in os.environ['PATH'].split(os.pathsep):
for e in exts:
fname = os.path.join(directory, base + e)
if os.path.exists(fname):
return fname
return None
else:
def find_program(name):
"""
Find the name of the program for Popen.
On Unix, popen isn't picky about having absolute paths.
"""
return name
def shell_quote(s):
|
locale_encoding = locale.getpreferredencoding()
def run_svn(args, fail_if_stderr=False, encoding="utf-8"):
"""
Run svn cmd in PIPE
exit if svn cmd failed
"""
def _transform_arg(a):
if isinstance(a, unicode):
a = a.encode(encoding or locale_encoding)
elif not isinstance(a, str):
a = str(a)
return a
t_args = map(_transform_arg, args)
cmd = find_program("svn")
cmd_string = str(" ".join(map(shell_quote, [cmd] + t_args)))
print "*", cmd_string
pipe = Popen([cmd] + t_args, executable=cmd, stdout=PIPE, stderr=PIPE)
out, err = pipe.communicate()
if pipe.returncode != 0 or (fail_if_stderr and err.strip()):
display_error("External program failed (return code %d): %s\n%s"
% (pipe.returncode, cmd_string, err))
return out
def svn_date_to_timestamp(svn_date):
"""
Parse an SVN date as read from the XML output and
return the corresponding timestamp.
"""
# Strip microseconds and timezone (always UTC, hopefully)
# XXX there are various ISO datetime parsing routines out there,
# cf. http://seehuhn.de/comp/pdate
date = svn_date.split('.', 2)[0]
time_tuple = time.strptime(date, "%Y-%m-%dT%H:%M:%S")
return calendar.timegm(time_tuple)
def parse_svn_info_xml(xml_string):
"""
Parse the XML output from an "svn info" command and extract
useful information as a dict.
"""
d = {}
tree = ET.fromstring(xml_string)
entry = tree.find('.//entry')
if entry:
d['url'] = entry.find('url').text
d['revision'] = int(entry.get('revision'))
d['repos_url'] = tree.find('.//repository/root').text
d['last_changed_rev'] = int(tree.find('.//commit').get('revision'))
d['kind'] = entry.get('kind')
return d
def parse_svn_log_xml(xml_string):
"""
Parse the XML output from an "svn log" command and extract
useful information as a list of dicts (one per log changeset).
"""
l = []
tree = ET.fromstring(xml_string)
for entry in tree.findall('logentry'):
d = {}
d['revision'] = int(entry.get('revision'))
# Some revisions don't have authors, most notably
# the first revision in a repository.
author = entry.find('author')
d['author'] = author is not None and author.text or None
d['date'] = svn_date_to_timestamp(entry.find('date').text)
# Some revisions may have empty commit message
message = entry.find('msg')
message = message is not None and message.text is not None \
and message.text.strip() or ""
# Replace DOS return '\r\n' and MacOS return '\r' with unix return '\n'
d['message'] = message.replace('\r\n', '\n').replace('\n\r', '\n'). \
replace('\r', '\n')
paths = d['changed_paths'] = []
for path in entry.findall('.//path'):
copyfrom_rev = path.get('copyfrom-rev')
if copyfrom_rev:
copyfrom_rev = int(copyfrom_rev)
paths.append({
'path': path.text,
'action': path.get('action'),
'copyfrom_path': path.get('copyfrom-path'),
'copyfrom_revision': copyfrom_rev,
})
l.append(d)
return l
def parse_svn_status_xml(xml_string, base_dir=None):
"""
Parse the XML output from an "svn status" command and extract
useful info as a list of dicts (one per status entry).
"""
l = []
tree = ET.fromstring(xml_string)
for entry in tree.findall('.//entry'):
d = {}
path = entry.get('path')
if base_dir is not None:
assert path.startswith(base_dir)
path = path[len(base_dir):].lstrip('/\\')
d['path'] = path
wc_status = entry.find('wc-status')
if wc_status.get('item') == 'external':
d['type'] = 'external'
elif wc_status.get('revision') is not None:
d['type'] = 'normal'
else:
d['type'] = 'unversioned'
l.append(d)
return l
def get_svn_info(svn_url_or_wc, rev_number=None):
"""
Get SVN information for the given URL or working copy,
with an optionally specified revision number.
Returns a dict as created by parse_svn_info_xml().
"""
if rev_number is not None:
args = [svn_url_or_wc + "@" + str(rev_number)]
else:
args = [svn_url_or_wc]
xml_string = run_svn(svn_info_args + args,
fail_if_stderr=True)
return parse_svn_info_xml(xml_string)
def svn_checkout(svn_url, checkout_dir, rev_number=None):
"""
Checkout the given URL at an optional revision number.
"""
args = []
if rev_number is not None:
args += ['-r', rev_number]
args += [svn_url, checkout_dir]
return run_svn(svn_checkout_args + args)
def run_svn_log(svn_url_or_wc, rev_start, rev_end, limit, stop_on_copy=False):
"""
Fetch up to 'limit' SVN log entries between the given revisions.
"""
if stop_on_copy:
args = ['--stop-on-copy']
else:
args = []
args += ['-r', '%s:%s' % (rev_start, rev_end), '--limit',
str(limit), svn_url_or_wc]
xml_string = run_svn(svn_log_args + args)
return parse_svn_log_xml(xml_string)
def get_svn_status(svn_wc):
"""
Get SVN status information about the given working copy.
"""
# Ensure proper stripping by canonicalizing the path
svn_wc = os.path.abspath(svn_wc)
args = [svn_wc]
xml_string = run_svn(svn_status_args + args)
return parse_svn_status_xml(xml_string, svn_wc)
def get_one_svn_log_entry(svn_url, rev_start, rev_end, stop_on_copy=False):
"""
Get the first SVN log entry in the requested revision range.
"""
entries = run_svn_log(svn_url, rev_start, rev_end, 1, stop_on_copy)
if not entries:
display_error("No SVN log for %s between revisions %s and %s" %
(svn_url, rev_start, rev_end))
return entries[0]
def get_first_svn_log_entry(svn_url, rev_start, rev_end):
"""
Get the first log entry after/at the given revision number in an SVN branch.
By default the revision number is set to 0, which will give you the log
entry corresponding to the branch creaction.
NOTE: to know whether the branch creation corresponds to an SVN import or
a copy from another branch, inspect elements of the 'changed_paths' entry
in the returned dictionary.
"""
return get_one_svn_log_entry(svn_url, rev_start, rev_end, stop_on_copy=True)
def get_last_svn_log_entry(svn_url, rev_start, rev_end):
"""
Get the last log entry before/at the given revision number in an SVN branch.
By default the revision number is set to HEAD, which will give you the log
entry corresponding to the latest commit in branch.
"""
return get_one_svn_log_entry(svn_url, rev_end, rev_start, stop_on_copy=True)
log_duration_threshold = 10.0
log_min_chunk_length = 10
def iter_svn_log_entries(svn_url, first_rev, last_rev):
"""
Iterate over SVN log entries between first_rev and last_rev.
This function features chunked log fetching so that it isn't too nasty
to the SVN server if many entries are requested.
"""
cur_rev = first_rev
chunk_length = log_min_chunk_length
chunk_interval_factor = 1.0
while last_rev == "HEAD" or cur_rev <= last_rev:
start_t = time.time()
stop_rev = min(last_rev, cur_rev + int(chunk_length * chunk_interval_factor))
entries = run_svn_log(svn_url, cur_rev, stop_rev, chunk_length)
duration = time.time() - start_t
if not entries:
if stop_rev == last_rev:
break
cur_rev = stop_rev + 1
chunk_interval_factor *= 2.0
continue
for e in entries:
yield e
cur_rev = e['revision'] + 1
# Adapt chunk length based on measured request duration
if duration < log_duration_threshold:
chunk_length = int(chunk_length * 2.0)
elif duration > log_duration_threshold * 2:
chunk_length = max(log_min_chunk_length, int(chunk_length / 2.0))
def commit_from_svn_log_entry(entry, files=None, keep_author=False):
"""
Given an SVN log entry and an optional sequence of files, do an svn commit.
"""
# This will use the local timezone for displaying commit times
timestamp = int(entry['date'])
svn_date = str(datetime.fromtimestamp(timestamp))
# Uncomment this one one if you prefer UTC commit times
#svn_date = "%d 0" % timestamp
if keep_author:
options = ["ci", "--force-log", "-m", entry['message'] + "\nDate: " + svn_date, "--username", entry['author']]
else:
options = ["ci", "--force-log", "-m", entry['message'] + "\nDate: " + svn_date + "\nAuthor: " + entry['author']]
if files:
options += list(files)
run_svn(options)
def svn_add_dir(p):
# set p = "." when p = ""
#p = p.strip() or "."
if p.strip() and not os.path.exists(p + os.sep + ".svn"):
svn_add_dir(os.path.dirname(p))
if not os.path.exists(p):
os.makedirs(p)
run_svn(["add", p])
def pull_svn_rev(log_entry, svn_url, target_url, svn_path, original_wc, keep_author=False):
"""
Pull SVN changes from the given log entry.
Returns the new SVN revision.
If an exception occurs, it will rollback to revision 'svn_rev - 1'.
"""
svn_rev = log_entry['revision']
run_svn(["up", "--ignore-externals", "-r", svn_rev, original_wc])
removed_paths = []
merged_paths = []
unrelated_paths = []
commit_paths = []
for d in log_entry['changed_paths']:
# e.g. u'/branches/xmpp/twisted/words/test/test.py'
p = d['path']
if not p.startswith(svn_path + "/"):
# Ignore changed files that are not part of this subdir
if p != svn_path:
unrelated_paths.append(p)
continue
# e.g. u'twisted/words/test/test.py'
p = p[len(svn_path):].strip("/")
# Record for commit
action = d['action']
if action not in 'MARD':
display_error("In SVN rev. %d: action '%s' not supported. \
Please report a bug!" % (svn_rev, action))
if len (commit_paths) < 100:
commit_paths.append(p)
# Detect special cases
old_p = d['copyfrom_path']
if old_p and old_p.startswith(svn_path + "/"):
old_p = old_p[len(svn_path):].strip("/")
# Both paths can be identical if copied from an old rev.
# We treat like it a normal change.
if old_p != p:
if not os.path.exists(p + os.sep + '.svn'):
svn_add_dir(os.path.dirname(p))
run_svn(["up", old_p])
run_svn(["copy", old_p, p])
if os.path.isfile(p):
shutil.copy(original_wc + os.sep + p, p)
if action == 'R':
removed_paths.append(old_p)
if len (commit_paths) < 100:
commit_paths.append(old_p)
continue
if action == 'A':
if os.path.isdir(original_wc + os.sep + p):
svn_add_dir(p)
else:
p_path = os.path.dirname(p).strip() or '.'
svn_add_dir(p_path)
shutil.copy(original_wc + os.sep + p, p)
run_svn(["add", p])
elif action == 'D':
removed_paths.append(p)
else: # action == 'M'
merged_paths.append(p)
if removed_paths:
for r in removed_paths:
run_svn(["up", r])
run_svn(["remove", "--force", r])
if merged_paths:
for m in merged_paths:
run_svn(["up", m])
m_url = svn_url + "/" + m
out = run_svn(["merge", "-c", str(svn_rev), "--non-recursive",
m_url+"@"+str(svn_rev), m])
# if conflicts, use the copy from original_wc
if out and out.split()[0] == 'C':
print "\n### Conflicts ignored: %s, in revision: %s\n" \
% (m, svn_rev)
run_svn(["revert", "--recursive", m])
if os.path.isfile(m):
shutil.copy(original_wc + os.sep + m, m)
if unrelated_paths:
print "Unrelated paths: "
print "*", unrelated_paths
## too many files
if len (commit_paths) > 99:
commit_paths = []
try:
commit_from_svn_log_entry(log_entry, commit_paths,
keep_author=keep_author)
except ExternalCommandFailed:
# try to ignore the Properties conflicts on files and dirs
# use the copy from original_wc
has_Conflict = False
for d in log_entry['changed_paths']:
p = d['path']
p = p[len(svn_path):].strip("/")
if os.path.isfile(p):
if os.path.isfile(p + ".prej"):
has_Conflict = True
shutil.copy(original_wc + os.sep + p, p)
p2=os.sep + p.replace('_', '__').replace('/', '_') \
+ ".prej-" + str(svn_rev)
shutil.move(p + ".prej", os.path.dirname(original_wc) + p2)
w="\n### Properties conflicts ignored:"
print "%s %s, in revision: %s\n" % (w, p, svn_rev)
elif os.path.isdir(p):
if os.path.isfile(p + os.sep + "dir_conflicts.prej"):
has_Conflict = True
p2=os.sep + p.replace('_', '__').replace('/', '_') \
+ "_dir__conflicts.prej-" + str(svn_rev)
shutil.move(p + os.sep + "dir_conflicts.prej",
os.path.dirname(original_wc) + p2)
w="\n### Properties conflicts ignored:"
print "%s %s, in revision: %s\n" % (w, p, svn_rev)
out = run_svn(["propget", "svn:ignore",
original_wc + os.sep + p])
if out:
run_svn(["propset", "svn:ignore", out.strip(), p])
out = run_svn(["propget", "svn:externel",
original_wc + os.sep + p])
if out:
run_svn(["propset", "svn:external", out.strip(), p])
# try again
if has_Conflict:
commit_from_svn_log_entry(log_entry, commit_paths,
keep_author=keep_author)
else:
raise ExternalCommandFailed
def main():
usage = "Usage: %prog [-a] [-c] [-r SVN rev] <Source SVN URL> <Target SVN URL>"
parser = OptionParser(usage)
parser.add_option("-a", "--keep-author", action="store_true",
dest="keep_author", help="Keep revision Author or not")
parser.add_option("-c", "--continue-from-break", action="store_true",
dest="cont_from_break",
help="Continue from previous break")
parser.add_option("-r", "--svn-rev", type="int", dest="svn_rev",
help="SVN revision to checkout from")
(options, args) = parser.parse_args()
if len(args) != 2:
display_error("incorrect number of arguments\n\nTry: svn2svn.py --help",
False)
source_url = args.pop(0).rstrip("/")
target_url = args.pop(0).rstrip("/")
if options.keep_author:
keep_author = True
else:
keep_author = False
# Find the greatest_rev
# don't use 'svn info' to get greatest_rev, it doesn't work sometimes
svn_log = get_one_svn_log_entry(source_url, "HEAD", "HEAD")
greatest_rev = svn_log['revision']
original_wc = "_original_wc"
dup_wc = "_dup_wc"
## old working copy does not exist, disable continue mode
if not os.path.exists(dup_wc):
options.cont_from_break = False
if not options.cont_from_break:
# Warn if Target SVN URL existed
cmd = find_program("svn")
pipe = Popen([cmd] + ["list"] + [target_url], executable=cmd,
stdout=PIPE, stderr=PIPE)
out, err = pipe.communicate()
if pipe.returncode == 0:
print "Target SVN URL: %s existed!" % target_url
if out:
print out
print "Press 'Enter' to Continue, 'Ctrl + C' to Cancel..."
print "(Timeout in 5 seconds)"
rfds, wfds, efds = select.select([sys.stdin], [], [], 5)
# Get log entry for the SVN revision we will check out
if options.svn_rev:
# If specify a rev, get log entry just before or at rev
svn_start_log = get_last_svn_log_entry(source_url, 1,
options.svn_rev)
else:
# Otherwise, get log entry of branch creation
svn_start_log = get_first_svn_log_entry(source_url, 1,
greatest_rev)
# This is the revision we will checkout from
svn_rev = svn_start_log['revision']
# Check out first revision (changeset) from Source SVN URL
if os.path.exists(original_wc):
shutil.rmtree(original_wc)
svn_checkout(source_url, original_wc, svn_rev)
# Import first revision (changeset) into Target SVN URL
timestamp = int(svn_start_log['date'])
svn_date = str(datetime.fromtimestamp(timestamp))
if keep_author:
run_svn(["import", original_wc, target_url, "-m",
svn_start_log['message'] + "\nDate: " + svn_date,
"--username", svn_start_log['author']])
else:
run_svn(["import", original_wc, target_url, "-m",
svn_start_log['message'] + "\nDate: " + svn_date +
"\nAuthor: " + svn_start_log['author']])
# Check out a working copy
if os.path.exists(dup_wc):
shutil.rmtree(dup_wc)
svn_checkout(target_url, dup_wc)
original_wc = os.path.abspath(original_wc)
dup_wc = os.path.abspath(dup_wc)
os.chdir(dup_wc)
# Get SVN info
svn_info = get_svn_info(original_wc)
# e.g. u'svn://svn.twistedmatrix.com/svn/Twisted'
repos_url = svn_info['repos_url']
# e.g. u'svn://svn.twistedmatrix.com/svn/Twisted/branches/xmpp'
svn_url = svn_info['url']
assert svn_url.startswith(repos_url)
# e.g. u'/branches/xmpp'
svn_path = svn_url[len(repos_url):]
# e.g. 'xmpp'
svn_branch = svn_url.split("/")[-1]
if options.cont_from_break:
svn_rev = svn_info['revision'] - 1
if svn_rev < 1:
svn_rev = 1
# Load SVN log starting from svn_rev + 1
it_log_entries = iter_svn_log_entries(svn_url, svn_rev + 1, greatest_rev)
try:
for log_entry in it_log_entries:
pull_svn_rev(log_entry, svn_url, target_url, svn_path,
original_wc, keep_author)
except KeyboardInterrupt:
print "\nStopped by user."
run_svn(["cleanup"])
run_svn(["revert", "--recursive", "."])
except:
print "\nCommand failed with following error:\n"
traceback.print_exc()
run_svn(["cleanup"])
run_svn(["revert", "--recursive", "."])
finally:
run_svn(["up"])
print "\nFinished!"
if __name__ == "__main__":
main()
| if os.name == "nt":
q = '"'
else:
q = "'"
return q + s.replace('\\', '\\\\').replace("'", "'\"'\"'") + q | identifier_body |
MySVN.py | #!/usr/bin/env python
"""
svn2svn.py
Replicate changesets from one SVN repository to another,
includes diffs, comments, and Dates of each revision.
It's also possible to retain the Author info if the Target SVN URL
is in a local filesystem (ie, running svn2svn.py on Target SVN server),
or if Target SVN URL is managed through ssh tunnel.
In later case, please run 'ssh-add' (adds RSA or DSA identities to
the authentication agent) before invoking svn2svn.py.
For example (in Unix environment):
$ exec /usr/bin/ssh-agent $SHELL
$ /usr/bin/ssh-add
Enter passphrase for /home/user/.ssh/id_dsa:
Identity added: /home/user/.ssh/id_dsa (/home/user/.ssh/id_dsa)
$ python ./svn2svn.py -a SOURCE TARGET
Written and used on Ubuntu 7.04 (Feisty Fawn).
Provided as-is and absolutely no warranty - aka Don't bet your life on it.
This tool re-used some modules from svnclient.py on project hgsvn
(a tool can create Mercurial repository from SVN repository):
http://cheeseshop.python.org/pypi/hgsvn
License: GPLv2, the same as hgsvn.
version 0.1.1; Jul 31, 2007; simford dot dong at gmail dot com
"""
import os
import sys
import time
import locale
import shutil
import select
import calendar
import traceback
from optparse import OptionParser
from subprocess import Popen, PIPE
from datetime import datetime
try:
from xml.etree import cElementTree as ET
except ImportError:
try:
from xml.etree import ElementTree as ET
except ImportError:
try:
import cElementTree as ET
except ImportError:
from elementtree import ElementTree as ET
svn_log_args = ['log', '--xml', '-v']
svn_info_args = ['info', '--xml']
svn_checkout_args = ['checkout', '-q']
svn_status_args = ['status', '--xml', '-v', '--ignore-externals']
# define exception class
class ExternalCommandFailed(RuntimeError):
"""
An external command failed.
"""
class ParameterError(RuntimeError):
"""
An external command failed.
"""
def display_error(message, raise_exception = True):
"""
Display error message, then terminate.
"""
print "Error:", message
print
if raise_exception:
raise ExternalCommandFailed
else:
sys.exit(1)
# Windows compatibility code by Bill Baxter
if os.name == "nt":
def find_program(name):
"""
Find the name of the program for Popen.
Windows is finnicky about having the complete file name. Popen
won't search the %PATH% for you automatically.
(Adapted from ctypes.find_library)
"""
# See MSDN for the REAL search order.
base, ext = os.path.splitext(name)
if ext:
exts = [ext]
else:
exts = ['.bat', '.exe']
for directory in os.environ['PATH'].split(os.pathsep):
for e in exts:
fname = os.path.join(directory, base + e)
if os.path.exists(fname):
return fname
return None
else:
def find_program(name):
"""
Find the name of the program for Popen.
On Unix, popen isn't picky about having absolute paths.
"""
return name
def shell_quote(s):
if os.name == "nt":
q = '"'
else:
q = "'"
return q + s.replace('\\', '\\\\').replace("'", "'\"'\"'") + q
locale_encoding = locale.getpreferredencoding()
def run_svn(args, fail_if_stderr=False, encoding="utf-8"):
"""
Run svn cmd in PIPE
exit if svn cmd failed
"""
def _transform_arg(a):
if isinstance(a, unicode):
a = a.encode(encoding or locale_encoding)
elif not isinstance(a, str):
a = str(a)
return a
t_args = map(_transform_arg, args)
cmd = find_program("svn")
cmd_string = str(" ".join(map(shell_quote, [cmd] + t_args)))
print "*", cmd_string
pipe = Popen([cmd] + t_args, executable=cmd, stdout=PIPE, stderr=PIPE)
out, err = pipe.communicate()
if pipe.returncode != 0 or (fail_if_stderr and err.strip()):
display_error("External program failed (return code %d): %s\n%s"
% (pipe.returncode, cmd_string, err))
return out
def svn_date_to_timestamp(svn_date):
"""
Parse an SVN date as read from the XML output and
return the corresponding timestamp.
"""
# Strip microseconds and timezone (always UTC, hopefully)
# XXX there are various ISO datetime parsing routines out there,
# cf. http://seehuhn.de/comp/pdate
date = svn_date.split('.', 2)[0]
time_tuple = time.strptime(date, "%Y-%m-%dT%H:%M:%S")
return calendar.timegm(time_tuple)
def parse_svn_info_xml(xml_string):
"""
Parse the XML output from an "svn info" command and extract
useful information as a dict.
"""
d = {}
tree = ET.fromstring(xml_string)
entry = tree.find('.//entry')
if entry:
d['url'] = entry.find('url').text
d['revision'] = int(entry.get('revision'))
d['repos_url'] = tree.find('.//repository/root').text
d['last_changed_rev'] = int(tree.find('.//commit').get('revision'))
d['kind'] = entry.get('kind')
return d
def parse_svn_log_xml(xml_string):
"""
Parse the XML output from an "svn log" command and extract
useful information as a list of dicts (one per log changeset).
"""
l = []
tree = ET.fromstring(xml_string)
for entry in tree.findall('logentry'):
d = {}
d['revision'] = int(entry.get('revision'))
# Some revisions don't have authors, most notably
# the first revision in a repository.
author = entry.find('author')
d['author'] = author is not None and author.text or None
d['date'] = svn_date_to_timestamp(entry.find('date').text)
# Some revisions may have empty commit message
message = entry.find('msg')
message = message is not None and message.text is not None \
and message.text.strip() or ""
# Replace DOS return '\r\n' and MacOS return '\r' with unix return '\n'
d['message'] = message.replace('\r\n', '\n').replace('\n\r', '\n'). \
replace('\r', '\n')
paths = d['changed_paths'] = []
for path in entry.findall('.//path'):
copyfrom_rev = path.get('copyfrom-rev')
if copyfrom_rev:
copyfrom_rev = int(copyfrom_rev)
paths.append({
'path': path.text,
'action': path.get('action'),
'copyfrom_path': path.get('copyfrom-path'),
'copyfrom_revision': copyfrom_rev,
})
l.append(d)
return l
def parse_svn_status_xml(xml_string, base_dir=None):
"""
Parse the XML output from an "svn status" command and extract
useful info as a list of dicts (one per status entry).
"""
l = []
tree = ET.fromstring(xml_string)
for entry in tree.findall('.//entry'):
d = {}
path = entry.get('path')
if base_dir is not None:
assert path.startswith(base_dir)
path = path[len(base_dir):].lstrip('/\\')
d['path'] = path
wc_status = entry.find('wc-status')
if wc_status.get('item') == 'external':
d['type'] = 'external'
elif wc_status.get('revision') is not None:
d['type'] = 'normal'
else:
d['type'] = 'unversioned'
l.append(d)
return l
def get_svn_info(svn_url_or_wc, rev_number=None):
"""
Get SVN information for the given URL or working copy,
with an optionally specified revision number.
Returns a dict as created by parse_svn_info_xml().
"""
if rev_number is not None:
args = [svn_url_or_wc + "@" + str(rev_number)]
else:
args = [svn_url_or_wc]
xml_string = run_svn(svn_info_args + args,
fail_if_stderr=True)
return parse_svn_info_xml(xml_string)
def svn_checkout(svn_url, checkout_dir, rev_number=None):
"""
Checkout the given URL at an optional revision number.
"""
args = []
if rev_number is not None:
args += ['-r', rev_number]
args += [svn_url, checkout_dir]
return run_svn(svn_checkout_args + args)
def run_svn_log(svn_url_or_wc, rev_start, rev_end, limit, stop_on_copy=False):
"""
Fetch up to 'limit' SVN log entries between the given revisions.
"""
if stop_on_copy:
args = ['--stop-on-copy']
else:
args = []
args += ['-r', '%s:%s' % (rev_start, rev_end), '--limit',
str(limit), svn_url_or_wc]
xml_string = run_svn(svn_log_args + args)
return parse_svn_log_xml(xml_string)
def get_svn_status(svn_wc):
"""
Get SVN status information about the given working copy.
"""
# Ensure proper stripping by canonicalizing the path
svn_wc = os.path.abspath(svn_wc)
args = [svn_wc]
xml_string = run_svn(svn_status_args + args)
return parse_svn_status_xml(xml_string, svn_wc)
def get_one_svn_log_entry(svn_url, rev_start, rev_end, stop_on_copy=False):
"""
Get the first SVN log entry in the requested revision range.
"""
entries = run_svn_log(svn_url, rev_start, rev_end, 1, stop_on_copy)
if not entries:
display_error("No SVN log for %s between revisions %s and %s" %
(svn_url, rev_start, rev_end))
return entries[0]
def get_first_svn_log_entry(svn_url, rev_start, rev_end):
"""
Get the first log entry after/at the given revision number in an SVN branch.
By default the revision number is set to 0, which will give you the log
entry corresponding to the branch creaction.
NOTE: to know whether the branch creation corresponds to an SVN import or
a copy from another branch, inspect elements of the 'changed_paths' entry
in the returned dictionary.
"""
return get_one_svn_log_entry(svn_url, rev_start, rev_end, stop_on_copy=True)
def get_last_svn_log_entry(svn_url, rev_start, rev_end):
"""
Get the last log entry before/at the given revision number in an SVN branch.
By default the revision number is set to HEAD, which will give you the log
entry corresponding to the latest commit in branch.
"""
return get_one_svn_log_entry(svn_url, rev_end, rev_start, stop_on_copy=True)
log_duration_threshold = 10.0
log_min_chunk_length = 10
def iter_svn_log_entries(svn_url, first_rev, last_rev):
"""
Iterate over SVN log entries between first_rev and last_rev.
This function features chunked log fetching so that it isn't too nasty
to the SVN server if many entries are requested.
"""
cur_rev = first_rev
chunk_length = log_min_chunk_length
chunk_interval_factor = 1.0
while last_rev == "HEAD" or cur_rev <= last_rev:
start_t = time.time()
stop_rev = min(last_rev, cur_rev + int(chunk_length * chunk_interval_factor))
entries = run_svn_log(svn_url, cur_rev, stop_rev, chunk_length)
duration = time.time() - start_t
if not entries:
if stop_rev == last_rev:
break
cur_rev = stop_rev + 1
chunk_interval_factor *= 2.0
continue
for e in entries:
yield e
cur_rev = e['revision'] + 1
# Adapt chunk length based on measured request duration
if duration < log_duration_threshold:
chunk_length = int(chunk_length * 2.0)
elif duration > log_duration_threshold * 2:
chunk_length = max(log_min_chunk_length, int(chunk_length / 2.0))
def commit_from_svn_log_entry(entry, files=None, keep_author=False):
"""
Given an SVN log entry and an optional sequence of files, do an svn commit.
"""
# This will use the local timezone for displaying commit times
timestamp = int(entry['date'])
svn_date = str(datetime.fromtimestamp(timestamp))
# Uncomment this one one if you prefer UTC commit times
#svn_date = "%d 0" % timestamp
if keep_author:
options = ["ci", "--force-log", "-m", entry['message'] + "\nDate: " + svn_date, "--username", entry['author']]
else:
options = ["ci", "--force-log", "-m", entry['message'] + "\nDate: " + svn_date + "\nAuthor: " + entry['author']]
if files:
options += list(files)
run_svn(options)
def svn_add_dir(p):
# set p = "." when p = ""
#p = p.strip() or "."
if p.strip() and not os.path.exists(p + os.sep + ".svn"):
svn_add_dir(os.path.dirname(p))
if not os.path.exists(p):
os.makedirs(p)
run_svn(["add", p])
def pull_svn_rev(log_entry, svn_url, target_url, svn_path, original_wc, keep_author=False):
"""
Pull SVN changes from the given log entry.
Returns the new SVN revision.
If an exception occurs, it will rollback to revision 'svn_rev - 1'.
"""
svn_rev = log_entry['revision']
run_svn(["up", "--ignore-externals", "-r", svn_rev, original_wc])
removed_paths = []
merged_paths = []
unrelated_paths = []
commit_paths = []
for d in log_entry['changed_paths']:
# e.g. u'/branches/xmpp/twisted/words/test/test.py'
p = d['path']
if not p.startswith(svn_path + "/"):
# Ignore changed files that are not part of this subdir
if p != svn_path:
unrelated_paths.append(p)
continue
# e.g. u'twisted/words/test/test.py'
p = p[len(svn_path):].strip("/")
# Record for commit
action = d['action']
if action not in 'MARD':
display_error("In SVN rev. %d: action '%s' not supported. \
Please report a bug!" % (svn_rev, action))
if len (commit_paths) < 100:
commit_paths.append(p)
# Detect special cases
old_p = d['copyfrom_path']
if old_p and old_p.startswith(svn_path + "/"):
old_p = old_p[len(svn_path):].strip("/")
# Both paths can be identical if copied from an old rev.
# We treat like it a normal change.
if old_p != p:
if not os.path.exists(p + os.sep + '.svn'):
svn_add_dir(os.path.dirname(p))
run_svn(["up", old_p])
run_svn(["copy", old_p, p])
if os.path.isfile(p):
shutil.copy(original_wc + os.sep + p, p)
if action == 'R':
removed_paths.append(old_p)
if len (commit_paths) < 100:
commit_paths.append(old_p)
continue
if action == 'A':
if os.path.isdir(original_wc + os.sep + p):
svn_add_dir(p)
else:
p_path = os.path.dirname(p).strip() or '.'
svn_add_dir(p_path)
shutil.copy(original_wc + os.sep + p, p)
run_svn(["add", p])
elif action == 'D':
removed_paths.append(p)
else: # action == 'M'
merged_paths.append(p)
if removed_paths:
for r in removed_paths:
run_svn(["up", r])
run_svn(["remove", "--force", r])
if merged_paths:
for m in merged_paths:
run_svn(["up", m])
m_url = svn_url + "/" + m
out = run_svn(["merge", "-c", str(svn_rev), "--non-recursive",
m_url+"@"+str(svn_rev), m])
# if conflicts, use the copy from original_wc
if out and out.split()[0] == 'C':
print "\n### Conflicts ignored: %s, in revision: %s\n" \
% (m, svn_rev)
run_svn(["revert", "--recursive", m])
if os.path.isfile(m):
shutil.copy(original_wc + os.sep + m, m)
if unrelated_paths:
print "Unrelated paths: "
| print "*", unrelated_paths
## too many files
if len (commit_paths) > 99:
commit_paths = []
try:
commit_from_svn_log_entry(log_entry, commit_paths,
keep_author=keep_author)
except ExternalCommandFailed:
# try to ignore the Properties conflicts on files and dirs
# use the copy from original_wc
has_Conflict = False
for d in log_entry['changed_paths']:
p = d['path']
p = p[len(svn_path):].strip("/")
if os.path.isfile(p):
if os.path.isfile(p + ".prej"):
has_Conflict = True
shutil.copy(original_wc + os.sep + p, p)
p2=os.sep + p.replace('_', '__').replace('/', '_') \
+ ".prej-" + str(svn_rev)
shutil.move(p + ".prej", os.path.dirname(original_wc) + p2)
w="\n### Properties conflicts ignored:"
print "%s %s, in revision: %s\n" % (w, p, svn_rev)
elif os.path.isdir(p):
if os.path.isfile(p + os.sep + "dir_conflicts.prej"):
has_Conflict = True
p2=os.sep + p.replace('_', '__').replace('/', '_') \
+ "_dir__conflicts.prej-" + str(svn_rev)
shutil.move(p + os.sep + "dir_conflicts.prej",
os.path.dirname(original_wc) + p2)
w="\n### Properties conflicts ignored:"
print "%s %s, in revision: %s\n" % (w, p, svn_rev)
out = run_svn(["propget", "svn:ignore",
original_wc + os.sep + p])
if out:
run_svn(["propset", "svn:ignore", out.strip(), p])
out = run_svn(["propget", "svn:externel",
original_wc + os.sep + p])
if out:
run_svn(["propset", "svn:external", out.strip(), p])
# try again
if has_Conflict:
commit_from_svn_log_entry(log_entry, commit_paths,
keep_author=keep_author)
else:
raise ExternalCommandFailed
def main():
usage = "Usage: %prog [-a] [-c] [-r SVN rev] <Source SVN URL> <Target SVN URL>"
parser = OptionParser(usage)
parser.add_option("-a", "--keep-author", action="store_true",
dest="keep_author", help="Keep revision Author or not")
parser.add_option("-c", "--continue-from-break", action="store_true",
dest="cont_from_break",
help="Continue from previous break")
parser.add_option("-r", "--svn-rev", type="int", dest="svn_rev",
help="SVN revision to checkout from")
(options, args) = parser.parse_args()
if len(args) != 2:
display_error("incorrect number of arguments\n\nTry: svn2svn.py --help",
False)
source_url = args.pop(0).rstrip("/")
target_url = args.pop(0).rstrip("/")
if options.keep_author:
keep_author = True
else:
keep_author = False
# Find the greatest_rev
# don't use 'svn info' to get greatest_rev, it doesn't work sometimes
svn_log = get_one_svn_log_entry(source_url, "HEAD", "HEAD")
greatest_rev = svn_log['revision']
original_wc = "_original_wc"
dup_wc = "_dup_wc"
## old working copy does not exist, disable continue mode
if not os.path.exists(dup_wc):
options.cont_from_break = False
if not options.cont_from_break:
# Warn if Target SVN URL existed
cmd = find_program("svn")
pipe = Popen([cmd] + ["list"] + [target_url], executable=cmd,
stdout=PIPE, stderr=PIPE)
out, err = pipe.communicate()
if pipe.returncode == 0:
print "Target SVN URL: %s existed!" % target_url
if out:
print out
print "Press 'Enter' to Continue, 'Ctrl + C' to Cancel..."
print "(Timeout in 5 seconds)"
rfds, wfds, efds = select.select([sys.stdin], [], [], 5)
# Get log entry for the SVN revision we will check out
if options.svn_rev:
# If specify a rev, get log entry just before or at rev
svn_start_log = get_last_svn_log_entry(source_url, 1,
options.svn_rev)
else:
# Otherwise, get log entry of branch creation
svn_start_log = get_first_svn_log_entry(source_url, 1,
greatest_rev)
# This is the revision we will checkout from
svn_rev = svn_start_log['revision']
# Check out first revision (changeset) from Source SVN URL
if os.path.exists(original_wc):
shutil.rmtree(original_wc)
svn_checkout(source_url, original_wc, svn_rev)
# Import first revision (changeset) into Target SVN URL
timestamp = int(svn_start_log['date'])
svn_date = str(datetime.fromtimestamp(timestamp))
if keep_author:
run_svn(["import", original_wc, target_url, "-m",
svn_start_log['message'] + "\nDate: " + svn_date,
"--username", svn_start_log['author']])
else:
run_svn(["import", original_wc, target_url, "-m",
svn_start_log['message'] + "\nDate: " + svn_date +
"\nAuthor: " + svn_start_log['author']])
# Check out a working copy
if os.path.exists(dup_wc):
shutil.rmtree(dup_wc)
svn_checkout(target_url, dup_wc)
original_wc = os.path.abspath(original_wc)
dup_wc = os.path.abspath(dup_wc)
os.chdir(dup_wc)
# Get SVN info
svn_info = get_svn_info(original_wc)
# e.g. u'svn://svn.twistedmatrix.com/svn/Twisted'
repos_url = svn_info['repos_url']
# e.g. u'svn://svn.twistedmatrix.com/svn/Twisted/branches/xmpp'
svn_url = svn_info['url']
assert svn_url.startswith(repos_url)
# e.g. u'/branches/xmpp'
svn_path = svn_url[len(repos_url):]
# e.g. 'xmpp'
svn_branch = svn_url.split("/")[-1]
if options.cont_from_break:
svn_rev = svn_info['revision'] - 1
if svn_rev < 1:
svn_rev = 1
# Load SVN log starting from svn_rev + 1
it_log_entries = iter_svn_log_entries(svn_url, svn_rev + 1, greatest_rev)
try:
for log_entry in it_log_entries:
pull_svn_rev(log_entry, svn_url, target_url, svn_path,
original_wc, keep_author)
except KeyboardInterrupt:
print "\nStopped by user."
run_svn(["cleanup"])
run_svn(["revert", "--recursive", "."])
except:
print "\nCommand failed with following error:\n"
traceback.print_exc()
run_svn(["cleanup"])
run_svn(["revert", "--recursive", "."])
finally:
run_svn(["up"])
print "\nFinished!"
if __name__ == "__main__":
main() | random_line_split | |
base.js | var user={
pid:GetQueryString("pid"),
sid:GetQueryString("sid")
};
var serverUrl01="https://www.member361.com";//84正式服务器
var serverUrl02="https://121.43.150.38";//38测试服务器
var serverUrl03="http://106.15.89.156";//156测试服务器
var serverHost="https://www.member361.com";
var path=serverUrl01; //更改服务器地址可设置此值
var httpUrl={
// 基础
loginId:getCookie("loginId"),
path_img:path+"/file/getImage?md5=", // 图片地址
download:path+"/file/downloadOne?", // 文件下载
picUrl:path+"/file/upload2", // 图片上传地址
basicFileUpload:path+"/file/business/upload", // 业务文件上传
login:path+"/web/login/loginChecking",// 首页登入
loginUserInfo:path+"/web/basic/loginUserInfo",// 获得登录人信息
basicButton:path+"/web/ops/menu/button/list",// 获取菜单功能按钮列表
basicMyClassInfo:path+"/web/basic/myClassInfo",// 获得当前人 所在班级列表
basicAllClassInfo:path+"/web/basic/allClassInfo",// 获得登录人所在学校所有班级列表
basicCompanyList:path+"/web/ops/company/list",// 获取所有的学校
basicZip:path+"/file/zip",// 获取打包下载zip
basicStudent:path+"/common/basic/class/student",// 获取当前班级学生列表
// 菜单
menuList:path+"/web/ops/user/menu/list",// 菜单接口
menuChildList:path+"/web/ops/user/menu/childList",// 获取子菜单列表
menuButtonList:path+"/web/ops/menu/button/list",// 获取菜单功能按钮列表
// 菜单管理
menuButtonAddOrUpdate:path+"/web/ops/menu/button/addOrUpdate",// 新增或更新按钮信息
menuAddOrUpdate:path+"/web/ops/menu/addOrUpdate",// 新增或更新菜单信息
menuCompanyUpdate:path+"/web/ops/company/menu/update",// 更新学校菜单信息
menuCompanyList:path+"/web/ops/company/menu/list",// 获取学校菜单列表
menuButtonList:path+"/web/ops/button/list",// 获取菜单按钮列表
menuDelete:path+"/web/ops/menu/delete",// 删除菜单
menuButtonDelete:path+"/web/ops/menu/button/delete",// 删除菜单按钮
menuDetail:path+"/web/ops/menu/detail",// 菜单详情
menuButtonDetail:path+"/web/ops/menu/button/detail",// 按钮详情
menuRoleButtonUpdate:path+"/web/ops/role/button/update",// 更新角色按钮权限
menuRoleButtonList:path+"/web/ops/role/buttonList",// 获得角色所有按钮(含选中信息)
// 学校角色管理
schoolTypeList:path+"/web/ops/role/typeList",// 获取所有的角色
schoolMenuList:path+"/web/ops/company/role/menuList",// 获取学校角色菜单
schoolMenuUpdate:path+"/web/ops/company/role/menu/update",// 更新学校角色菜单
// 教师信息
teacherAdd:path+"/web/basic/staff/add",// 新建教职工
teacherSingleStaffInfo:path+"/web/basic/staff/singleStaffInfo",// 获得单项教职工条目
teacherUpdate:path+"/web/basic/staff/update",// 更新教职工条目
teacherDelete:path+"/web/basic/staff/delete",// 移除教职工
teacherAllType:path+"/web/basic/staff/allType",// 获得所有教职工类型
teacherMyClassInfo:path+"/web/basic/myClassInfo",// 获得教职工所在班级列表
teacherStaffInfo:path+"/web/basic/staff/staffInfo",// 获得教职工列表
teacherGetImportUserInfo:path+"/web/basic/import/getImportUserInfo",// 获得用户导入表信息
teacherDeleteImportUser:path+"/web/basic/import/deleteImportUser",// 用户导入表-删除
teacherSubmitUserData:path+"/web/basic/import/submitUserData",// 用户导入表 提交数据
teacherGetSingleImportUserInfo:path+"/web/basic/import/getSingleImportUserInfo",// 获得用户导入表单项导入信息
teacherUpdateImportUser:path+"/web/basic/import/updateImportUser",// 用户导入表-编辑
// 幼儿信息
childrenAdd:path+"/web/basic/child/add",// 新建幼儿
childrenSingleChildInfo:path+"/web/basic/child/singleChildInfo",// 获得单项幼儿条目
childrenUpdate:path+"/web/basic/child/update",// 更新幼儿条目
childrenDelete:path+"/web/basic/child/delete",// 移除幼儿
childrenMyClassInfo:path+"/web/basic/myClassInfo",// 获得幼儿所在班级列表
childrenInfo:path+"/web/basic/child/childInfo",// 获得幼儿列表
childrenParentInfo:path+"/web/basic/child/parentInfo",// 获得幼儿家长列表
childrenGetImportUserInfo:path+"/web/basic/import/getImportChildInfo",// 获得用户导入表信息
childrenDeleteImportUser:path+"/web/basic/import/deleteImportChild",// 用户导入表-删除
childrenSubmitUserData:path+"/web/basic/import/submitChildData",// 用户导入表 提交数据
childrenGetSingleImportUserInfo:path+"/web/basic/import/getSingleImportChildInfo",// 获得用户导入表单项导入信息
childrenUpdateImportUser:path+"/web/basic/import/updateImportChild",// 幼儿导入表-编辑
childrenParentDelete:path+"/web/basic/parent/delete",// 删除家长条目
childrenSingleParentInfo:path+"/web/basic/parent/singleParentInfo",// 获得单项家长条目
childrenParentUpdate:path+"/web/basic/parent/update",// 更新家长条目
// 班级管理
classAdd:path+"/web/basic/org/add",// 新建班级
classUpdate:path+"/web/basic/org/update",// 更新班级
classDelete:path+"/web/basic/org/delete",// 移除班级
classGradeList:path+"/web/basic/org/gradeList",// 获得年级列表
classInfo:path+"/web/basic/org/classInfo",// 获得班级列表及人员数量
classOfStaff:path+"/web/basic/org/staffOfClass",// 获得班级教职工列表
classSingleClassInfo:path+"/web/basic/org/singleClassInfo",// 获得单项班级条目
classBasicInfo:path+"/web/basic/org/classBasicInfo",// 获得所有班级基础信息
classUpgrade:path+"/web/basic/org/upgrade",// 升班
classChange:path+"/web/basic/org/changeClass",// 调班
classMemberBasic:path+"/web/basic/org/memberBasic",// 获取班级幼儿及教职工名单
// 萌宝成长
growthBanner:path+"/web/ops/company/banner/list",// 萌宝成长 获取学校banner
growthAdd:path+"/web/growth/message/add",// 萌宝成长 新增
growthList:path+"/web/growth/message/list",// 萌宝成长 获取班级内容列表
growthStudent:path+"/common/basic/class/student",// 萌宝成长 获取当前班级学生列表
growthLabel:path+"/web/growth/label/list",// 萌宝成长 获取学校所有的标签
growthAddordelete:path+"/web/growth/praise/addordelete",// 萌宝成长 点赞或者取消点赞
growthCancelSticky:path+"/web/growth/message/cancelSticky",// 萌宝成长 取消内容置顶
growthCommentAdd:path+"/web/growth/comment/add",// 萌宝成长 新增一条评论或者回复
growthCommentDelete:path+"/web/growth/comment/delete",// 萌宝成长 删除某一条评论
growthMessageDelete:path+"/web/growth/message/delete",// 萌宝成长 删除一条内容
// 萌宝成长标签
growthLabelDelete:path+"/web/growth/label/delete",// 萌宝成长标签 删除
growthLabelAddOrUpdate:path+"/web/growth/label/addOrUpdate",// 新增或者编辑标签
// 萌宝成长统计
growthTeacherStat:path+"/web/growth/report/message/teacher",// 萌宝成长统计 教师发帖数量
growthClassStat:path+"/web/growth/report/message/class",// 萌宝成长统计 班级发帖数量
growthLivelyStat:path+"/web/growth/report/parent/lively",// 萌宝成长统计 点赞评论数量
// 观察记录
watchCourseList:path+"/web/sample/search/course/list",// (查询)个人观察计划列表
watchClassList:path+"/web/sample/search/class/list",// (查询)获取个人所在班级
watchTeacherList:path+"/web/sample/search/teacher/list",// (查询)获取班级所有老师
watchDimList:path+"/web/sample/search/dim/list",// (查询)观察计划维度列表
watchRecordUpdate:path+"/web/sample/student/record/update",// 更新观察记录
watchRecordList:path+"/web/sample/student/record/list",// 获取学生观察记录列表
watchStudentList:path+"/web/sample/record/student/list",// 获取观察记录学生列表
watchRecordDetail:path+"/web/sample/student/record/detail",// 获取观察记录详情
watchRecordDelete:path+"/web/sample/record/delete",// 删除观察记录
watchTeacherStat:path+"/web/sample/report/teacher",// 观察记录统计
watchClassStat:path+"/web/sample/report/class",// 观察记录统计01
// 观察计划
watchPlanList:path+"/web/sample/company/course/list",// 获取观察计划列表
watchPlanDetail:path+"/web/sample/company/course/detail",// 获取观察计划详情
watchPlanAddOrUpdate:path+"/web/sample/company/course/addOrUpdate",// 新增或更新观察计划
watchPlanDelete:path+"/web/sample/company/course/delete",// 删除观察计划
watchPlanTeacherList:path+"/web/sample/company/teacherList",// 关联教师
// 综合评价
watchStudentInfo:path+"/web/sample/evaluate/evaluateStudentInfo",// 月度评价学生列表
watchStudentDetail:path+"/web/sample/evaluate/student/detail",// 学生月度评价详情
watchStudentAddOrUpdate:path+"/web/sample/evaluate/student/addOrUpdate",// 新增或编辑学生月度评价
watchConfigMonthList:path+"/web/sample/evaluate/config/monthList",// 获取所有月份配置列表
watchConfigAdd:path+"/web/sample/evaluate/config/add",// 新增月度评价配置
watchConfigAllDim:path+"/web/sample/evaluate/config/allDim",// 获取学校配置所有维度
watchConfigDetail:path+"/web/sample/evaluate/config/detail",// 获取月度评价配置详情
// 观察维度
dimLevelDelete:path+"/web/sample/company/dimLevel/delete",// 删除学校维度水平
dimDelete:path+"/web/sample/company/dim/delete",// 删除学校观察维度
dimLevelAddOrUpdate:path+"/web/sample/company/dimLevel/addOrUpdate",// 新增或更新学校维度水平
dimAddOrUpdate:path+"/web/sample/company/dim/addOrUpdate",// 新增或更新学校观察维度
dimLevelList:path+"/web/sample/company/dimLevel/list",// 获取学校维度水平列表
dimList:path+"/web/sample/company/dim/list",// 获取学校观察维度
// 个体发展水平
getStudentAbility:path+"/web/sample/TJ/TJ_GCJL_GetStudentAbilityStrong",// 个人综合能力评价 雷达图
getStudentCourseAbility:path+"/web/sample/TJ/TJ_GCJL_GetStudentCourseAbility",// 个人课程 能力评价 雷达图
// 班级发展水平
getClassesAbilibySimple:path+"/web/sample/TJ/TJ_GCJL_GetClassesAbilibySimple",// 班级领域发展水平
getCourseAbilibySimple:path+"/web/sample/TJ/TJ_GCJL_GetCourseAbilibySimple",// 班级游戏与生活观察
getClassAbilibySimple:path+"/web/sample/TJ/TJ_GCJL_GetClassAbilibySimple",// 班级领域发展水平--数量统计
getCourseAbilibyCount:path+"/web/sample/TJ/TJ_GCJL_GetCourseAbilibyCount",// 课程发展水平--数量统计
// 成长档案
recordStudent:path+"/web/mbtrack/dan/student",// 获取学生列表(含档案信息)
recordList:path+"/web/mbtrack/danbook/list",// 获取档案册列表
recordMonthList:path+"/web/mbtrack/danbook/danList",// 获取档案册档案页详情
recordNewDanbook:path+"/web/mbtrack/danbook/save",// 新建档案册
recordDownload:path+"/file/patch/download",//图片批量下载(档案页)
recordDanbookUpdate:path+"/web/mbtrack/danbook/update",// 档案册名更新
recordTeacherStat:path+"/web/mbtrack/report/teacher",// 教师成长档案统计
recordParentStat:path+"/web/mbtrack/report/parent",// 家长成长档案统计
// 考勤
attendGetChildOfClass:path+"/web/attendance/teacher/getChildOfClass",// 获得班级所有幼儿信息
attendGetAttendanceRecord:path+"/web/attendance/teacher/getAttendanceRecord",// 获得考勤记录
attendCheckConfirm:path+"/web/attendance/teacher/checkConfirm",// 教师端检查确认
attendDisPlayAttendDays:path+"/web/attendance/teacher/disPlayAttendDays",// 查看已设置的考勤天数
attendUpdateAttendDays:path+"/web/attendance/teacher/updateAttendDays",// 修改考勤天数设置
attendResetAttendDays:path+"/web/attendance/teacher/resetAttendDays",// 复位考勤天数设置
attendGetClassAttendanceInfo:path+"/web/attendance/teacher/getClassAttendanceInfo",// 获得班级考勤
attendGetPersonalAttendance:path+"/web/attendance/parent/getPersonalAttendance",// 获得个人考勤
// 公告
getMyClassInfo:path+"/web/basic/getMyClassInfo",// 获取我的班级信息
getClassStuAndTeachers:path+"/web/basic/getClassStuAndTeachers",// 获取班级所有学生和老师
noticeGetDesc:path+"/web/notice/getNoticeDesc",// 获取公告描述
noticeReaded:path+"/web/notice/markNoticeReaded",// 公告置为已读
noticeAddNew:path+"/web/notice/addNewNotice",// 新增新的公告内容
noticeGetContentList:path+"/web/notice/getNoticeContent",// 获取某个公告内容列表
noticeGetReadDetail:path+"/web/notice/getReadDetail",// 获取某条公告内容阅读详情
noticeDelNoticeContent:path+"/web/notice/delNoticeContent",// 删除某条公告内容
noticeUpdateNoticeContent:path+"/web/notice/updateNoticeContent",// 更新某条公告内容
// 每周菜谱
menuSaveTable:path+"/web/cookbook/saveTable",// 保存表格
menuDeleteTable:path+"/web/cookbook/deleteTable",// 删除整张表
menuUpdateTitle:path+"/web/cookbook/updateTitle",// 更新菜谱标题
menuSelectCell:path+"/web/cookbook/selectCell",// 获得某个单元
menuGetTitleList:path+"/web/cookbook/getTitleList",// 获得菜谱标题列表
menuStructuringTableCell:path+"/web/cookbook/structuringTableCell",// 通过开始日期获取表单
// 风险预警
riskGetCompanyHealthAlert:path+"/web/healthAlert/getCompanyHealthAlert",// 获取登录人所在学校的所有预警
riskGetAlertType:path+"/web/healthAlert/getAlertType",// 获取预警类型列表
riskGetAlertAge:path+"/web/healthAlert/getAlertAge",// 获得预警年龄列表
riskNewHealthAlert:path+"/web/healthAlert/newHealthAlert",// 新增风险预警
riskGetHealthAlert:path+"/web/healthAlert/getHealthAlert",// 获取单条健康预警
riskUpdateHealthAlert:path+"/web/healthAlert/updateHealthAlert",// 更改健康预警
riskDeleteHealthAlert:path+"/web/healthAlert/deleteHealthAlert",// 删除健康预警
// 健康信息
healthGetExamDateList:path+"/web/healthInfo/getExamDateList",// 根据班级获得检查日期列表
healthGetClassHealthInfo:path+"/web/healthInfo/getClassHealthInfo",// 获得班级健康信息
healthGetChildListOfClass:path+"/web/healthInfo/getChildListOfClass",// 获得班级幼儿列表
healthGetBirthdaySex:path+"/web/healthInfo/getBirthdaySex",// 获得幼儿生日及性别
healthCalculateAge:path+"/web/healthInfo/calculateAge",// 根据生日,体检日期,计算年龄
healthHPValue:path+"/web/healthInfo/HPValue",// 计算身高p值
healthWPValue:path+"/web/healthInfo/WPValue",// 计算体重p值
healthFatnessValue:path+"/web/healthInfo/FatnessValue",// 计算肥胖值
healthNewHealthInfo:path+"/web/healthInfo/newHealthInfo",// 新增健康信息
healthGetSingleHI:path+"/web/healthInfo/getSingleHI",// 获得单条健康信息
healthUpdateHealthInfo:path+"/web/healthInfo/updateHealthInfo",// 更新健康信息
healthDeleteHealthInfo:path+"/web/healthInfo/deleteHealthInfo",// 删除健康信息
// 自选课程 剧场活动
GetSchoolIds:path+"/web/activity/TSCourse_GetSchoolIds",//特色课程 获取学校课程id
GetSchoolJYIds:path+"/web/activity/TSCourse_GetSchoolJYIds",//剧场活动 id
GetSchoolCourses:path+"/web/activity/TSCourse_GetSchoolCourses",//特色课程 获取学校课程
AddCourse:path+"/web/activity/TSCourse_AddCourse",//特色课程 新增
GetCourseDetails:path+"/web/activity/TSCourse_GetCourseDetails",//获取学校课程详情
tsDelCourse:path+"/web/activity/TSCourse_DelCourse",// 删除学校课程
tsGetBookedChildren:path+"/web/activity/TSCourse_GetBookedChildren",// 签到学生列表
tsCallRoll:path+"/web/activity/TSCourse_CallRoll",// 签到
tsCancelRoll:path+"/web/activity/TSCourse_CancelRoll",// 取消签到
tsTempBookCourse:path+"/web/activity/TSCourse_tempBookCourse",// 补加预约人数
getCourseSimpleTJ:path+"/web/activity/TSCourse_getCourseSimpleTJ",// 自选活动 活动统计
getCourseClassTJ:path+"/web/activity/TSCourse_getCourseClassTJ",// 自选活动 班级统计
getCourseStudentTJ:path+"/web/activity/TSCourse_getCourseStudentTJ",// 自选活动 学生统计
getCourseStudentDetailTJ:path+"/web/activity/TSCourse_getCourseStudentDetailTJ",// 自选活动 活动统计详情
getCourseAllTJ:path+"/web/activity/TSCourse_getCourseAllTJ2",// 自选活动 活动统计01
// 文件中心
fileGetRoot:path+"/web/fileCenter/getRoot",// 获取根目录
fileGetChildFileInfo:path+"/web/fileCenter/getChildFileInfo",// 获取文件的所有子级文件
fileGetSingleFileInfo:path+"/web/fileCenter/getSingleFileInfo",// 获取单项文件信息
fileAddFileInfo:path+"/web/fileCenter/addFileInfo",// 增加一项文件信息
fileDeleteFileInfo:path+"/web/fileCenter/deleteFileInfo",// 删除文件信息
fileUpdateFileName:path+"/web/fileCenter/updateFileName",// 更新文件名
// 08设置
setting:''
};
function initAjax(url,param,callback,callback01,callback02) {
$.ajax({
type:"POST",
url:url,
data:param,
dataType:"json",
statusCode:{
404:function(){
console.log("访问地址不存在或接口参数有误 错误代码404");
},
500:function(){
console.log("因为意外情况,服务器不能完成请求 错误代码500");
// window.location.href=httpUrl.loginHttp;
},
405:function(){
console.log("资源被禁止 错误代码405");
}
},
beforeSend:function () {
// loadingIn();// loading载入
},
success:function(result){
callback(result,callback01,callback02);
// loadingOut(); // loading退出
},
error:function(result){
console.log("请求失败 ajax error!");
// window.location.href=httpUrl.loginHttp;
}
});
};
// loading载入函数
function loadingIn() {
$("#page-loader").removeClass('hide');
$("#page-loader").css("z-index","999999");
};
function loadingOut(argument) {
$("#page-loader").addClass('hide');
};
Date.prototype.Format = function (fmt) {
var o = {
"M+": this.getMonth() + 1, //月份
"d+": this.getDate(), //日
"h+": this.getHours(), //小时
"m+": this.getMinutes(), //分
"s+": this.getSeconds(), //秒
"q+": Math.floor((this.getMonth() + 3) / 3), //季度
"S": this.getMilliseconds() //毫秒
};
if (/(y+)/.test(fmt)) fmt = fmt.replace(RegExp.$1, (this.getFullYear() + "").substr(4 - RegExp.$1.length));
for (var k in o)
if (new RegExp("(" + k + ")").test(fmt)) fmt = fmt.replace(RegExp.$1, (RegExp.$1.length == 1) ? (o[k]) : (("00" + o[k]).substr(("" + o[k]).length)));
return fmt;
};
// 地址栏search参数筛选函数
function GetQueryString(name){
var reg = new RegExp("(^|&)"+ name +"=([^&]*)(&|$)");
var result = window.location.search.substr(1).match(reg);
return result?decodeURIComponent(result[2]):null;
}
// 设置cookie 过期时间s20代表20秒 h12代表12小时 d30代表30天
function setCookie(name,value,time){
var strsec = getsec(time);
var exp = new Date();
exp.setTime(exp.getTime() + strsec*1);
// document.cookie = name + "="+ escape (value) + ";expires=" + exp.toGMTString()+"path=/; domain="+domain;
document.cookie = name + "="+ escape (value) + ";expires=" + exp.toGMTString();
};
function getsec(str){
var str1=str.substring(1,str.length)*1;
var str2=str.substring(0,1);
if (str2=="s"){
return str1*1000;
}
else if (str2=="h")
{
return str1*60*60*1000;
}
else if (str2=="d")
{
return str1*24*60*60*1000;
}
};
// 获取cookie
function getCookie(name){
var arr,reg=new RegExp("(^| )"+name+"=([^;]*)(;|$)");
if(arr=document.cookie.match(reg)){
return unescape(arr[2]);
}
else{
return null;
}
};
// 删除cookie
function delCookie(name){
var exp = new Date();
exp.setTime(exp.getTime() - 1);
var cval=getCookie(name);
if(cval!=null){
document.cookie= name + "="+cval+";expires="+exp.toGMTString();
};
};
// niceScroll滚动条
function chooseNiceScroll(AA,color) {
$(AA).niceScroll({
cursorcolor: color || "#ccc",//#CC0071 光标颜色
cursoropacitymax: 1, //改变不透明度非常光标处于活动状态(scrollabar“可见”状态),范围从1到0
touchbehavior: true, //使光标拖动滚动像在台式电脑触摸设备
cursorwidth: "5px", //像素光标的宽度
cursorborder: "0", // 游标边框css定义
cursorborderradius: "5px",//以像素为光标边界半径
autohidemode: true //是否隐藏滚动条
});
};
// 消息提示函数
function toastTip(heading,text,hideAfter,afterHidden) {
$.toast({
heading: heading,
text: text,
showHideTransition: 'slide',
icon: 'success',
hideAfter: hideAfter || 1500,
loaderBg: '#edd42e',
position: 'bottom-right',
afterHidden: afterHidden
});
};
| identifier_body | ||
base.js | var user={
pid:GetQueryString("pid"),
sid:GetQueryString("sid")
};
var serverUrl01="https://www.member361.com";//84正式服务器
var serverUrl02="https://121.43.150.38";//38测试服务器
var serverUrl03="http://106.15.89.156";//156测试服务器
var serverHost="https://www.member361.com";
var path=serverUrl01; //更改服务器地址可设置此值
var httpUrl={
// 基础
loginId:getCookie("loginId"),
path_img:path+"/file/getImage?md5=", // 图片地址
download:path+"/file/downloadOne?", // 文件下载
picUrl:path+"/file/upload2", // 图片上传地址
basicFileUpload:path+"/file/business/upload", // 业务文件上传
login:path+"/web/login/loginChecking",// 首页登入
loginUserInfo:path+"/web/basic/loginUserInfo",// 获得登录人信息
basicButton:path+"/web/ops/menu/button/list",// 获取菜单功能按钮列表
basicMyClassInfo:path+"/web/basic/myClassInfo",// 获得当前人 所在班级列表
basicAllClassInfo:path+"/web/basic/allClassInfo",// 获得登录人所在学校所有班级列表
basicCompanyList:path+"/web/ops/company/list",// 获取所有的学校
basicZip:path+"/file/zip",// 获取打包下载zip
basicStudent:path+"/common/basic/class/student",// 获取当前班级学生列表
// 菜单
menuList:path+"/web/ops/user/menu/list",// 菜单接口
menuChildList:path+"/web/ops/user/menu/childList",// 获取子菜单列表
menuButtonList:path+"/web/ops/menu/button/list",// 获取菜单功能按钮列表
// 菜单管理
menuButtonAddOrUpdate:path+"/web/ops/menu/button/addOrUpdate",// 新增或更新按钮信息
menuAddOrUpdate:path+"/web/ops/menu/addOrUpdate",// 新增或更新菜单信息
menuCompanyUpdate:path+"/web/ops/company/menu/update",// 更新学校菜单信息
menuCompanyList:path+"/web/ops/company/menu/list",// 获取学校菜单列表
menuButtonList:path+"/web/ops/button/list",// 获取菜单按钮列表
menuDelete:path+"/web/ops/menu/delete",// 删除菜单
menuButtonDelete:path+"/web/ops/menu/button/delete",// 删除菜单按钮
menuDetail:path+"/web/ops/menu/detail",// 菜单详情
menuButtonDetail:path+"/web/ops/menu/button/detail",// 按钮详情
menuRoleButtonUpdate:path+"/web/ops/role/button/update",// 更新角色按钮权限
menuRoleButtonList:path+"/web/ops/role/buttonList",// 获得角色所有按钮(含选中信息)
// 学校角色管理
schoolTypeList:path+"/web/ops/role/typeList",// 获取所有的角色
schoolMenuList:path+"/web/ops/company/role/menuList",// 获取学校角色菜单
schoolMenuUpdate:path+"/web/ops/company/role/menu/update",// 更新学校角色菜单
// 教师信息
teacherAdd:path+"/web/basic/staff/add",// 新建教职工
teacherSingleStaffInfo:path+"/web/basic/staff/singleStaffInfo",// 获得单项教职工条目
teacherUpdate:path+"/web/basic/staff/update",// 更新教职工条目
teacherDelete:path+"/web/basic/staff/delete",// 移除教职工
teacherAllType:path+"/web/basic/staff/allType",// 获得所有教职工类型
teacherMyClassInfo:path+"/web/basic/myClassInfo",// 获得教职工所在班级列表
teacherStaffInfo:path+"/web/basic/staff/staffInfo",// 获得教职工列表
teacherGetImportUserInfo:path+"/web/basic/import/getImportUserInfo",// 获得用户导入表信息
teacherDeleteImportUser:path+"/web/basic/import/deleteImportUser",// 用户导入表-删除
teacherSubmitUserData:path+"/web/basic/import/submitUserData",// 用户导入表 提交数据
teacherGetSingleImportUserInfo:path+"/web/basic/import/getSingleImportUserInfo",// 获得用户导入表单项导入信息
teacherUpdateImportUser:path+"/web/basic/import/updateImportUser",// 用户导入表-编辑
// 幼儿信息
childrenAdd:path+"/web/basic/child/add",// 新建幼儿
childrenSingleChildInfo:path+"/web/basic/child/singleChildInfo",// 获得单项幼儿条目
childrenUpdate:path+"/web/basic/child/update",// 更新幼儿条目
childrenDelete:path+"/web/basic/child/delete",// 移除幼儿
childrenMyClassInfo:path+"/web/basic/myClassInfo",// 获得幼儿所在班级列表
childrenInfo:path+"/web/basic/child/childInfo",// 获得幼儿列表
childrenParentInfo:path+"/web/basic/child/parentInfo",// 获得幼儿家长列表
childrenGetImportUserInfo:path+"/web/basic/import/getImportChildInfo",// 获得用户导入表信息
childrenDeleteImportUser:path+"/web/basic/import/deleteImportChild",// 用户导入表-删除
childrenSubmitUserData:path+"/web/basic/import/submitChildData",// 用户导入表 提交数据
childrenGetSingleImportUserInfo:path+"/web/basic/import/getSingleImportChildInfo",// 获得用户导入表单项导入信息
childrenUpdateImportUser:path+"/web/basic/import/updateImportChild",// 幼儿导入表-编辑
childrenParentDelete:path+"/web/basic/parent/delete",// 删除家长条目
childrenSingleParentInfo:path+"/web/basic/parent/singleParentInfo",// 获得单项家长条目
childrenParentUpdate:path+"/web/basic/parent/update",// 更新家长条目
// 班级管理
classAdd:path+"/web/basic/org/add",// 新建班级
classUpdate:path+"/web/basic/org/update",// 更新班级
classDelete:path+"/web/basic/org/delete",// 移除班级
classGradeList:path+"/web/basic/org/gradeList",// 获得年级列表
classInfo:path+"/web/basic/org/classInfo",// 获得班级列表及人员数量
classOfStaff:path+"/web/basic/org/staffOfClass",// 获得班级教职工列表
classSingleClassInfo:path+"/web/basic/org/singleClassInfo",// 获得单项班级条目
classBasicInfo:path+"/web/basic/org/classBasicInfo",// 获得所有班级基础信息
classUpgrade:path+"/web/basic/org/upgrade",// 升班
classChange:path+"/web/basic/org/changeClass",// 调班
classMemberBasic:path+"/web/basic/org/memberBasic",// 获取班级幼儿及教职工名单
// 萌宝成长
growthBanner:path+"/web/ops/company/banner/list",// 萌宝成长 获取学校banner
growthAdd:path+"/web/growth/message/add",// 萌宝成长 新增
growthList:path+"/web/growth/message/list",// 萌宝成长 获取班级内容列表
growthStudent:path+"/common/basic/class/student",// 萌宝成长 获取当前班级学生列表
growthLabel:path+"/web/growth/label/list",// 萌宝成长 获取学校所有的标签
growthAddordelete:path+"/web/growth/praise/addordelete",// 萌宝成长 点赞或者取消点赞
growthCancelSticky:path+"/web/growth/message/cancelSticky",// 萌宝成长 取消内容置顶
growthCommentAdd:path+"/web/growth/comment/add",// 萌宝成长 新增一条评论或者回复
growthCommentDelete:path+"/web/growth/comment/delete",// 萌宝成长 删除某一条评论
growthMessageDelete:path+"/web/growth/message/delete",// 萌宝成长 删除一条内容
// 萌宝成长标签
growthLabelDelete:path+"/web/growth/label/delete",// 萌宝成长标签 删除
growthLabelAddOrUpdate:path+"/web/growth/label/addOrUpdate",// 新增或者编辑标签
// 萌宝成长统计
growthTeacherStat:path+"/web/growth/report/message/teacher",// 萌宝成长统计 教师发帖数量
growthClassStat:path+"/web/growth/report/message/class",// 萌宝成长统计 班级发帖数量
growthLivelyStat:path+"/web/growth/report/parent/lively",// 萌宝成长统计 点赞评论数量
// 观察记录
watchCourseList:path+"/web/sample/search/course/list",// (查询)个人观察计划列表
watchClassList:path+"/web/sample/search/class/list",// (查询)获取个人所在班级
watchTeacherList:path+"/web/sample/search/teacher/list",// (查询)获取班级所有老师
watchDimList:path+"/web/sample/search/dim/list",// (查询)观察计划维度列表
watchRecordUpdate:path+"/web/sample/student/record/update",// 更新观察记录
watchRecordList:path+"/web/sample/student/record/list",// 获取学生观察记录列表
watchStudentList:path+"/web/sample/record/student/list",// 获取观察记录学生列表
watchRecordDetail:path+"/web/sample/student/record/detail",// 获取观察记录详情
watchRecordDelete:path+"/web/sample/record/delete",// 删除观察记录
watchTeacherStat:path+"/web/sample/report/teacher",// 观察记录统计
watchClassStat:path+"/web/sample/report/class",// 观察记录统计01
// 观察计划
watchPlanList:path+"/web/sample/company/course/list",// 获取观察计划列表
watchPlanDetail:path+"/web/sample/company/course/detail",// 获取观察计划详情
watchPlanAddOrUpdate:path+"/web/sample/company/course/addOrUpdate",// 新增或更新观察计划
watchPlanDelete:path+"/web/sample/company/course/delete",// 删除观察计划
watchPlanTeacherList:path+"/web/sample/company/teacherList",// 关联教师
// 综合评价
watchStudentInfo:path+"/web/sample/evaluate/evaluateStudentInfo",// 月度评价学生列表
watchStudentDetail:path+"/web/sample/evaluate/student/detail",// 学生月度评价详情
watchStudentAddOrUpdate:path+"/web/sample/evaluate/student/addOrUpdate",// 新增或编辑学生月度评价
watchConfigMonthList:path+"/web/sample/evaluate/config/monthList",// 获取所有月份配置列表
watchConfigAdd:path+"/web/sample/evaluate/config/add",// 新增月度评价配置
watchConfigAllDim:path+"/web/sample/evaluate/config/allDim",// 获取学校配置所有维度
watchConfigDetail:path+"/web/sample/evaluate/config/detail",// 获取月度评价配置详情
// 观察维度
dimLevelDelete:path+"/web/sample/company/dimLevel/delete",// 删除学校维度水平
dimDelete:path+"/web/sample/company/dim/delete",// 删除学校观察维度
dimLevelAddOrUpdate:path+"/web/sample/company/dimLevel/addOrUpdate",// 新增或更新学校维度水平
dimAddOrUpdate:path+"/web/sample/company/dim/addOrUpdate",// 新增或更新学校观察维度
dimLevelList:path+"/web/sample/company/dimLevel/list",// 获取学校维度水平列表
dimList:path+"/web/sample/company/dim/list",// 获取学校观察维度
// 个体发展水平
getStudentAbility:path+"/web/sample/TJ/TJ_GCJL_GetStudentAbilityStrong",// 个人综合能力评价 雷达图
getStudentCourseAbility:path+"/web/sample/TJ/TJ_GCJL_GetStudentCourseAbility",// 个人课程 能力评价 雷达图
// 班级发展水平
getClassesAbilibySimple:path+"/web/sample/TJ/TJ_GCJL_GetClassesAbilibySimple",// 班级领域发展水平
getCourseAbilibySimple:path+"/web/sample/TJ/TJ_GCJL_GetCourseAbilibySimple",// 班级游戏与生活观察
getClassAbilibySimple:path+"/web/sample/TJ/TJ_GCJL_GetClassAbilibySimple",// 班级领域发展水平--数量统计
getCourseAbilibyCount:path+"/web/sample/TJ/TJ_GCJL_GetCourseAbilibyCount",// 课程发展水平--数量统计
// 成长档案
recordStudent:path+"/web/mbtrack/dan/student",// 获取学生列表(含档案信息)
recordList:path+"/web/mbtrack/danbook/list",// 获取档案册列表
recordMonthList:path+"/web/mbtrack/danbook/danList",// 获取档案册档案页详情
recordNewDanbook:path+"/web/mbtrack/danbook/save",// 新建档案册
recordDownload:path+"/file/patch/download",//图片批量下载(档案页)
recordDanbookUpdate:path+"/web/mbtrack/danbook/update",// 档案册名更新
recordTeacherStat:path+"/web/mbtrack/report/teacher",// 教师成长档案统计
recordParentStat:path+"/web/mbtrack/report/parent",// 家长成长档案统计
// 考勤
attendGetChildOfClass:path+"/web/attendance/teacher/getChildOfClass",// 获得班级所有幼儿信息
attendGetAttendanceRecord:path+"/web/attendance/teacher/getAttendanceRecord",// 获得考勤记录
attendCheckConfirm:path+"/web/attendance/teacher/checkConfirm",// 教师端检查确认
attendDisPlayAttendDays:path+"/web/attendance/teacher/disPlayAttendDays",// 查看已设置的考勤天数
attendUpdateAttendDays:path+"/web/attendance/teacher/updateAttendDays",// 修改考勤天数设置
attendResetAttendDays:path+"/web/attendance/teacher/resetAttendDays",// 复位考勤天数设置
attendGetClassAttendanceInfo:path+"/web/attendance/teacher/getClassAttendanceInfo",// 获得班级考勤
attendGetPersonalAttendance:path+"/web/attendance/parent/getPersonalAttendance",// 获得个人考勤
// 公告
getMyClassInfo:path+"/web/basic/getMyClassInfo",// 获取我的班级信息
getClassStuAndTeachers:path+"/web/basic/getClassStuAndTeachers",// 获取班级所有学生和老师
noticeGetDesc:path+"/web/notice/getNoticeDesc",// 获取公告描述
noticeReaded:path+"/web/notice/markNoticeReaded",// 公告置为已读
noticeAddNew:path+"/web/notice/addNewNotice",// 新增新的公告内容
noticeGetContentList:path+"/web/notice/getNoticeContent",// 获取某个公告内容列表
noticeGetReadDetail:path+"/web/notice/getReadDetail",// 获取某条公告内容阅读详情
noticeDelNoticeContent:path+"/web/notice/delNoticeContent",// 删除某条公告内容
noticeUpdateNoticeContent:path+"/web/notice/updateNoticeContent",// 更新某条公告内容
// 每周菜谱
menuSaveTable:path+"/web/cookbook/saveTable",// 保存表格
menuDeleteTable:path+"/web/cookbook/deleteTable",// 删除整张表
menuUpdateTitle:path+"/web/cookbook/updateTitle",// 更新菜谱标题
menuSelectCell:path+"/web/cookbook/selectCell",// 获得某个单元
menuGetTitleList:path+"/web/cookbook/getTitleList",// 获得菜谱标题列表
menuStructuringTableCell:path+"/web/cookbook/structuringTableCell",// 通过开始日期获取表单
// 风险预警
riskGetCompanyHealthAlert:path+"/web/healthAlert/getCompanyHealthAlert",// 获取登录人所在学校的所有预警
riskGetAlertType:path+"/web/healthAlert/getAlertType",// 获取预警类型列表
riskGetAlertAge:path+"/web/healthAlert/getAlertAge",// 获得预警年龄列表
riskNewHealthAlert:path+"/web/healthAlert/newHealthAlert",// 新增风险预警
riskGetHealthAlert:path+"/web/healthAlert/getHealthAlert",// 获取单条健康预警
riskUpdateHealthAlert:path+"/web/healthAlert/updateHealthAlert",// 更改健康预警
riskDeleteHealthAlert:path+"/web/healthAlert/deleteHealthAlert",// 删除健康预警
// 健康信息
healthGetExamDateList:path+"/web/healthInfo/getExamDateList",// 根据班级获得检查日期列表
healthGetClassHealthInfo:path+"/web/healthInfo/getClassHealthInfo",// 获得班级健康信息
healthGetChildListOfClass:path+"/web/healthInfo/getChildListOfClass",// 获得班级幼儿列表
healthGetBirthdaySex:path+"/web/healthInfo/getBirthdaySex",// 获得幼儿生日及性别
healthCalculateAge:path+"/web/healthInfo/calculateAge",// 根据生日,体检日期,计算年龄
healthHPValue:path+"/web/healthInfo/HPValue",// 计算身高p值
healthWPValue:path+"/web/healthInfo/WPValue",// 计算体重p值
healthFatnessValue:path+"/web/healthInfo/FatnessValue",// 计算肥胖值
healthNewHealthInfo:path+"/web/healthInfo/newHealthInfo",// 新增健康信息
healthGetSingleHI:path+"/web/healthInfo/getSingleHI",// 获得单条健康信息
healthUpdateHealthInfo:path+"/web/healthInfo/updateHealthInfo",// 更新健康信息
healthDeleteHealthInfo:path+"/web/healthInfo/deleteHealthInfo",// 删除健康信息
// 自选课程 剧场活动
GetSchoolIds:path+"/web/activity/TSCourse_GetSchoolIds",//特色课程 获取学校课程id
GetSchoolJYIds:path+"/web/activity/TSCourse_GetSchoolJYIds",//剧场活动 id
GetSchoolCourses:path+"/web/activity/TSCourse_GetSchoolCourses",//特色课程 获取学校课程
AddCourse:path+"/web/activity/TSCourse_AddCourse",//特色课程 新增
GetCourseDetails:path+"/web/activity/TSCourse_GetCourseDetails",//获取学校课程详情
tsDelCourse:path+"/web/activity/TSCourse_DelCourse",// 删除学校课程
tsGetBookedChildren:path+"/web/activity/TSCourse_GetBookedChildren",// 签到学生列表
tsCallRoll:path+"/web/activity/TSCourse_CallRoll",// 签到
tsCancelRoll:path+"/web/activity/TSCourse_CancelRoll",// 取消签到
tsTempBookCourse:path+"/web/activity/TSCourse_tempBookCourse",// 补加预约人数
getCourseSimpleTJ:path+"/web/activity/TSCourse_getCourseSimpleTJ",// 自选活动 活动统计
getCourseClassTJ:path+"/web/activity/TSCourse_getCourseClassTJ",// 自选活动 班级统计
getCourseStudentTJ:path+"/web/activity/TSCourse_getCourseStudentTJ",// 自选活动 学生统计
getCourseStudentDetailTJ:path+"/web/activity/TSCourse_getCourseStudentDetailTJ",// 自选活动 活动统计详情
getCourseAllTJ:path+"/web/activity/TSCourse_getCourseAllTJ2",// 自选活动 活动统计01
// 文件中心
fileGetRoot:path+"/web/fileCenter/getRoot",// 获取根目录
fileGetChildFileInfo:path+"/web/fileCenter/getChildFileInfo",// 获取文件的所有子级文件
fileGetSingleFileInfo:path+"/web/fileCenter/getSingleFileInfo",// 获取单项文件信息
fileAddFileInfo:path+"/web/fileCenter/addFileInfo",// 增加一项文件信息
fileDeleteFileInfo:path+"/web/fileCenter/deleteFileInfo",// 删除文件信息
fileUpdateFileName:path+"/web/fileCenter/updateFileName",// 更新文件名
// 08设置
setting:''
};
function initAjax(url,param,callback,callback01,callback02) {
$.ajax({
type:"POST",
url:url,
data:param,
dataType:"json",
statusCode:{
404:function(){
console.log("访问地址不存在或接口参数有误 错误代码404");
},
500:function(){
console.log("因为意外情况,服务器不能完成请求 错误代码500");
// window.location.href=httpUrl.loginHttp;
},
405:function(){
console.log("资源被禁止 错误代码405");
}
},
beforeSend:function () {
// loadingIn();// loading载入
},
success:function(result){
callback(result,callback01,callback02);
// loadingOut(); // loading退出
},
error:function(result){
console.log("请求失败 ajax error!");
// window.location.href=httpUrl.loginHttp;
}
});
};
// loading载入函数
function loadingIn() {
$("#page-loader").removeClass('hide');
$("#page-loader").css("z-index","999999");
};
function loadingOut(argument) {
$("#page-loader").addClass('hide'); |
Date.prototype.Format = function (fmt) {
var o = {
"M+": this.getMonth() + 1, //月份
"d+": this.getDate(), //日
"h+": this.getHours(), //小时
"m+": this.getMinutes(), //分
"s+": this.getSeconds(), //秒
"q+": Math.floor((this.getMonth() + 3) / 3), //季度
"S": this.getMilliseconds() //毫秒
};
if (/(y+)/.test(fmt)) fmt = fmt.replace(RegExp.$1, (this.getFullYear() + "").substr(4 - RegExp.$1.length));
for (var k in o)
if (new RegExp("(" + k + ")").test(fmt)) fmt = fmt.replace(RegExp.$1, (RegExp.$1.length == 1) ? (o[k]) : (("00" + o[k]).substr(("" + o[k]).length)));
return fmt;
};
// 地址栏search参数筛选函数
function GetQueryString(name){
var reg = new RegExp("(^|&)"+ name +"=([^&]*)(&|$)");
var result = window.location.search.substr(1).match(reg);
return result?decodeURIComponent(result[2]):null;
}
// 设置cookie 过期时间s20代表20秒 h12代表12小时 d30代表30天
function setCookie(name,value,time){
var strsec = getsec(time);
var exp = new Date();
exp.setTime(exp.getTime() + strsec*1);
// document.cookie = name + "="+ escape (value) + ";expires=" + exp.toGMTString()+"path=/; domain="+domain;
document.cookie = name + "="+ escape (value) + ";expires=" + exp.toGMTString();
};
function getsec(str){
var str1=str.substring(1,str.length)*1;
var str2=str.substring(0,1);
if (str2=="s"){
return str1*1000;
}
else if (str2=="h")
{
return str1*60*60*1000;
}
else if (str2=="d")
{
return str1*24*60*60*1000;
}
};
// 获取cookie
function getCookie(name){
var arr,reg=new RegExp("(^| )"+name+"=([^;]*)(;|$)");
if(arr=document.cookie.match(reg)){
return unescape(arr[2]);
}
else{
return null;
}
};
// 删除cookie
function delCookie(name){
var exp = new Date();
exp.setTime(exp.getTime() - 1);
var cval=getCookie(name);
if(cval!=null){
document.cookie= name + "="+cval+";expires="+exp.toGMTString();
};
};
// niceScroll滚动条
function chooseNiceScroll(AA,color) {
$(AA).niceScroll({
cursorcolor: color || "#ccc",//#CC0071 光标颜色
cursoropacitymax: 1, //改变不透明度非常光标处于活动状态(scrollabar“可见”状态),范围从1到0
touchbehavior: true, //使光标拖动滚动像在台式电脑触摸设备
cursorwidth: "5px", //像素光标的宽度
cursorborder: "0", // 游标边框css定义
cursorborderradius: "5px",//以像素为光标边界半径
autohidemode: true //是否隐藏滚动条
});
};
// 消息提示函数
function toastTip(heading,text,hideAfter,afterHidden) {
$.toast({
heading: heading,
text: text,
showHideTransition: 'slide',
icon: 'success',
hideAfter: hideAfter || 1500,
loaderBg: '#edd42e',
position: 'bottom-right',
afterHidden: afterHidden
});
}; | }; | random_line_split |
base.js | var user={
pid:GetQueryString("pid"),
sid:GetQueryString("sid")
};
var serverUrl01="https://www.member361.com";//84正式服务器
var serverUrl02="https://121.43.150.38";//38测试服务器
var serverUrl03="http://106.15.89.156";//156测试服务器
var serverHost="https://www.member361.com";
var path=serverUrl01; //更改服务器地址可设置此值
var httpUrl={
// 基础
loginId:getCookie("loginId"),
path_img:path+"/file/getImage?md5=", // 图片地址
download:path+"/file/downloadOne?", // 文件下载
picUrl:path+"/file/upload2", // 图片上传地址
basicFileUpload:path+"/file/business/upload", // 业务文件上传
login:path+"/web/login/loginChecking",// 首页登入
loginUserInfo:path+"/web/basic/loginUserInfo",// 获得登录人信息
basicButton:path+"/web/ops/menu/button/list",// 获取菜单功能按钮列表
basicMyClassInfo:path+"/web/basic/myClassInfo",// 获得当前人 所在班级列表
basicAllClassInfo:path+"/web/basic/allClassInfo",// 获得登录人所在学校所有班级列表
basicCompanyList:path+"/web/ops/company/list",// 获取所有的学校
basicZip:path+"/file/zip",// 获取打包下载zip
basicStudent:path+"/common/basic/class/student",// 获取当前班级学生列表
// 菜单
menuList:path+"/web/ops/user/menu/list",// 菜单接口
menuChildList:path+"/web/ops/user/menu/childList",// 获取子菜单列表
menuButtonList:path+"/web/ops/menu/button/list",// 获取菜单功能按钮列表
// 菜单管理
menuButtonAddOrUpdate:path+"/web/ops/menu/button/addOrUpdate",// 新增或更新按钮信息
menuAddOrUpdate:path+"/web/ops/menu/addOrUpdate",// 新增或更新菜单信息
menuCompanyUpdate:path+"/web/ops/company/menu/update",// 更新学校菜单信息
menuCompanyList:path+"/web/ops/company/menu/list",// 获取学校菜单列表
menuButtonList:path+"/web/ops/button/list",// 获取菜单按钮列表
menuDelete:path+"/web/ops/menu/delete",// 删除菜单
menuButtonDelete:path+"/web/ops/menu/button/delete",// 删除菜单按钮
menuDetail:path+"/web/ops/menu/detail",// 菜单详情
menuButtonDetail:path+"/web/ops/menu/button/detail",// 按钮详情
menuRoleButtonUpdate:path+"/web/ops/role/button/update",// 更新角色按钮权限
menuRoleButtonList:path+"/web/ops/role/buttonList",// 获得角色所有按钮(含选中信息)
// 学校角色管理
schoolTypeList:path+"/web/ops/role/typeList",// 获取所有的角色
schoolMenuList:path+"/web/ops/company/role/menuList",// 获取学校角色菜单
schoolMenuUpdate:path+"/web/ops/company/role/menu/update",// 更新学校角色菜单
// 教师信息
teacherAdd:path+"/web/basic/staff/add",// 新建教职工
teacherSingleStaffInfo:path+"/web/basic/staff/singleStaffInfo",// 获得单项教职工条目
teacherUpdate:path+"/web/basic/staff/update",// 更新教职工条目
teacherDelete:path+"/web/basic/staff/delete",// 移除教职工
teacherAllType:path+"/web/basic/staff/allType",// 获得所有教职工类型
teacherMyClassInfo:path+"/web/basic/myClassInfo",// 获得教职工所在班级列表
teacherStaffInfo:path+"/web/basic/staff/staffInfo",// 获得教职工列表
teacherGetImportUserInfo:path+"/web/basic/import/getImportUserInfo",// 获得用户导入表信息
teacherDeleteImportUser:path+"/web/basic/import/deleteImportUser",// 用户导入表-删除
teacherSubmitUserData:path+"/web/basic/import/submitUserData",// 用户导入表 提交数据
teacherGetSingleImportUserInfo:path+"/web/basic/import/getSingleImportUserInfo",// 获得用户导入表单项导入信息
teacherUpdateImportUser:path+"/web/basic/import/updateImportUser",// 用户导入表-编辑
// 幼儿信息
childrenAdd:path+"/web/basic/child/add",// 新建幼儿
childrenSingleChildInfo:path+"/web/basic/child/singleChildInfo",// 获得单项幼儿条目
childrenUpdate:path+"/web/basic/child/update",// 更新幼儿条目
childrenDelete:path+"/web/basic/child/delete",// 移除幼儿
childrenMyClassInfo:path+"/web/basic/myClassInfo",// 获得幼儿所在班级列表
childrenInfo:path+"/web/basic/child/childInfo",// 获得幼儿列表
childrenParentInfo:path+"/web/basic/child/parentInfo",// 获得幼儿家长列表
childrenGetImportUserInfo:path+"/web/basic/import/getImportChildInfo",// 获得用户导入表信息
childrenDeleteImportUser:path+"/web/basic/import/deleteImportChild",// 用户导入表-删除
childrenSubmitUserData:path+"/web/basic/import/submitChildData",// 用户导入表 提交数据
childrenGetSingleImportUserInfo:path+"/web/basic/import/getSingleImportChildInfo",// 获得用户导入表单项导入信息
childrenUpdateImportUser:path+"/web/basic/import/updateImportChild",// 幼儿导入表-编辑
childrenParentDelete:path+"/web/basic/parent/delete",// 删除家长条目
childrenSingleParentInfo:path+"/web/basic/parent/singleParentInfo",// 获得单项家长条目
childrenParentUpdate:path+"/web/basic/parent/update",// 更新家长条目
// 班级管理
classAdd:path+"/web/basic/org/add",// 新建班级
classUpdate:path+"/web/basic/org/update",// 更新班级
classDelete:path+"/web/basic/org/delete",// 移除班级
classGradeList:path+"/web/basic/org/gradeList",// 获得年级列表
classInfo:path+"/web/basic/org/classInfo",// 获得班级列表及人员数量
classOfStaff:path+"/web/basic/org/staffOfClass",// 获得班级教职工列表
classSingleClassInfo:path+"/web/basic/org/singleClassInfo",// 获得单项班级条目
classBasicInfo:path+"/web/basic/org/classBasicInfo",// 获得所有班级基础信息
classUpgrade:path+"/web/basic/org/upgrade",// 升班
classChange:path+"/web/basic/org/changeClass",// 调班
classMemberBasic:path+"/web/basic/org/memberBasic",// 获取班级幼儿及教职工名单
// 萌宝成长
growthBanner:path+"/web/ops/company/banner/list",// 萌宝成长 获取学校banner
growthAdd:path+"/web/growth/message/add",// 萌宝成长 新增
growthList:path+"/web/growth/message/list",// 萌宝成长 获取班级内容列表
growthStudent:path+"/common/basic/class/student",// 萌宝成长 获取当前班级学生列表
growthLabel:path+"/web/growth/label/list",// 萌宝成长 获取学校所有的标签
growthAddordelete:path+"/web/growth/praise/addordelete",// 萌宝成长 点赞或者取消点赞
growthCancelSticky:path+"/web/growth/message/cancelSticky",// 萌宝成长 取消内容置顶
growthCommentAdd:path+"/web/growth/comment/add",// 萌宝成长 新增一条评论或者回复
growthCommentDelete:path+"/web/growth/comment/delete",// 萌宝成长 删除某一条评论
growthMessageDelete:path+"/web/growth/message/delete",// 萌宝成长 删除一条内容
// 萌宝成长标签
growthLabelDelete:path+"/web/growth/label/delete",// 萌宝成长标签 删除
growthLabelAddOrUpdate:path+"/web/growth/label/addOrUpdate",// 新增或者编辑标签
// 萌宝成长统计
growthTeacherStat:path+"/web/growth/report/message/teacher",// 萌宝成长统计 教师发帖数量
growthClassStat:path+"/web/growth/report/message/class",// 萌宝成长统计 班级发帖数量
growthLivelyStat:path+"/web/growth/report/parent/lively",// 萌宝成长统计 点赞评论数量
// 观察记录
watchCourseList:path+"/web/sample/search/course/list",// (查询)个人观察计划列表
watchClassList:path+"/web/sample/search/class/list",// (查询)获取个人所在班级
watchTeacherList:path+"/web/sample/search/teacher/list",// (查询)获取班级所有老师
watchDimList:path+"/web/sample/search/dim/list",// (查询)观察计划维度列表
watchRecordUpdate:path+"/web/sample/student/record/update",// 更新观察记录
watchRecordList:path+"/web/sample/student/record/list",// 获取学生观察记录列表
watchStudentList:path+"/web/sample/record/student/list",// 获取观察记录学生列表
watchRecordDetail:path+"/web/sample/student/record/detail",// 获取观察记录详情
watchRecordDelete:path+"/web/sample/record/delete",// 删除观察记录
watchTeacherStat:path+"/web/sample/report/teacher",// 观察记录统计
watchClassStat:path+"/web/sample/report/class",// 观察记录统计01
// 观察计划
watchPlanList:path+"/web/sample/company/course/list",// 获取观察计划列表
watchPlanDetail:path+"/web/sample/company/course/detail",// 获取观察计划详情
watchPlanAddOrUpdate:path+"/web/sample/company/course/addOrUpdate",// 新增或更新观察计划
watchPlanDelete:path+"/web/sample/company/course/delete",// 删除观察计划
watchPlanTeacherList:path+"/web/sample/company/teacherList",// 关联教师
// 综合评价
watchStudentInfo:path+"/web/sample/evaluate/evaluateStudentInfo",// 月度评价学生列表
watchStudentDetail:path+"/web/sample/evaluate/student/detail",// 学生月度评价详情
watchStudentAddOrUpdate:path+"/web/sample/evaluate/student/addOrUpdate",// 新增或编辑学生月度评价
watchConfigMonthList:path+"/web/sample/evaluate/config/monthList",// 获取所有月份配置列表
watchConfigAdd:path+"/web/sample/evaluate/config/add",// 新增月度评价配置
watchConfigAllDim:path+"/web/sample/evaluate/config/allDim",// 获取学校配置所有维度
watchConfigDetail:path+"/web/sample/evaluate/config/detail",// 获取月度评价配置详情
// 观察维度
dimLevelDelete:path+"/web/sample/company/dimLevel/delete",// 删除学校维度水平
dimDelete:path+"/web/sample/company/dim/delete",// 删除学校观察维度
dimLevelAddOrUpdate:path+"/web/sample/company/dimLevel/addOrUpdate",// 新增或更新学校维度水平
dimAddOrUpdate:path+"/web/sample/company/dim/addOrUpdate",// 新增或更新学校观察维度
dimLevelList:path+"/web/sample/company/dimLevel/list",// 获取学校维度水平列表
dimList:path+"/web/sample/company/dim/list",// 获取学校观察维度
// 个体发展水平
getStudentAbility:path+"/web/sample/TJ/TJ_GCJL_GetStudentAbilityStrong",// 个人综合能力评价 雷达图
getStudentCourseAbility:path+"/web/sample/TJ/TJ_GCJL_GetStudentCourseAbility",// 个人课程 能力评价 雷达图
// 班级发展水平
getClassesAbilibySimple:path+"/web/sample/TJ/TJ_GCJL_GetClassesAbilibySimple",// 班级领域发展水平
getCourseAbilibySimple:path+"/web/sample/TJ/TJ_GCJL_GetCourseAbilibySimple",// 班级游戏与生活观察
getClassAbilibySimple:path+"/web/sample/TJ/TJ_GCJL_GetClassAbilibySimple",// 班级领域发展水平--数量统计
getCourseAbilibyCount:path+"/web/sample/TJ/TJ_GCJL_GetCourseAbilibyCount",// 课程发展水平--数量统计
// 成长档案
recordStudent:path+"/web/mbtrack/dan/student",// 获取学生列表(含档案信息)
recordList:path+"/web/mbtrack/danbook/list",// 获取档案册列表
recordMonthList:path+"/web/mbtrack/danbook/danList",// 获取档案册档案页详情
recordNewDanbook:path+"/web/mbtrack/danbook/save",// 新建档案册
recordDownload:path+"/file/patch/download",//图片批量下载(档案页)
recordDanbookUpdate:path+"/web/mbtrack/danbook/update",// 档案册名更新
recordTeacherStat:path+"/web/mbtrack/report/teacher",// 教师成长档案统计
recordParentStat:path+"/web/mbtrack/report/parent",// 家长成长档案统计
// 考勤
attendGetChildOfClass:path+"/web/attendance/teacher/getChildOfClass",// 获得班级所有幼儿信息
attendGetAttendanceRecord:path+"/web/attendance/teacher/getAttendanceRecord",// 获得考勤记录
attendCheckConfirm:path+"/web/attendance/teacher/checkConfirm",// 教师端检查确认
attendDisPlayAttendDays:path+"/web/attendance/teacher/disPlayAttendDays",// 查看已设置的考勤天数
attendUpdateAttendDays:path+"/web/attendance/teacher/updateAttendDays",// 修改考勤天数设置
attendResetAttendDays:path+"/web/attendance/teacher/resetAttendDays",// 复位考勤天数设置
attendGetClassAttendanceInfo:path+"/web/attendance/teacher/getClassAttendanceInfo",// 获得班级考勤
attendGetPersonalAttendance:path+"/web/attendance/parent/getPersonalAttendance",// 获得个人考勤
// 公告
getMyClassInfo:path+"/web/basic/getMyClassInfo",// 获取我的班级信息
getClassStuAndTeachers:path+"/web/basic/getClassStuAndTeachers",// 获取班级所有学生和老师
noticeGetDesc:path+"/web/notice/getNoticeDesc",// 获取公告描述
noticeReaded:path+"/web/notice/markNoticeReaded",// 公告置为已读
noticeAddNew:path+"/web/notice/addNewNotice",// 新增新的公告内容
noticeGetContentList:path+"/web/notice/getNoticeContent",// 获取某个公告内容列表
noticeGetReadDetail:path+"/web/notice/getReadDetail",// 获取某条公告内容阅读详情
noticeDelNoticeContent:path+"/web/notice/delNoticeContent",// 删除某条公告内容
noticeUpdateNoticeContent:path+"/web/notice/updateNoticeContent",// 更新某条公告内容
// 每周菜谱
menuSaveTable:path+"/web/cookbook/saveTable",// 保存表格
menuDeleteTable:path+"/web/cookbook/deleteTable",// 删除整张表
menuUpdateTitle:path+"/web/cookbook/updateTitle",// 更新菜谱标题
menuSelectCell:path+"/web/cookbook/selectCell",// 获得某个单元
menuGetTitleList:path+"/web/cookbook/getTitleList",// 获得菜谱标题列表
menuStructuringTableCell:path+"/web/cookbook/structuringTableCell",// 通过开始日期获取表单
// 风险预警
riskGetCompanyHealthAlert:path+"/web/healthAlert/getCompanyHealthAlert",// 获取登录人所在学校的所有预警
riskGetAlertType:path+"/web/healthAlert/getAlertType",// 获取预警类型列表
riskGetAlertAge:path+"/web/healthAlert/getAlertAge",// 获得预警年龄列表
riskNewHealthAlert:path+"/web/healthAlert/newHealthAlert",// 新增风险预警
riskGetHealthAlert:path+"/web/healthAlert/getHealthAlert",// 获取单条健康预警
riskUpdateHealthAlert:path+"/web/healthAlert/updateHealthAlert",// 更改健康预警
riskDeleteHealthAlert:path+"/web/healthAlert/deleteHealthAlert",// 删除健康预警
// 健康信息
healthGetExamDateList:path+"/web/healthInfo/getExamDateList",// 根据班级获得检查日期列表
healthGetClassHealthInfo:path+"/web/healthInfo/getClassHealthInfo",// 获得班级健康信息
healthGetChildListOfClass:path+"/web/healthInfo/getChildListOfClass",// 获得班级幼儿列表
healthGetBirthdaySex:path+"/web/healthInfo/getBirthdaySex",// 获得幼儿生日及性别
healthCalculateAge:path+"/web/healthInfo/calculateAge",// 根据生日,体检日期,计算年龄
healthHPValue:path+"/web/healthInfo/HPValue",// 计算身高p值
healthWPValue:path+"/web/healthInfo/WPValue",// 计算体重p值
healthFatnessValue:path+"/web/healthInfo/FatnessValue",// 计算肥胖值
healthNewHealthInfo:path+"/web/healthInfo/newHealthInfo",// 新增健康信息
healthGetSingleHI:path+"/web/healthInfo/getSingleHI",// 获得单条健康信息
healthUpdateHealthInfo:path+"/web/healthInfo/updateHealthInfo",// 更新健康信息
healthDeleteHealthInfo:path+"/web/healthInfo/deleteHealthInfo",// 删除健康信息
// 自选课程 剧场活动
GetSchoolIds:path+"/web/activity/TSCourse_GetSchoolIds",//特色课程 获取学校课程id
GetSchoolJYIds:path+"/web/activity/TSCourse_GetSchoolJYIds",//剧场活动 id
GetSchoolCourses:path+"/web/activity/TSCourse_GetSchoolCourses",//特色课程 获取学校课程
AddCourse:path+"/web/activity/TSCourse_AddCourse",//特色课程 新增
GetCourseDetails:path+"/web/activity/TSCourse_GetCourseDetails",//获取学校课程详情
tsDelCourse:path+"/web/activity/TSCourse_DelCourse",// 删除学校课程
tsGetBookedChildren:path+"/web/activity/TSCourse_GetBookedChildren",// 签到学生列表
tsCallRoll:path+"/web/activity/TSCourse_CallRoll",// 签到
tsCancelRoll:path+"/web/activity/TSCourse_CancelRoll",// 取消签到
tsTempBookCourse:path+"/web/activity/TSCourse_tempBookCourse",// 补加预约人数
getCourseSimpleTJ:path+"/web/activity/TSCourse_getCourseSimpleTJ",// 自选活动 活动统计
getCourseClassTJ:path+"/web/activity/TSCourse_getCourseClassTJ",// 自选活动 班级统计
getCourseStudentTJ:path+"/web/activity/TSCourse_getCourseStudentTJ",// 自选活动 学生统计
getCourseStudentDetailTJ:path+"/web/activity/TSCourse_getCourseStudentDetailTJ",// 自选活动 活动统计详情
getCourseAllTJ:path+"/web/activity/TSCourse_getCourseAllTJ2",// 自选活动 活动统计01
// 文件中心
fileGetRoot:path+"/web/fileCenter/getRoot",// 获取根目录
fileGetChildFileInfo:path+"/web/fileCenter/getChildFileInfo",// 获取文件的所有子级文件
fileGetSingleFileInfo:path+"/web/fileCenter/getSingleFileInfo",// 获取单项文件信息
fileAddFileInfo:path+"/web/fileCenter/addFileInfo",// 增加一项文件信息
fileDeleteFileInfo:path+"/web/fileCenter/deleteFileInfo",// 删除文件信息
fileUpdateFileName:path+"/web/fileCenter/updateFileName",// 更新文件名
// 08设置
setting:''
};
function initAjax(url,param,callback,callback01,callback02) {
$.ajax({
type:"POST",
url:url,
data:param,
dataType:"json",
statusCode:{
404:function(){
console.log("访问地址不存在或接口参数有误 错误代码404");
},
500:function(){
console.log("因为意外情况,服务器不能完成请求 错误代码500");
// window.location.href=httpUrl.loginHttp;
},
405:function(){
console.log("资源被禁止 错误代码405");
}
},
beforeSend:function () {
// loadingIn();// loading载入
},
success:function(result){
callback(result,callback01,callback02);
// loadingOut(); // loading退出
},
error:function(result){
console.log("请求失败 ajax error!");
// window.location.href=httpUrl.loginHttp;
}
});
};
// loading载入函数
function loadingIn() {
$("#page-loader").removeClass('hide');
$("#page-loader").css("z-index","999999");
};
function loadingOut(argument) {
$("#page-loader").addClass('hide');
};
Date.prototype.Format = function (fmt) {
var o = {
"M+": this.getMonth() + 1, //月份
"d+": this.getDate(), //日
"h+": this.getHours(), //小时
"m+": this.getMinutes(), //分
"s+": this.getSeconds(), //秒
"q+": Math.floor((this.getMonth() + 3) / 3), //季度
"S": this.getMilliseconds() //毫秒
};
if (/(y+)/.test(fmt)) fmt = fmt.replace(RegExp.$1, (this.getFullYear() + "").substr(4 - RegExp.$1.length));
for (var k in o)
if (new RegExp("(" + k + ")").test(fmt)) fmt = fmt.replace(RegExp.$1, (RegExp.$1.length == 1) ? (o[k]) : (("00" + o[k]).substr(("" + o[k]).length)));
return fmt;
};
// 地址栏search参数筛选函数
function GetQueryString(name){
var reg = new RegExp("(^|&)"+ name +"=([^&]*)(&|$)");
var result = window.location.search.substr(1).match(reg);
return result?decodeURIComponent(result[2]):null;
}
// 设置cookie 过期时间s20代表20秒 h12代表12小时 d30代表30天
function setCookie(name,value,time){
var strsec = getsec(time);
var exp = new Date();
exp.setTime(exp.getTime() + strsec*1);
// document.cookie = name + "="+ escape (value) + ";expires=" + exp.toGMTString()+"path=/; domain="+domain;
document.cookie = name + "="+ escape (value) + ";expires=" + exp.toGMTString();
};
function getsec(str){
var str1=str.substring(1,str.length)*1;
var str2=str.substring(0,1);
if (str2=="s"){
return str1*1000;
}
else if (str2=="h")
{
return str1*60*60*1000;
}
else if (str2=="d")
{
return str1*24*60*60*1000;
}
};
// 获取cookie
function getCookie(name){
var arr,reg=new RegExp("(^| )"+name+"=([^;]*)(;|$)");
if(arr=document.cookie.match(reg)){
return unescape(arr[2]);
}
else{
return null;
}
};
// 删除cookie
function delCookie(name){
var exp = new Date();
exp.setTime(exp.getTime() - 1);
var cval=getCookie(name);
if(cval!=null){
document.cookie= name + "="+cval+";expires="+exp.toGMTString();
};
};
// niceScroll滚动条
function chooseNiceScroll(AA,color) {
$(AA).niceScroll({
cursorcolor: color || "#ccc",//#CC0071 光标颜色
cursoropacitymax: 1, //改变不透明度非常光标处于活动状态(scrollabar“可见”状态),范围从1到0
touchbehavior: true, //使光标拖动滚动像在台式电脑触摸设备
cursorwidth: "5px", //像素光标的宽度
cursorborder: "0", // 游标边框css定义
cursorborderradius: "5px",//以像素为光标边界半径
autohidemode: true //是否隐藏滚动条
});
};
// 消息提示函数
function toastTip(heading,text,hideAfter,afterHidden) {
$.toast({
heading: heading,
text: text,
showHideTransition: 'slide',
icon: 'success',
hideAfter: hideAfter || 1500,
loaderBg: '#edd42e',
position: 'bottom-right',
afterHidden: afterHidden
});
};
| identifier_name | ||
base.js | var user={
pid:GetQueryString("pid"),
sid:GetQueryString("sid")
};
var serverUrl01="https://www.member361.com";//84正式服务器
var serverUrl02="https://121.43.150.38";//38测试服务器
var serverUrl03="http://106.15.89.156";//156测试服务器
var serverHost="https://www.member361.com";
var path=serverUrl01; //更改服务器地址可设置此值
var httpUrl={
// 基础
loginId:getCookie("loginId"),
path_img:path+"/file/getImage?md5=", // 图片地址
download:path+"/file/downloadOne?", // 文件下载
picUrl:path+"/file/upload2", // 图片上传地址
basicFileUpload:path+"/file/business/upload", // 业务文件上传
login:path+"/web/login/loginChecking",// 首页登入
loginUserInfo:path+"/web/basic/loginUserInfo",// 获得登录人信息
basicButton:path+"/web/ops/menu/button/list",// 获取菜单功能按钮列表
basicMyClassInfo:path+"/web/basic/myClassInfo",// 获得当前人 所在班级列表
basicAllClassInfo:path+"/web/basic/allClassInfo",// 获得登录人所在学校所有班级列表
basicCompanyList:path+"/web/ops/company/list",// 获取所有的学校
basicZip:path+"/file/zip",// 获取打包下载zip
basicStudent:path+"/common/basic/class/student",// 获取当前班级学生列表
// 菜单
menuList:path+"/web/ops/user/menu/list",// 菜单接口
menuChildList:path+"/web/ops/user/menu/childList",// 获取子菜单列表
menuButtonList:path+"/web/ops/menu/button/list",// 获取菜单功能按钮列表
// 菜单管理
menuButtonAddOrUpdate:path+"/web/ops/menu/button/addOrUpdate",// 新增或更新按钮信息
menuAddOrUpdate:path+"/web/ops/menu/addOrUpdate",// 新增或更新菜单信息
menuCompanyUpdate:path+"/web/ops/company/menu/update",// 更新学校菜单信息
menuCompanyList:path+"/web/ops/company/menu/list",// 获取学校菜单列表
menuButtonList:path+"/web/ops/button/list",// 获取菜单按钮列表
menuDelete:path+"/web/ops/menu/delete",// 删除菜单
menuButtonDelete:path+"/web/ops/menu/button/delete",// 删除菜单按钮
menuDetail:path+"/web/ops/menu/detail",// 菜单详情
menuButtonDetail:path+"/web/ops/menu/button/detail",// 按钮详情
menuRoleButtonUpdate:path+"/web/ops/role/button/update",// 更新角色按钮权限
menuRoleButtonList:path+"/web/ops/role/buttonList",// 获得角色所有按钮(含选中信息)
// 学校角色管理
schoolTypeList:path+"/web/ops/role/typeList",// 获取所有的角色
schoolMenuList:path+"/web/ops/company/role/menuList",// 获取学校角色菜单
schoolMenuUpdate:path+"/web/ops/company/role/menu/update",// 更新学校角色菜单
// 教师信息
teacherAdd:path+"/web/basic/staff/add",// 新建教职工
teacherSingleStaffInfo:path+"/web/basic/staff/singleStaffInfo",// 获得单项教职工条目
teacherUpdate:path+"/web/basic/staff/update",// 更新教职工条目
teacherDelete:path+"/web/basic/staff/delete",// 移除教职工
teacherAllType:path+"/web/basic/staff/allType",// 获得所有教职工类型
teacherMyClassInfo:path+"/web/basic/myClassInfo",// 获得教职工所在班级列表
teacherStaffInfo:path+"/web/basic/staff/staffInfo",// 获得教职工列表
teacherGetImportUserInfo:path+"/web/basic/import/getImportUserInfo",// 获得用户导入表信息
teacherDeleteImportUser:path+"/web/basic/import/deleteImportUser",// 用户导入表-删除
teacherSubmitUserData:path+"/web/basic/import/submitUserData",// 用户导入表 提交数据
teacherGetSingleImportUserInfo:path+"/web/basic/import/getSingleImportUserInfo",// 获得用户导入表单项导入信息
teacherUpdateImportUser:path+"/web/basic/import/updateImportUser",// 用户导入表-编辑
// 幼儿信息
childrenAdd:path+"/web/basic/child/add",// 新建幼儿
childrenSingleChildInfo:path+"/web/basic/child/singleChildInfo",// 获得单项幼儿条目
childrenUpdate:path+"/web/basic/child/update",// 更新幼儿条目
childrenDelete:path+"/web/basic/child/delete",// 移除幼儿
childrenMyClassInfo:path+"/web/basic/myClassInfo",// 获得幼儿所在班级列表
childrenInfo:path+"/web/basic/child/childInfo",// 获得幼儿列表
childrenParentInfo:path+"/web/basic/child/parentInfo",// 获得幼儿家长列表
childrenGetImportUserInfo:path+"/web/basic/import/getImportChildInfo",// 获得用户导入表信息
childrenDeleteImportUser:path+"/web/basic/import/deleteImportChild",// 用户导入表-删除
childrenSubmitUserData:path+"/web/basic/import/submitChildData",// 用户导入表 提交数据
childrenGetSingleImportUserInfo:path+"/web/basic/import/getSingleImportChildInfo",// 获得用户导入表单项导入信息
childrenUpdateImportUser:path+"/web/basic/import/updateImportChild",// 幼儿导入表-编辑
childrenParentDelete:path+"/web/basic/parent/delete",// 删除家长条目
childrenSingleParentInfo:path+"/web/basic/parent/singleParentInfo",// 获得单项家长条目
childrenParentUpdate:path+"/web/basic/parent/update",// 更新家长条目
// 班级管理
classAdd:path+"/web/basic/org/add",// 新建班级
classUpdate:path+"/web/basic/org/update",// 更新班级
classDelete:path+"/web/basic/org/delete",// 移除班级
classGradeList:path+"/web/basic/org/gradeList",// 获得年级列表
classInfo:path+"/web/basic/org/classInfo",// 获得班级列表及人员数量
classOfStaff:path+"/web/basic/org/staffOfClass",// 获得班级教职工列表
classSingleClassInfo:path+"/web/basic/org/singleClassInfo",// 获得单项班级条目
classBasicInfo:path+"/web/basic/org/classBasicInfo",// 获得所有班级基础信息
classUpgrade:path+"/web/basic/org/upgrade",// 升班
classChange:path+"/web/basic/org/changeClass",// 调班
classMemberBasic:path+"/web/basic/org/memberBasic",// 获取班级幼儿及教职工名单
// 萌宝成长
growthBanner:path+"/web/ops/company/banner/list",// 萌宝成长 获取学校banner
growthAdd:path+"/web/growth/message/add",// 萌宝成长 新增
growthList:path+"/web/growth/message/list",// 萌宝成长 获取班级内容列表
growthStudent:path+"/common/basic/class/student",// 萌宝成长 获取当前班级学生列表
growthLabel:path+"/web/growth/label/list",// 萌宝成长 获取学校所有的标签
growthAddordelete:path+"/web/growth/praise/addordelete",// 萌宝成长 点赞或者取消点赞
growthCancelSticky:path+"/web/growth/message/cancelSticky",// 萌宝成长 取消内容置顶
growthCommentAdd:path+"/web/growth/comment/add",// 萌宝成长 新增一条评论或者回复
growthCommentDelete:path+"/web/growth/comment/delete",// 萌宝成长 删除某一条评论
growthMessageDelete:path+"/web/growth/message/delete",// 萌宝成长 删除一条内容
// 萌宝成长标签
growthLabelDelete:path+"/web/growth/label/delete",// 萌宝成长标签 删除
growthLabelAddOrUpdate:path+"/web/growth/label/addOrUpdate",// 新增或者编辑标签
// 萌宝成长统计
growthTeacherStat:path+"/web/growth/report/message/teacher",// 萌宝成长统计 教师发帖数量
growthClassStat:path+"/web/growth/report/message/class",// 萌宝成长统计 班级发帖数量
growthLivelyStat:path+"/web/growth/report/parent/lively",// 萌宝成长统计 点赞评论数量
// 观察记录
watchCourseList:path+"/web/sample/search/course/list",// (查询)个人观察计划列表
watchClassList:path+"/web/sample/search/class/list",// (查询)获取个人所在班级
watchTeacherList:path+"/web/sample/search/teacher/list",// (查询)获取班级所有老师
watchDimList:path+"/web/sample/search/dim/list",// (查询)观察计划维度列表
watchRecordUpdate:path+"/web/sample/student/record/update",// 更新观察记录
watchRecordList:path+"/web/sample/student/record/list",// 获取学生观察记录列表
watchStudentList:path+"/web/sample/record/student/list",// 获取观察记录学生列表
watchRecordDetail:path+"/web/sample/student/record/detail",// 获取观察记录详情
watchRecordDelete:path+"/web/sample/record/delete",// 删除观察记录
watchTeacherStat:path+"/web/sample/report/teacher",// 观察记录统计
watchClassStat:path+"/web/sample/report/class",// 观察记录统计01
// 观察计划
watchPlanList:path+"/web/sample/company/course/list",// 获取观察计划列表
watchPlanDetail:path+"/web/sample/company/course/detail",// 获取观察计划详情
watchPlanAddOrUpdate:path+"/web/sample/company/course/addOrUpdate",// 新增或更新观察计划
watchPlanDelete:path+"/web/sample/company/course/delete",// 删除观察计划
watchPlanTeacherList:path+"/web/sample/company/teacherList",// 关联教师
// 综合评价
watchStudentInfo:path+"/web/sample/evaluate/evaluateStudentInfo",// 月度评价学生列表
watchStudentDetail:path+"/web/sample/evaluate/student/detail",// 学生月度评价详情
watchStudentAddOrUpdate:path+"/web/sample/evaluate/student/addOrUpdate",// 新增或编辑学生月度评价
watchConfigMonthList:path+"/web/sample/evaluate/config/monthList",// 获取所有月份配置列表
watchConfigAdd:path+"/web/sample/evaluate/config/add",// 新增月度评价配置
watchConfigAllDim:path+"/web/sample/evaluate/config/allDim",// 获取学校配置所有维度
watchConfigDetail:path+"/web/sample/evaluate/config/detail",// 获取月度评价配置详情
// 观察维度
dimLevelDelete:path+"/web/sample/company/dimLevel/delete",// 删除学校维度水平
dimDelete:path+"/web/sample/company/dim/delete",// 删除学校观察维度
dimLevelAddOrUpdate:path+"/web/sample/company/dimLevel/addOrUpdate",// 新增或更新学校维度水平
dimAddOrUpdate:path+"/web/sample/company/dim/addOrUpdate",// 新增或更新学校观察维度
dimLevelList:path+"/web/sample/company/dimLevel/list",// 获取学校维度水平列表
dimList:path+"/web/sample/company/dim/list",// 获取学校观察维度
// 个体发展水平
getStudentAbility:path+"/web/sample/TJ/TJ_GCJL_GetStudentAbilityStrong",// 个人综合能力评价 雷达图
getStudentCourseAbility:path+"/web/sample/TJ/TJ_GCJL_GetStudentCourseAbility",// 个人课程 能力评价 雷达图
// 班级发展水平
getClassesAbilibySimple:path+"/web/sample/TJ/TJ_GCJL_GetClassesAbilibySimple",// 班级领域发展水平
getCourseAbilibySimple:path+"/web/sample/TJ/TJ_GCJL_GetCourseAbilibySimple",// 班级游戏与生活观察
getClassAbilibySimple:path+"/web/sample/TJ/TJ_GCJL_GetClassAbilibySimple",// 班级领域发展水平--数量统计
getCourseAbilibyCount:path+"/web/sample/TJ/TJ_GCJL_GetCourseAbilibyCount",// 课程发展水平--数量统计
// 成长档案
recordStudent:path+"/web/mbtrack/dan/student",// 获取学生列表(含档案信息)
recordList:path+"/web/mbtrack/danbook/list",// 获取档案册列表
recordMonthList:path+"/web/mbtrack/danbook/danList",// 获取档案册档案页详情
recordNewDanbook:path+"/web/mbtrack/danbook/save",// 新建档案册
recordDownload:path+"/file/patch/download",//图片批量下载(档案页)
recordDanbookUpdate:path+"/web/mbtrack/danbook/update",// 档案册名更新
recordTeacherStat:path+"/web/mbtrack/report/teacher",// 教师成长档案统计
recordParentStat:path+"/web/mbtrack/report/parent",// 家长成长档案统计
// 考勤
attendGetChildOfClass:path+"/web/attendance/teacher/getChildOfClass",// 获得班级所有幼儿信息
attendGetAttendanceRecord:path+"/web/attendance/teacher/getAttendanceRecord",// 获得考勤记录
attendCheckConfirm:path+"/web/attendance/teacher/checkConfirm",// 教师端检查确认
attendDisPlayAttendDays:path+"/web/attendance/teacher/disPlayAttendDays",// 查看已设置的考勤天数
attendUpdateAttendDays:path+"/web/attendance/teacher/updateAttendDays",// 修改考勤天数设置
attendResetAttendDays:path+"/web/attendance/teacher/resetAttendDays",// 复位考勤天数设置
attendGetClassAttendanceInfo:path+"/web/attendance/teacher/getClassAttendanceInfo",// 获得班级考勤
attendGetPersonalAttendance:path+"/web/attendance/parent/getPersonalAttendance",// 获得个人考勤
// 公告
getMyClassInfo:path+"/web/basic/getMyClassInfo",// 获取我的班级信息
getClassStuAndTeachers:path+"/web/basic/getClassStuAndTeachers",// 获取班级所有学生和老师
noticeGetDesc:path+"/web/notice/getNoticeDesc",// 获取公告描述
noticeReaded:path+"/web/notice/markNoticeReaded",// 公告置为已读
noticeAddNew:path+"/web/notice/addNewNotice",// 新增新的公告内容
noticeGetContentList:path+"/web/notice/getNoticeContent",// 获取某个公告内容列表
noticeGetReadDetail:path+"/web/notice/getReadDetail",// 获取某条公告内容阅读详情
noticeDelNoticeContent:path+"/web/notice/delNoticeContent",// 删除某条公告内容
noticeUpdateNoticeContent:path+"/web/notice/updateNoticeContent",// 更新某条公告内容
// 每周菜谱
menuSaveTable:path+"/web/cookbook/saveTable",// 保存表格
menuDeleteTable:path+"/web/cookbook/deleteTable",// 删除整张表
menuUpdateTitle:path+"/web/cookbook/updateTitle",// 更新菜谱标题
menuSelectCell:path+"/web/cookbook/selectCell",// 获得某个单元
menuGetTitleList:path+"/web/cookbook/getTitleList",// 获得菜谱标题列表
menuStructuringTableCell:path+"/web/cookbook/structuringTableCell",// 通过开始日期获取表单
// 风险预警
riskGetCompanyHealthAlert:path+"/web/healthAlert/getCompanyHealthAlert",// 获取登录人所在学校的所有预警
riskGetAlertType:path+"/web/healthAlert/getAlertType",// 获取预警类型列表
riskGetAlertAge:path+"/web/healthAlert/getAlertAge",// 获得预警年龄列表
riskNewHealthAlert:path+"/web/healthAlert/newHealthAlert",// 新增风险预警
riskGetHealthAlert:path+"/web/healthAlert/getHealthAlert",// 获取单条健康预警
riskUpdateHealthAlert:path+"/web/healthAlert/updateHealthAlert",// 更改健康预警
riskDeleteHealthAlert:path+"/web/healthAlert/deleteHealthAlert",// 删除健康预警
// 健康信息
healthGetExamDateList:path+"/web/healthInfo/getExamDateList",// 根据班级获得检查日期列表
healthGetClassHealthInfo:path+"/web/healthInfo/getClassHealthInfo",// 获得班级健康信息
healthGetChildListOfClass:path+"/web/healthInfo/getChildListOfClass",// 获得班级幼儿列表
healthGetBirthdaySex:path+"/web/healthInfo/getBirthdaySex",// 获得幼儿生日及性别
healthCalculateAge:path+"/web/healthInfo/calculateAge",// 根据生日,体检日期,计算年龄
healthHPValue:path+"/web/healthInfo/HPValue",// 计算身高p值
healthWPValue:path+"/web/healthInfo/WPValue",// 计算体重p值
healthFatnessValue:path+"/web/healthInfo/FatnessValue",// 计算肥胖值
healthNewHealthInfo:path+"/web/healthInfo/newHealthInfo",// 新增健康信息
healthGetSingleHI:path+"/web/healthInfo/getSingleHI",// 获得单条健康信息
healthUpdateHealthInfo:path+"/web/healthInfo/updateHealthInfo",// 更新健康信息
healthDeleteHealthInfo:path+"/web/healthInfo/deleteHealthInfo",// 删除健康信息
// 自选课程 剧场活动
GetSchoolIds:path+"/web/activity/TSCourse_GetSchoolIds",//特色课程 获取学校课程id
GetSchoolJYIds:path+"/web/activity/TSCourse_GetSchoolJYIds",//剧场活动 id
GetSchoolCourses:path+"/web/activity/TSCourse_GetSchoolCourses",//特色课程 获取学校课程
AddCourse:path+"/web/activity/TSCourse_AddCourse",//特色课程 新增
GetCourseDetails:path+"/web/activity/TSCourse_GetCourseDetails",//获取学校课程详情
tsDelCourse:path+"/web/activity/TSCourse_DelCourse",// 删除学校课程
tsGetBookedChildren:path+"/web/activity/TSCourse_GetBookedChildren",// 签到学生列表
tsCallRoll:path+"/web/activity/TSCourse_CallRoll",// 签到
tsCancelRoll:path+"/web/activity/TSCourse_CancelRoll",// 取消签到
tsTempBookCourse:path+"/web/activity/TSCourse_tempBookCourse",// 补加预约人数
getCourseSimpleTJ:path+"/web/activity/TSCourse_getCourseSimpleTJ",// 自选活动 活动统计
getCourseClassTJ:path+"/web/activity/TSCourse_getCourseClassTJ",// 自选活动 班级统计
getCourseStudentTJ:path+"/web/activity/TSCourse_getCourseStudentTJ",// 自选活动 学生统计
getCourseStudentDetailTJ:path+"/web/activity/TSCourse_getCourseStudentDetailTJ",// 自选活动 活动统计详情
getCourseAllTJ:path+"/web/activity/TSCourse_getCourseAllTJ2",// 自选活动 活动统计01
// 文件中心
fileGetRoot:path+"/web/fileCenter/getRoot",// 获取根目录
fileGetChildFileInfo:path+"/web/fileCenter/getChildFileInfo",// 获取文件的所有子级文件
fileGetSingleFileInfo:path+"/web/fileCenter/getSingleFileInfo",// 获取单项文件信息
fileAddFileInfo:path+"/web/fileCenter/addFileInfo",// 增加一项文件信息
fileDeleteFileInfo:path+"/web/fileCenter/deleteFileInfo",// 删除文件信息
fileUpdateFileName:path+"/web/fileCenter/updateFileName",// 更新文件名
// 08设置
setting:''
};
function initAjax(url,param,callback,callback01,callback02) {
$.ajax({
type:"POST",
url:url,
data:param,
dataType:"json",
statusCode:{
404:function(){
console.log("访问地址不存在或接口参数有误 错误代码404");
},
500:function(){
console.log("因为意外情况,服务器不能完成请求 错误代码500");
// window.location.href=httpUrl.loginHttp;
},
405:function(){
console.log("资源被禁止 错误代码405");
}
},
beforeSend:function () {
// loadingIn();// loading载入
},
success:function(result){
callback(result,callback01,callback02);
// loadingOut(); // loading退出
},
error:function(result){
console.log("请求失败 ajax error!");
// window.location.href=httpUrl.loginHttp;
}
});
};
// loading载入函数
function loadingIn() {
$("#page-loader").removeClass('hide');
$("#page-loader").css("z-index","999999");
};
function loadingOut(argument) {
$("#page-loader").addClass('hide');
};
Date.prototype.Format = function (fmt) {
var o = {
"M+": this.getMonth() + 1, //月份
"d+": this.getDate(), //日
"h+": this.getHours(), //小时
"m+": this.getMinutes(), //分
"s+": this.getSeconds(), //秒
"q+": Math.floor((this.getMonth() + 3) / 3), //季度
"S": this.getMilliseconds() //毫秒
};
if (/(y+)/.test(fmt)) fmt = fmt.replace(RegExp.$1, (this.getFullYear() + "").substr(4 - RegExp.$1.length));
for (var k in o)
if (new RegExp("(" + k + ")").test(fmt)) fmt = fmt.replace(RegExp.$1, (RegExp.$1.length == 1) ? (o[k]) : (("00" + o[k]).substr(("" + o[k]).length)));
return fmt;
};
// 地址栏search参数筛选函数
function GetQueryString(name){
var reg = new RegExp("(^|&)"+ name +"=([^&]*)(&|$)");
var result = window.location.search.substr(1).match(reg);
return result?decodeURIComponent(result[2]):null;
}
// 设置cookie 过期时间s20代表20秒 h12代表12小时 d30代表30天
function setCookie(name,value,time){
var strsec = getsec(time);
var exp = new Date();
exp.setTime(exp.getTime() + strsec*1);
// document.cookie = name + "="+ escape (value) + ";expires=" + exp.toGMTString()+"path=/; domain="+domain;
document.cookie = name + "="+ escape (value) + ";expires=" + exp.toGMTString();
};
function getsec(str){
var str1=str.substring(1,str.length)*1;
var str2=str.substring(0,1);
if (str2=="s"){
return str1*1000;
}
else if (str2=="h")
{
return str1*60*60*1000;
}
else if (str2=="d")
{
return str1*24*60*60*1000;
}
};
// 获取cookie
function getCookie(name){
var arr,reg=new RegExp("(^| )"+name+"=([^;]*)(;|$)");
if(arr=document.cookie.match(reg)){
return unescape(arr[2]);
}
else{
return null;
}
};
// 删除cookie
function delCookie(name){
var exp = new Date();
exp.setTime(exp.getTime() - 1);
var cval=getCookie(name);
if(cval!=null){
document.cookie= name + "="+cval+";expires="+exp.toGMTString();
};
};
// niceScroll滚动条
function chooseNiceScroll(AA,color) {
$(AA).niceScroll({
cursorcolor: color || "#ccc",//#CC0071 光标颜色
cursoropacitymax: 1, //改变不透明度非常光标处于活动状态(scrollabar“可见”状态),范围从1到0
touchbehavior: true, //使光标拖动滚动像在台式电脑触摸设备
cursorwidth: "5px", //像素光标的宽度
cursorborder: "0", // 游标边框css定义
cursorborderradius: "5px",//以像素为光标边界半径
autohidemode: true //是否隐藏滚动条
});
};
// 消息提示函数
function toastTip(heading,text,hideAfter,afterHidden) {
$.toast({
heading: heading,
text: text,
showHideTransition: 'slide',
icon: 'success',
hideAfter: hideAfter || 1500,
loaderBg: '#edd42e',
position: 'bottom-right',
afterHidden: afterHidden
});
};
| conditional_block | ||
pouch-db-singleton.ts | /**
* @license
* Copyright (c) 2017 Google Inc. All rights reserved.
* This code may only be used under the BSD style license found at
* http://polymer.github.io/LICENSE.txt
* Code distributed by Google as part of this project is also
* subject to an additional IP rights grant found at
* http://polymer.github.io/PATENTS.txt
*/
import {PouchDB} from '../../../../concrete-storage/pouchdb.js';
import {assert} from '../../../platform/assert-web.js';
import {Type} from '../../type.js';
import {ChangeEvent, SingletonStorageProvider} from '../storage-provider-base.js';
import {SerializedModelEntry, ModelValue} from '../crdt-collection-model.js';
import {PouchDbStorageProvider} from './pouch-db-storage-provider.js';
import {PouchDbStorage} from './pouch-db-storage.js';
import {upsert, UpsertDoc, UpsertMutatorFn} from './pouch-db-upsert.js';
/**
* A representation of a Singleton in Pouch storage.
*/
interface SingletonStorage extends UpsertDoc {
value: ModelValue;
/** ReferenceMode state for this data */
referenceMode: boolean;
/** Monotonically increasing version number */
version: number;
}
/**
* The PouchDB-based implementation of a Singleton.
*/
export class PouchDbSingleton extends PouchDbStorageProvider implements SingletonStorageProvider {
private localKeyId = 0;
/**
* Create a new PouchDbSingleton.
*
* @param type the underlying type for this singleton.
* @param storageEngine a reference back to the PouchDbStorage, used for baseStorageKey calls.
* @param name appears unused.
* @param id see base class.
* @param key the storage key for this collection.
*/
constructor(type: Type, storageEngine: PouchDbStorage, name: string, id: string, key: string, refMode: boolean) {
super(type, storageEngine, name, id, key, refMode);
this._version = 0;
// See if the value has been set
this.upsert(async doc => doc).then((doc) => {
this.resolveInitialized();
// value has been written
}).catch((err) => {
console.warn('Error init ' + this.storageKey, err);
// TODO(lindner) error out the initialized Promise
throw err;
});
}
/** @inheritDoc */
backingType(): Type {
return this.type;
}
async clone(): Promise<PouchDbSingleton> {
const singleton = new PouchDbSingleton(this.type, this.storageEngine, this.name, this.id, null, this.referenceMode);
await singleton.cloneFrom(this);
return singleton;
}
async cloneFrom(handle): Promise<void> {
const literal = await handle.serializeContents();
await this.initialized;
this.referenceMode = handle.referenceMode;
if (handle.referenceMode && literal.model.length > 0) {
// cloneFrom the backing store data by reading the model and writing it out.
const [backingStore, handleBackingStore] = await Promise.all(
[this.ensureBackingStore(), handle.ensureBackingStore()]);
literal.model = literal.model.map(({id, value}) => ({id, value: {id: value.id, storageKey: backingStore.storageKey}}));
const underlying = await handleBackingStore.getMultiple(literal.model.map(({id}) => id));
await backingStore.storeMultiple(underlying, [this.storageKey]);
}
await this.fromLiteral(literal);
if (literal && literal.model && literal.model.length === 1) {
const newvalue = literal.model[0].value;
if (newvalue) {
await this.upsert(async doc => {
doc.value = newvalue;
doc.referenceMode = this.referenceMode;
doc.version = Math.max(this._version, doc.version) + 1;
return doc;
});
}
await this._fire(new ChangeEvent({data: newvalue, version: this._version}));
}
}
/**
* Returns the model data in a format suitable for transport over
* the API channel (i.e. between execution host and context).
*/
async modelForSynchronization() {
await this.initialized;
const doc = await this.upsert(async doc => doc);
const value = doc.value;
if (this.referenceMode && value !== null) {
const backingStore = await this.ensureBackingStore();
const result = await backingStore.get(value.id);
return {
version: this._version,
model: [{id: value.id, value: result}]
};
}
return super.modelForSynchronization();
}
/**
* Returns the state of this singleton based as an object of the form
* {version, model: [{id, value}]}
*/
async | (): Promise<{version: number; model: SerializedModelEntry[]}> {
await this.initialized;
const doc = await this.upsert(async doc => doc);
const value = doc.value;
let model: SerializedModelEntry[] = [];
if (value != null) {
model = [
{
id: value.id,
keys: [],
value
}
];
}
return {
version: this._version,
model
};
}
/**
* Updates the internal state of this singleton with the supplied data.
*/
async fromLiteral({version, model}): Promise<void> {
await this.initialized;
const value = model.length === 0 ? null : model[0].value;
if (this.referenceMode && value && value.rawData) {
assert(false, `shouldn't have rawData ${JSON.stringify(value.rawData)} here`);
}
assert(value !== undefined);
const newDoc = await this.upsert(async (doc) => {
// modify document
doc.value = value;
doc.referenceMode = this.referenceMode;
doc.version = Math.max(version, doc.version) + 1;
return doc;
});
this._version = newDoc.version;
}
/**
* @return a promise containing the singleton value or null if it does not exist.
*/
async get(): Promise<ModelValue> {
await this.initialized;
try {
const doc = await this.upsert(async doc => doc);
let value = doc.value;
if (value == null) {
//console.warn('value is null and refmode=' + this.referenceMode);
}
if (this.referenceMode && value) {
const backingStore = await this.ensureBackingStore();
value = await backingStore.get(value.id);
}
// logging goes here
return value;
} catch (err) {
// TODO(plindner): caught for compatibility: pouchdb layer can throw, firebase layer never does
console.warn('PouchDbSingleton.get err=', err);
return null;
}
}
/**
* Set the value for this singleton.
* @param value the value we want to set. If null remove the singleton from storage
* @param originatorId TBD
* @param barrier TBD
*/
async set(value, originatorId: string = null, barrier: string|null = null): Promise<void> {
assert(value !== undefined);
let stored: SingletonStorage;
if (this.referenceMode && value) {
// Even if this value is identical to the previously written one,
// we can't suppress an event here because we don't actually have
// the previous value for comparison (that's down in the backing store).
// TODO(shans): should we fetch and compare in the case of the ids matching?
const referredType = this.type;
const storageKey = this.storageEngine.baseStorageKey(referredType, this.storageKey);
const backingStore = await this.ensureBackingStore();
// TODO(shans): mutating the storageKey here to provide unique keys is
// a hack that can be removed once entity mutation is distinct from collection
// updates. Once entity mutation exists, it shouldn't ever be possible to write
// different values with the same id.
await backingStore.store(value, [this.storageKey + this.localKeyId++]);
// Store the indirect pointer to the storageKey
// Do this *after* the write to backing store, otherwise null responses could occur
stored = await this.upsert(async doc => {
doc.referenceMode = this.referenceMode;
doc.version = this._version;
doc.value = {id: value['id'], storageKey};
return doc;
});
} else {
// Update Pouch/_stored, If value is null delete key, otherwise store it.
if (value == null) {
try {
const doc = await this.db.get(this.pouchDbKey.location);
await this.db.remove(doc);
} catch (err) {
// Deleting an already deleted item is acceptable.
if (err.name !== 'not_found') {
console.warn('PouchDbSingleton.remove err=', err);
throw err;
}
}
} else {
stored = await this.upsert(async doc => {
doc.referenceMode = this.referenceMode;
doc.version = this._version;
doc.value = value;
return doc;
});
}
}
this.bumpVersion();
const data = this.referenceMode ? value : stored.value;
await this._fire(new ChangeEvent({data, version: this._version, originatorId, barrier}));
}
/**
* Clear a singleton from storage.
* @param originatorId TBD
* @param barrier TBD
*/
async clear(originatorId: string = null, barrier: string = null): Promise<void> {
await this.set(null, originatorId, barrier);
}
/**
* Triggered when the storage key has been modified or deleted.
*/
async onRemoteStateSynced(doc: PouchDB.Core.ExistingDocument<SingletonStorage>) {
// TODO(lindner): reimplement as simple fires when we have replication working again
// TODO(lindner): consider using doc._deleted to special case.
const value = doc.value;
// Store locally
this.bumpVersion(doc.version);
// Skip if value == null, which is what happens when docs are deleted..
if (value) {
await this.ensureBackingStore().then(async store => {
const data = await store.get(value.id);
if (!data) {
// TODO(lindner): data referred to by this data is missing.
console.log('PouchDbSingleton.onRemoteSynced: possible race condition for id=' + value.id);
return;
}
await this._fire(new ChangeEvent({data, version: this._version}));
});
} else {
if (value != null) {
await this._fire(new ChangeEvent({data: value, version: this._version}));
}
}
}
/**
* Get/Modify/Set the data stored for this singleton.
*/
private async upsert(mutatorFn: UpsertMutatorFn<SingletonStorage>): Promise<SingletonStorage> {
const defaultDoc: SingletonStorage = {
value: null,
version: 0,
referenceMode: this.referenceMode
};
const doc = await upsert(this.db, this.pouchDbKey.location, mutatorFn, defaultDoc);
// post process results from doc here.
this.referenceMode = doc.referenceMode;
this._version = doc.version;
return doc;
}
}
| serializeContents | identifier_name |
pouch-db-singleton.ts | /**
* @license
* Copyright (c) 2017 Google Inc. All rights reserved.
* This code may only be used under the BSD style license found at
* http://polymer.github.io/LICENSE.txt
* Code distributed by Google as part of this project is also
* subject to an additional IP rights grant found at
* http://polymer.github.io/PATENTS.txt
*/
import {PouchDB} from '../../../../concrete-storage/pouchdb.js';
import {assert} from '../../../platform/assert-web.js';
import {Type} from '../../type.js';
import {ChangeEvent, SingletonStorageProvider} from '../storage-provider-base.js';
import {SerializedModelEntry, ModelValue} from '../crdt-collection-model.js';
import {PouchDbStorageProvider} from './pouch-db-storage-provider.js';
import {PouchDbStorage} from './pouch-db-storage.js';
import {upsert, UpsertDoc, UpsertMutatorFn} from './pouch-db-upsert.js';
/**
* A representation of a Singleton in Pouch storage.
*/
interface SingletonStorage extends UpsertDoc {
value: ModelValue;
/** ReferenceMode state for this data */
referenceMode: boolean;
/** Monotonically increasing version number */
version: number;
}
/**
* The PouchDB-based implementation of a Singleton.
*/
export class PouchDbSingleton extends PouchDbStorageProvider implements SingletonStorageProvider {
private localKeyId = 0;
/**
* Create a new PouchDbSingleton.
*
* @param type the underlying type for this singleton.
* @param storageEngine a reference back to the PouchDbStorage, used for baseStorageKey calls.
* @param name appears unused.
* @param id see base class.
* @param key the storage key for this collection.
*/
constructor(type: Type, storageEngine: PouchDbStorage, name: string, id: string, key: string, refMode: boolean) {
super(type, storageEngine, name, id, key, refMode);
this._version = 0;
// See if the value has been set
this.upsert(async doc => doc).then((doc) => {
this.resolveInitialized();
// value has been written
}).catch((err) => {
console.warn('Error init ' + this.storageKey, err);
// TODO(lindner) error out the initialized Promise
throw err;
});
}
/** @inheritDoc */
backingType(): Type {
return this.type;
}
async clone(): Promise<PouchDbSingleton> {
const singleton = new PouchDbSingleton(this.type, this.storageEngine, this.name, this.id, null, this.referenceMode);
await singleton.cloneFrom(this);
return singleton;
}
async cloneFrom(handle): Promise<void> {
const literal = await handle.serializeContents();
await this.initialized;
this.referenceMode = handle.referenceMode;
if (handle.referenceMode && literal.model.length > 0) {
// cloneFrom the backing store data by reading the model and writing it out.
const [backingStore, handleBackingStore] = await Promise.all(
[this.ensureBackingStore(), handle.ensureBackingStore()]);
literal.model = literal.model.map(({id, value}) => ({id, value: {id: value.id, storageKey: backingStore.storageKey}}));
const underlying = await handleBackingStore.getMultiple(literal.model.map(({id}) => id));
await backingStore.storeMultiple(underlying, [this.storageKey]);
}
await this.fromLiteral(literal);
if (literal && literal.model && literal.model.length === 1) {
const newvalue = literal.model[0].value;
if (newvalue) {
await this.upsert(async doc => {
doc.value = newvalue; | }
await this._fire(new ChangeEvent({data: newvalue, version: this._version}));
}
}
/**
* Returns the model data in a format suitable for transport over
* the API channel (i.e. between execution host and context).
*/
async modelForSynchronization() {
await this.initialized;
const doc = await this.upsert(async doc => doc);
const value = doc.value;
if (this.referenceMode && value !== null) {
const backingStore = await this.ensureBackingStore();
const result = await backingStore.get(value.id);
return {
version: this._version,
model: [{id: value.id, value: result}]
};
}
return super.modelForSynchronization();
}
/**
* Returns the state of this singleton based as an object of the form
* {version, model: [{id, value}]}
*/
async serializeContents(): Promise<{version: number; model: SerializedModelEntry[]}> {
await this.initialized;
const doc = await this.upsert(async doc => doc);
const value = doc.value;
let model: SerializedModelEntry[] = [];
if (value != null) {
model = [
{
id: value.id,
keys: [],
value
}
];
}
return {
version: this._version,
model
};
}
/**
* Updates the internal state of this singleton with the supplied data.
*/
async fromLiteral({version, model}): Promise<void> {
await this.initialized;
const value = model.length === 0 ? null : model[0].value;
if (this.referenceMode && value && value.rawData) {
assert(false, `shouldn't have rawData ${JSON.stringify(value.rawData)} here`);
}
assert(value !== undefined);
const newDoc = await this.upsert(async (doc) => {
// modify document
doc.value = value;
doc.referenceMode = this.referenceMode;
doc.version = Math.max(version, doc.version) + 1;
return doc;
});
this._version = newDoc.version;
}
/**
* @return a promise containing the singleton value or null if it does not exist.
*/
async get(): Promise<ModelValue> {
await this.initialized;
try {
const doc = await this.upsert(async doc => doc);
let value = doc.value;
if (value == null) {
//console.warn('value is null and refmode=' + this.referenceMode);
}
if (this.referenceMode && value) {
const backingStore = await this.ensureBackingStore();
value = await backingStore.get(value.id);
}
// logging goes here
return value;
} catch (err) {
// TODO(plindner): caught for compatibility: pouchdb layer can throw, firebase layer never does
console.warn('PouchDbSingleton.get err=', err);
return null;
}
}
/**
* Set the value for this singleton.
* @param value the value we want to set. If null remove the singleton from storage
* @param originatorId TBD
* @param barrier TBD
*/
async set(value, originatorId: string = null, barrier: string|null = null): Promise<void> {
assert(value !== undefined);
let stored: SingletonStorage;
if (this.referenceMode && value) {
// Even if this value is identical to the previously written one,
// we can't suppress an event here because we don't actually have
// the previous value for comparison (that's down in the backing store).
// TODO(shans): should we fetch and compare in the case of the ids matching?
const referredType = this.type;
const storageKey = this.storageEngine.baseStorageKey(referredType, this.storageKey);
const backingStore = await this.ensureBackingStore();
// TODO(shans): mutating the storageKey here to provide unique keys is
// a hack that can be removed once entity mutation is distinct from collection
// updates. Once entity mutation exists, it shouldn't ever be possible to write
// different values with the same id.
await backingStore.store(value, [this.storageKey + this.localKeyId++]);
// Store the indirect pointer to the storageKey
// Do this *after* the write to backing store, otherwise null responses could occur
stored = await this.upsert(async doc => {
doc.referenceMode = this.referenceMode;
doc.version = this._version;
doc.value = {id: value['id'], storageKey};
return doc;
});
} else {
// Update Pouch/_stored, If value is null delete key, otherwise store it.
if (value == null) {
try {
const doc = await this.db.get(this.pouchDbKey.location);
await this.db.remove(doc);
} catch (err) {
// Deleting an already deleted item is acceptable.
if (err.name !== 'not_found') {
console.warn('PouchDbSingleton.remove err=', err);
throw err;
}
}
} else {
stored = await this.upsert(async doc => {
doc.referenceMode = this.referenceMode;
doc.version = this._version;
doc.value = value;
return doc;
});
}
}
this.bumpVersion();
const data = this.referenceMode ? value : stored.value;
await this._fire(new ChangeEvent({data, version: this._version, originatorId, barrier}));
}
/**
* Clear a singleton from storage.
* @param originatorId TBD
* @param barrier TBD
*/
async clear(originatorId: string = null, barrier: string = null): Promise<void> {
await this.set(null, originatorId, barrier);
}
/**
* Triggered when the storage key has been modified or deleted.
*/
async onRemoteStateSynced(doc: PouchDB.Core.ExistingDocument<SingletonStorage>) {
// TODO(lindner): reimplement as simple fires when we have replication working again
// TODO(lindner): consider using doc._deleted to special case.
const value = doc.value;
// Store locally
this.bumpVersion(doc.version);
// Skip if value == null, which is what happens when docs are deleted..
if (value) {
await this.ensureBackingStore().then(async store => {
const data = await store.get(value.id);
if (!data) {
// TODO(lindner): data referred to by this data is missing.
console.log('PouchDbSingleton.onRemoteSynced: possible race condition for id=' + value.id);
return;
}
await this._fire(new ChangeEvent({data, version: this._version}));
});
} else {
if (value != null) {
await this._fire(new ChangeEvent({data: value, version: this._version}));
}
}
}
/**
* Get/Modify/Set the data stored for this singleton.
*/
private async upsert(mutatorFn: UpsertMutatorFn<SingletonStorage>): Promise<SingletonStorage> {
const defaultDoc: SingletonStorage = {
value: null,
version: 0,
referenceMode: this.referenceMode
};
const doc = await upsert(this.db, this.pouchDbKey.location, mutatorFn, defaultDoc);
// post process results from doc here.
this.referenceMode = doc.referenceMode;
this._version = doc.version;
return doc;
}
} | doc.referenceMode = this.referenceMode;
doc.version = Math.max(this._version, doc.version) + 1;
return doc;
}); | random_line_split |
pouch-db-singleton.ts | /**
* @license
* Copyright (c) 2017 Google Inc. All rights reserved.
* This code may only be used under the BSD style license found at
* http://polymer.github.io/LICENSE.txt
* Code distributed by Google as part of this project is also
* subject to an additional IP rights grant found at
* http://polymer.github.io/PATENTS.txt
*/
import {PouchDB} from '../../../../concrete-storage/pouchdb.js';
import {assert} from '../../../platform/assert-web.js';
import {Type} from '../../type.js';
import {ChangeEvent, SingletonStorageProvider} from '../storage-provider-base.js';
import {SerializedModelEntry, ModelValue} from '../crdt-collection-model.js';
import {PouchDbStorageProvider} from './pouch-db-storage-provider.js';
import {PouchDbStorage} from './pouch-db-storage.js';
import {upsert, UpsertDoc, UpsertMutatorFn} from './pouch-db-upsert.js';
/**
* A representation of a Singleton in Pouch storage.
*/
interface SingletonStorage extends UpsertDoc {
value: ModelValue;
/** ReferenceMode state for this data */
referenceMode: boolean;
/** Monotonically increasing version number */
version: number;
}
/**
* The PouchDB-based implementation of a Singleton.
*/
export class PouchDbSingleton extends PouchDbStorageProvider implements SingletonStorageProvider {
private localKeyId = 0;
/**
* Create a new PouchDbSingleton.
*
* @param type the underlying type for this singleton.
* @param storageEngine a reference back to the PouchDbStorage, used for baseStorageKey calls.
* @param name appears unused.
* @param id see base class.
* @param key the storage key for this collection.
*/
constructor(type: Type, storageEngine: PouchDbStorage, name: string, id: string, key: string, refMode: boolean) {
super(type, storageEngine, name, id, key, refMode);
this._version = 0;
// See if the value has been set
this.upsert(async doc => doc).then((doc) => {
this.resolveInitialized();
// value has been written
}).catch((err) => {
console.warn('Error init ' + this.storageKey, err);
// TODO(lindner) error out the initialized Promise
throw err;
});
}
/** @inheritDoc */
backingType(): Type {
return this.type;
}
async clone(): Promise<PouchDbSingleton> {
const singleton = new PouchDbSingleton(this.type, this.storageEngine, this.name, this.id, null, this.referenceMode);
await singleton.cloneFrom(this);
return singleton;
}
async cloneFrom(handle): Promise<void> {
const literal = await handle.serializeContents();
await this.initialized;
this.referenceMode = handle.referenceMode;
if (handle.referenceMode && literal.model.length > 0) {
// cloneFrom the backing store data by reading the model and writing it out.
const [backingStore, handleBackingStore] = await Promise.all(
[this.ensureBackingStore(), handle.ensureBackingStore()]);
literal.model = literal.model.map(({id, value}) => ({id, value: {id: value.id, storageKey: backingStore.storageKey}}));
const underlying = await handleBackingStore.getMultiple(literal.model.map(({id}) => id));
await backingStore.storeMultiple(underlying, [this.storageKey]);
}
await this.fromLiteral(literal);
if (literal && literal.model && literal.model.length === 1) {
const newvalue = literal.model[0].value;
if (newvalue) {
await this.upsert(async doc => {
doc.value = newvalue;
doc.referenceMode = this.referenceMode;
doc.version = Math.max(this._version, doc.version) + 1;
return doc;
});
}
await this._fire(new ChangeEvent({data: newvalue, version: this._version}));
}
}
/**
* Returns the model data in a format suitable for transport over
* the API channel (i.e. between execution host and context).
*/
async modelForSynchronization() {
await this.initialized;
const doc = await this.upsert(async doc => doc);
const value = doc.value;
if (this.referenceMode && value !== null) {
const backingStore = await this.ensureBackingStore();
const result = await backingStore.get(value.id);
return {
version: this._version,
model: [{id: value.id, value: result}]
};
}
return super.modelForSynchronization();
}
/**
* Returns the state of this singleton based as an object of the form
* {version, model: [{id, value}]}
*/
async serializeContents(): Promise<{version: number; model: SerializedModelEntry[]}> {
await this.initialized;
const doc = await this.upsert(async doc => doc);
const value = doc.value;
let model: SerializedModelEntry[] = [];
if (value != null) {
model = [
{
id: value.id,
keys: [],
value
}
];
}
return {
version: this._version,
model
};
}
/**
* Updates the internal state of this singleton with the supplied data.
*/
async fromLiteral({version, model}): Promise<void> {
await this.initialized;
const value = model.length === 0 ? null : model[0].value;
if (this.referenceMode && value && value.rawData) {
assert(false, `shouldn't have rawData ${JSON.stringify(value.rawData)} here`);
}
assert(value !== undefined);
const newDoc = await this.upsert(async (doc) => {
// modify document
doc.value = value;
doc.referenceMode = this.referenceMode;
doc.version = Math.max(version, doc.version) + 1;
return doc;
});
this._version = newDoc.version;
}
/**
* @return a promise containing the singleton value or null if it does not exist.
*/
async get(): Promise<ModelValue> {
await this.initialized;
try {
const doc = await this.upsert(async doc => doc);
let value = doc.value;
if (value == null) {
//console.warn('value is null and refmode=' + this.referenceMode);
}
if (this.referenceMode && value) {
const backingStore = await this.ensureBackingStore();
value = await backingStore.get(value.id);
}
// logging goes here
return value;
} catch (err) {
// TODO(plindner): caught for compatibility: pouchdb layer can throw, firebase layer never does
console.warn('PouchDbSingleton.get err=', err);
return null;
}
}
/**
* Set the value for this singleton.
* @param value the value we want to set. If null remove the singleton from storage
* @param originatorId TBD
* @param barrier TBD
*/
async set(value, originatorId: string = null, barrier: string|null = null): Promise<void> {
assert(value !== undefined);
let stored: SingletonStorage;
if (this.referenceMode && value) {
// Even if this value is identical to the previously written one,
// we can't suppress an event here because we don't actually have
// the previous value for comparison (that's down in the backing store).
// TODO(shans): should we fetch and compare in the case of the ids matching?
const referredType = this.type;
const storageKey = this.storageEngine.baseStorageKey(referredType, this.storageKey);
const backingStore = await this.ensureBackingStore();
// TODO(shans): mutating the storageKey here to provide unique keys is
// a hack that can be removed once entity mutation is distinct from collection
// updates. Once entity mutation exists, it shouldn't ever be possible to write
// different values with the same id.
await backingStore.store(value, [this.storageKey + this.localKeyId++]);
// Store the indirect pointer to the storageKey
// Do this *after* the write to backing store, otherwise null responses could occur
stored = await this.upsert(async doc => {
doc.referenceMode = this.referenceMode;
doc.version = this._version;
doc.value = {id: value['id'], storageKey};
return doc;
});
} else {
// Update Pouch/_stored, If value is null delete key, otherwise store it.
if (value == null) {
try {
const doc = await this.db.get(this.pouchDbKey.location);
await this.db.remove(doc);
} catch (err) {
// Deleting an already deleted item is acceptable.
if (err.name !== 'not_found') {
console.warn('PouchDbSingleton.remove err=', err);
throw err;
}
}
} else {
stored = await this.upsert(async doc => {
doc.referenceMode = this.referenceMode;
doc.version = this._version;
doc.value = value;
return doc;
});
}
}
this.bumpVersion();
const data = this.referenceMode ? value : stored.value;
await this._fire(new ChangeEvent({data, version: this._version, originatorId, barrier}));
}
/**
* Clear a singleton from storage.
* @param originatorId TBD
* @param barrier TBD
*/
async clear(originatorId: string = null, barrier: string = null): Promise<void> {
await this.set(null, originatorId, barrier);
}
/**
* Triggered when the storage key has been modified or deleted.
*/
async onRemoteStateSynced(doc: PouchDB.Core.ExistingDocument<SingletonStorage>) {
// TODO(lindner): reimplement as simple fires when we have replication working again
// TODO(lindner): consider using doc._deleted to special case.
const value = doc.value;
// Store locally
this.bumpVersion(doc.version);
// Skip if value == null, which is what happens when docs are deleted..
if (value) {
await this.ensureBackingStore().then(async store => {
const data = await store.get(value.id);
if (!data) {
// TODO(lindner): data referred to by this data is missing.
console.log('PouchDbSingleton.onRemoteSynced: possible race condition for id=' + value.id);
return;
}
await this._fire(new ChangeEvent({data, version: this._version}));
});
} else {
if (value != null) |
}
}
/**
* Get/Modify/Set the data stored for this singleton.
*/
private async upsert(mutatorFn: UpsertMutatorFn<SingletonStorage>): Promise<SingletonStorage> {
const defaultDoc: SingletonStorage = {
value: null,
version: 0,
referenceMode: this.referenceMode
};
const doc = await upsert(this.db, this.pouchDbKey.location, mutatorFn, defaultDoc);
// post process results from doc here.
this.referenceMode = doc.referenceMode;
this._version = doc.version;
return doc;
}
}
| {
await this._fire(new ChangeEvent({data: value, version: this._version}));
} | conditional_block |
world.py | #-*- coding:utf-8 -*-
import roslib
import rospy
import math
import time
import actionlib
from actionlib_msgs.msg import *
from move_base_msgs.msg import MoveBaseAction, MoveBaseGoal
from nav_msgs.msg import OccupancyGrid
from nav_msgs.msg import MapMetaData
from geometry_msgs.msg import Pose , Point , PoseWithCovarianceStamped , PoseStamped , Twist
from gazebo_msgs.msg import ModelState
from test import Map,Cell
class World(object):
def __init__(self):
self.map = [
['-','-','g','-'],
['.','.','.','.'],
['.','b','-','.'],
['.','.','.','.'],
]
self.Map = None
self.path = []
self.Target = None
self.rows = len(self.map)
self.cols = len(self.map[0])
self.traps = [0,1,3,10]
self.walls = [9]
self.targets = [2]
self.legalstate = filter(lambda x:not ( x in self.traps or x in self.targets or x in self.walls ) , [ i for i in range(self.rows * self.cols)])
# print(self.legalstate)
self.actionmap = {
0:'up',
1:'down',
2:'left',
3:'right'
}
self.targetReward = 1.0
self.trapReward = -1.0
self.wallReward = -0.5
self.norReward = -0.1
self.map_to_state , self.state_to_map = self.init()
self.stateNumber = len(self.map_to_state)
self.legalstate = map(lambda x:self.map_to_state[x],self.legalstate)
# print(self.legalstate)
def init(self):
map_to_state = {}
state_to_map = {}
mapindex = 0
stateindex = 0
for i in range(self.rows) :
for j in range(self.cols) :
if self.map[i][j] != 'b' :
map_to_state[mapindex] = stateindex
state_to_map[stateindex] = mapindex
stateindex += 1
mapindex += 1
return map_to_state , state_to_map
print(len(state_to_map))
print(map_to_state)
print(state_to_map)
def simulation(self,state,action):
if state not in self.state_to_map :
print("exception: illegal state")
exit(0)
m_state = self.state_to_map[state]
nextstate = self.__execute(m_state,action)
c = 0
r = 0.0
if self.isLegalState(nextstate):
c , r = self.getReward(nextstate)
else:
r = self.norReward
c = 0
nextstate = m_state
if nextstate in self.walls :
nextstate = m_state
return [state,action,self.map_to_state[nextstate],r,c]
def __execute(self,state,action):
if action == 0 :
nextstate = state - self.rows
elif action == 1 :
nextstate = state + self.rows
elif action == 2 :
nextstate = state - 1
elif action == 3 :
nextstate = state + 1
else:
nextstate = state
return nextstate
def isTarget(self,state):
if state < 0 or state >= self.rows * self.cols:
return False
state = self.state_to_map[state]
row = state / self.cols
col = state - row * self.cols
if self.map[row][col] == 'g':
return True
return False
def getReward(self,state):
| return (-1,self.targetReward)
elif state in self.traps :
return (-1,self.trapReward)
elif state in self.walls :
return (0,self.wallReward)
else:
return (0,self.norReward)
def isLegalState(self,state):
if state >= 0 and state <= self.rows * self.cols - 1 :
return True
else:
return False
def state_to_coordinate(self,state):
if self.isLegalState(state) :
row = state / self.cols
col = state - self.cols * row
return (row+1,col+1)
else:
return (None,None)
def setMap(self,map):
self.Map = map
self.Map.gridsize = self.Map.gridsize * self.Map.grids
self.rows = int(self.Map.height / self.Map.grids) + 1
self.cols = int(self.Map.width / self.Map.grids) + 1
self.stateNumber = self.cols * self.rows
print(self.Map.height,self.Map.width)
print(self.rows,self.cols)
print(self.stateNumber)
print(self.Map.turtlebotPosition)
print(self.getStateNum(self.Map.getCell(self.Map.turtlebotPosition)))
def getPosition(self):
if self.Map == None :
return None
else:
return self.getStateNum(self.Map.getCell(self.Map.turtlebotPosition))
def doAction(self,action):
if action == 0 :
x = 0
y = 1
elif action == 1 :
x = 0
y = -1
elif action == 2 :
x = -1
y = 0
elif action == 3 :
x = 1
y = 0
else:
x = y = 0
return x , y
#把机器人发来的动作转换成地图中的下一个目标位置,然后发送给move_base
#同时接收返回的数据,包括reward、当前位置等,然后通过一定的reward机制评定最后的reward,最后返回给机器人
def doSimulation(self,action):
x , y = self.doAction(action)
print("action ",self.actionmap[action])
state = self.Map.sendGoal(x,y)
if state != None :
state.Print()
# time.sleep(1)
state1 = self.getStateNum(state.curCell)
state2 = self.getStateNum(state.realCell)
realstate = self.getStateNum(state.realCell)
targetCell = self.getCellFromStateNum(self.Target)
diff1 = math.fabs(targetCell.X-state.curCell.X) + math.fabs(targetCell.Y-state.curCell.Y)
diff2 = math.fabs(targetCell.X-state.realCell.X) + math.fabs(targetCell.Y-state.realCell.Y)
addreward = 0
extraReward = 0.0
if diff2 >= diff1 :
extraReward += 10
#原地不懂,惩罚,表明可能遇到了障碍物
if state1 == realstate :
extraReward += 10
#离目标越近,reward越高
if math.fabs( targetCell.X - state.curCell.X ) > math.fabs(targetCell.X - state.realCell.X) :
addreward += 20
if math.fabs(targetCell.X - state.realCell.X) <= 2 :
addreward += 11
self.changeRewardOnPath(40)
elif math.fabs(targetCell.X - state.realCell.X) <= 3 :
addreward += 9
self.changeRewardOnPath(30)
elif math.fabs(targetCell.X - state.realCell.X) <= 4 :
addreward += 7
self.changeRewardOnPath(20)
elif math.fabs(targetCell.X - state.realCell.X) <= 5 :
addreward += 5
self.changeRewardOnPath(15)
elif math.fabs(targetCell.X - state.realCell.X) <= 6 :
addreward += 5
self.changeRewardOnPath(10)
if math.fabs( targetCell.X - state.curCell.X ) < math.fabs(targetCell.X - state.realCell.X) :
addreward -= 50
if math.fabs( targetCell.Y - state.curCell.Y ) > math.fabs(targetCell.Y - state.realCell.Y) :
addreward += 20
if math.fabs(targetCell.Y - state.realCell.Y) <= 1 :
addreward += 5
if math.fabs( targetCell.Y - state.curCell.Y ) < math.fabs(targetCell.Y - state.realCell.Y) :
addreward -40
state.reward -= extraReward
state.reward += addreward
print("a reward ",state.reward)
self.path.append([state1,action,state2,state.reward,state.c])
flag = self.checkGoal(state2)
if flag :
for i in range(len(self.path)):
self.path[i][3] += 10
if flag :
return realstate , False , self.path , state.reward , action
else:
return realstate , True , self.path , state.reward , action
def changeRewardOnPath(self,reward,rate=0.5,axis=0):
for i in range(len(self.path)-1,-1,-1):
cell1 = self.getCellFromStateNum(self.path[i][0])
cell2 = self.getCellFromStateNum(self.path[i][2])
targetCell = self.getCellFromStateNum(self.Target)
if math.fabs( targetCell.X - cell1.X ) > math.fabs(targetCell.X - cell2.X) :
self.path[i][3] += reward
if reward > 5 :
reward = reward * rate
else:
reward = 5
def checkGoal(self,state):
if state == self.Target :
print("get target ",self.getCellFromStateNum(self.Target))
return True
return False
def clearPath(self):
self.path = []
#根据地图中的cell获取状态编号
def getStateNum(self,cell):
num = cell.Y * self.cols + cell.X
return num
#根据状态编号获取地图中cell
def getCellFromStateNum(self,num):
y = num / self.cols
x = num - self.cols * y
cell = Cell(x,y)
return cell
def getPositionStateNum(self,pos):
if self.Map != None :
print(self.getStateNum(self.Map.getCell(pos)))
# mapspace = Map()
# world = World()
# def getMap(data):
# global mapspace
# data = data.data
# mapspace.setMap(data)
# def getMapMetaData(data):
# global mapspace
# mapspace.setMapParameters(data)
# # mapspace.Print()
# def getPosition(data):
# # print(data)
# global mapspace , world
# mapspace.setPosition(data.pose.pose.position)
# world.getPositionStateNum(data.pose.pose.position)
# if __name__ == '__main__' :
# # world = World()
# # print(world.cols)
# # state = 2
# # print(world.state_to_coordinate(state))
# # print(world.simulation(state,1))
# # print(world.state_to_coordinate(state))
# # print(world.state_to_coordinate(3))
# posePub = rospy.Publisher("/move_base_simple/goal",PoseStamped,queue_size=10)
# modelpositionPub = rospy.Publisher("/gazebo/set_model_state",ModelState,queue_size=10)
# rospy.init_node("test",anonymous=False)
# rospy.Subscriber("/map",OccupancyGrid,callback=getMap)
# rospy.Subscriber("/map_metadata",MapMetaData,getMapMetaData)
# rospy.Subscriber("/amcl_pose",PoseWithCovarianceStamped,getPosition)
# mapspace.init()
# # sim = rospy.get_param('/use_sim_time')
# # print(sim)
# while not mapspace.ready() :
# pass
# print("ready")
# mapspace.getNewMap()
# world.setMap(mapspace)
# # times = 0
# # while True:
# # state = mapspace.sendGoal(1,0)
# # if state != None :
# # state.Print()
# # print("send goal %d" % times)
# # times += 1
# # time.sleep(1)
# # if times > 10 :
# # break
# # pass
# rospy.spin() | if state in self.targets :
| random_line_split |
world.py | #-*- coding:utf-8 -*-
import roslib
import rospy
import math
import time
import actionlib
from actionlib_msgs.msg import *
from move_base_msgs.msg import MoveBaseAction, MoveBaseGoal
from nav_msgs.msg import OccupancyGrid
from nav_msgs.msg import MapMetaData
from geometry_msgs.msg import Pose , Point , PoseWithCovarianceStamped , PoseStamped , Twist
from gazebo_msgs.msg import ModelState
from test import Map,Cell
class World(object):
def __init__(self):
self.map = [
['-','-','g','-'],
['.','.','.','.'],
['.','b','-','.'],
['.','.','.','.'],
]
self.Map = None
self.path = []
self.Target = None
self.rows = len(self.map)
self.cols = len(self.map[0])
self.traps = [0,1,3,10]
self.walls = [9]
self.targets = [2]
self.legalstate = filter(lambda x:not ( x in self.traps or x in self.targets or x in self.walls ) , [ i for i in range(self.rows * self.cols)])
# print(self.legalstate)
self.actionmap = {
0:'up',
1:'down',
2:'left',
3:'right'
}
self.targetReward = 1.0
self.trapReward = -1.0
self.wallReward = -0.5
self.norReward = -0.1
self.map_to_state , self.state_to_map = self.init()
self.stateNumber = len(self.map_to_state)
self.legalstate = map(lambda x:self.map_to_state[x],self.legalstate)
# print(self.legalstate)
def init(self):
map_to_state = {}
state_to_map = {}
mapindex = 0
stateindex = 0
for i in range(self.rows) :
for j in range(self.cols) :
if self.map[i][j] != 'b' :
map_to_state[mapindex] = stateindex
state_to_map[stateindex] = mapindex
stateindex += 1
mapindex += 1
return map_to_state , state_to_map
print(len(state_to_map))
print(map_to_state)
print(state_to_map)
def simulation(self,state,action):
if state not in self.state_to_map :
print("exception: illegal state")
exit(0)
m_state = self.state_to_map[state]
nextstate = self.__execute(m_state,action)
c = 0
r = 0.0
if self.isLegalState(nextstate):
c , r = self.getReward(nextstate)
else:
r = self.norReward
c = 0
nextstate = m_state
if nextstate in self.walls :
nextstate = m_state
return [state,action,self.map_to_state[nextstate],r,c]
def __execute(self,state,action):
if action == 0 :
nextstate = state - self.rows
elif action == 1 :
nextstate = state + self.rows
elif action == 2 :
nextstate = state - 1
elif action == 3 :
nextstate = state + 1
else:
nextstate = state
return nextstate
def | (self,state):
if state < 0 or state >= self.rows * self.cols:
return False
state = self.state_to_map[state]
row = state / self.cols
col = state - row * self.cols
if self.map[row][col] == 'g':
return True
return False
def getReward(self,state):
if state in self.targets :
return (-1,self.targetReward)
elif state in self.traps :
return (-1,self.trapReward)
elif state in self.walls :
return (0,self.wallReward)
else:
return (0,self.norReward)
def isLegalState(self,state):
if state >= 0 and state <= self.rows * self.cols - 1 :
return True
else:
return False
def state_to_coordinate(self,state):
if self.isLegalState(state) :
row = state / self.cols
col = state - self.cols * row
return (row+1,col+1)
else:
return (None,None)
def setMap(self,map):
self.Map = map
self.Map.gridsize = self.Map.gridsize * self.Map.grids
self.rows = int(self.Map.height / self.Map.grids) + 1
self.cols = int(self.Map.width / self.Map.grids) + 1
self.stateNumber = self.cols * self.rows
print(self.Map.height,self.Map.width)
print(self.rows,self.cols)
print(self.stateNumber)
print(self.Map.turtlebotPosition)
print(self.getStateNum(self.Map.getCell(self.Map.turtlebotPosition)))
def getPosition(self):
if self.Map == None :
return None
else:
return self.getStateNum(self.Map.getCell(self.Map.turtlebotPosition))
def doAction(self,action):
if action == 0 :
x = 0
y = 1
elif action == 1 :
x = 0
y = -1
elif action == 2 :
x = -1
y = 0
elif action == 3 :
x = 1
y = 0
else:
x = y = 0
return x , y
#把机器人发来的动作转换成地图中的下一个目标位置,然后发送给move_base
#同时接收返回的数据,包括reward、当前位置等,然后通过一定的reward机制评定最后的reward,最后返回给机器人
def doSimulation(self,action):
x , y = self.doAction(action)
print("action ",self.actionmap[action])
state = self.Map.sendGoal(x,y)
if state != None :
state.Print()
# time.sleep(1)
state1 = self.getStateNum(state.curCell)
state2 = self.getStateNum(state.realCell)
realstate = self.getStateNum(state.realCell)
targetCell = self.getCellFromStateNum(self.Target)
diff1 = math.fabs(targetCell.X-state.curCell.X) + math.fabs(targetCell.Y-state.curCell.Y)
diff2 = math.fabs(targetCell.X-state.realCell.X) + math.fabs(targetCell.Y-state.realCell.Y)
addreward = 0
extraReward = 0.0
if diff2 >= diff1 :
extraReward += 10
#原地不懂,惩罚,表明可能遇到了障碍物
if state1 == realstate :
extraReward += 10
#离目标越近,reward越高
if math.fabs( targetCell.X - state.curCell.X ) > math.fabs(targetCell.X - state.realCell.X) :
addreward += 20
if math.fabs(targetCell.X - state.realCell.X) <= 2 :
addreward += 11
self.changeRewardOnPath(40)
elif math.fabs(targetCell.X - state.realCell.X) <= 3 :
addreward += 9
self.changeRewardOnPath(30)
elif math.fabs(targetCell.X - state.realCell.X) <= 4 :
addreward += 7
self.changeRewardOnPath(20)
elif math.fabs(targetCell.X - state.realCell.X) <= 5 :
addreward += 5
self.changeRewardOnPath(15)
elif math.fabs(targetCell.X - state.realCell.X) <= 6 :
addreward += 5
self.changeRewardOnPath(10)
if math.fabs( targetCell.X - state.curCell.X ) < math.fabs(targetCell.X - state.realCell.X) :
addreward -= 50
if math.fabs( targetCell.Y - state.curCell.Y ) > math.fabs(targetCell.Y - state.realCell.Y) :
addreward += 20
if math.fabs(targetCell.Y - state.realCell.Y) <= 1 :
addreward += 5
if math.fabs( targetCell.Y - state.curCell.Y ) < math.fabs(targetCell.Y - state.realCell.Y) :
addreward -40
state.reward -= extraReward
state.reward += addreward
print("a reward ",state.reward)
self.path.append([state1,action,state2,state.reward,state.c])
flag = self.checkGoal(state2)
if flag :
for i in range(len(self.path)):
self.path[i][3] += 10
if flag :
return realstate , False , self.path , state.reward , action
else:
return realstate , True , self.path , state.reward , action
def changeRewardOnPath(self,reward,rate=0.5,axis=0):
for i in range(len(self.path)-1,-1,-1):
cell1 = self.getCellFromStateNum(self.path[i][0])
cell2 = self.getCellFromStateNum(self.path[i][2])
targetCell = self.getCellFromStateNum(self.Target)
if math.fabs( targetCell.X - cell1.X ) > math.fabs(targetCell.X - cell2.X) :
self.path[i][3] += reward
if reward > 5 :
reward = reward * rate
else:
reward = 5
def checkGoal(self,state):
if state == self.Target :
print("get target ",self.getCellFromStateNum(self.Target))
return True
return False
def clearPath(self):
self.path = []
#根据地图中的cell获取状态编号
def getStateNum(self,cell):
num = cell.Y * self.cols + cell.X
return num
#根据状态编号获取地图中cell
def getCellFromStateNum(self,num):
y = num / self.cols
x = num - self.cols * y
cell = Cell(x,y)
return cell
def getPositionStateNum(self,pos):
if self.Map != None :
print(self.getStateNum(self.Map.getCell(pos)))
# mapspace = Map()
# world = World()
# def getMap(data):
# global mapspace
# data = data.data
# mapspace.setMap(data)
# def getMapMetaData(data):
# global mapspace
# mapspace.setMapParameters(data)
# # mapspace.Print()
# def getPosition(data):
# # print(data)
# global mapspace , world
# mapspace.setPosition(data.pose.pose.position)
# world.getPositionStateNum(data.pose.pose.position)
# if __name__ == '__main__' :
# # world = World()
# # print(world.cols)
# # state = 2
# # print(world.state_to_coordinate(state))
# # print(world.simulation(state,1))
# # print(world.state_to_coordinate(state))
# # print(world.state_to_coordinate(3))
# posePub = rospy.Publisher("/move_base_simple/goal",PoseStamped,queue_size=10)
# modelpositionPub = rospy.Publisher("/gazebo/set_model_state",ModelState,queue_size=10)
# rospy.init_node("test",anonymous=False)
# rospy.Subscriber("/map",OccupancyGrid,callback=getMap)
# rospy.Subscriber("/map_metadata",MapMetaData,getMapMetaData)
# rospy.Subscriber("/amcl_pose",PoseWithCovarianceStamped,getPosition)
# mapspace.init()
# # sim = rospy.get_param('/use_sim_time')
# # print(sim)
# while not mapspace.ready() :
# pass
# print("ready")
# mapspace.getNewMap()
# world.setMap(mapspace)
# # times = 0
# # while True:
# # state = mapspace.sendGoal(1,0)
# # if state != None :
# # state.Print()
# # print("send goal %d" % times)
# # times += 1
# # time.sleep(1)
# # if times > 10 :
# # break
# # pass
# rospy.spin() | isTarget | identifier_name |
world.py | #-*- coding:utf-8 -*-
import roslib
import rospy
import math
import time
import actionlib
from actionlib_msgs.msg import *
from move_base_msgs.msg import MoveBaseAction, MoveBaseGoal
from nav_msgs.msg import OccupancyGrid
from nav_msgs.msg import MapMetaData
from geometry_msgs.msg import Pose , Point , PoseWithCovarianceStamped , PoseStamped , Twist
from gazebo_msgs.msg import ModelState
from test import Map,Cell
class World(object):
def __init__(self):
self.map = [
['-','-','g','-'],
['.','.','.','.'],
['.','b','-','.'],
['.','.','.','.'],
]
self.Map = None
self.path = []
self.Target = None
self.rows = len(self.map)
self.cols = len(self.map[0])
self.traps = [0,1,3,10]
self.walls = [9]
self.targets = [2]
self.legalstate = filter(lambda x:not ( x in self.traps or x in self.targets or x in self.walls ) , [ i for i in range(self.rows * self.cols)])
# print(self.legalstate)
self.actionmap = {
0:'up',
1:'down',
2:'left',
3:'right'
}
self.targetReward = 1.0
self.trapReward = -1.0
self.wallReward = -0.5
self.norReward = -0.1
self.map_to_state , self.state_to_map = self.init()
self.stateNumber = len(self.map_to_state)
self.legalstate = map(lambda x:self.map_to_state[x],self.legalstate)
# print(self.legalstate)
def init(self):
map_to_state = {}
state_to_map = {}
mapindex = 0
stateindex = 0
for i in range(self.rows) :
for j in range(self.cols) :
if self.map[i][j] != 'b' :
map_to_state[mapindex] = stateindex
state_to_map[stateindex] = mapindex
stateindex += 1
mapindex += 1
return map_to_state , state_to_map
print(len(state_to_map))
print(map_to_state)
print(state_to_map)
def simulation(self,state,action):
if state not in self.state_to_map :
print("exception: illegal state")
exit(0)
m_state = self.state_to_map[state]
nextstate = self.__execute(m_state,action)
c = 0
r = 0.0
if self.isLegalState(nextstate):
c , r = self.getReward(nextstate)
else:
r = self.norReward
c = 0
nextstate = m_state
if nextstate in self.walls :
nextstate = m_state
return [state,action,self.map_to_state[nextstate],r,c]
def __execute(self,state,action):
if action == 0 :
nextstate = state - self.rows
elif action == 1 :
nextstate = state + self.rows
elif action == 2 :
nextstate = state - 1
elif action == 3 :
nextstate = state + 1
else:
nextstate = state
return nextstate
def isTarget(self,state):
if state < 0 or state >= self.rows * self.cols:
return False
state = self.state_to_map[state]
row = state / self.cols
col = state - row * self.cols
if self.map[row][col] == 'g':
return True
return False
def getReward(self,state):
if state in self.targets :
return (-1,self.targetReward)
elif state in self.traps :
return (-1,self.trapReward)
elif state in self.walls :
return (0,self.wallReward)
else:
return (0,self.norReward)
def isLegalState(self,state):
if state >= 0 and state <= self.rows * self.cols - 1 :
return True
else:
return False
def state_to_coordinate(self,state):
if self.isLegalState(state) :
row = state / self.cols
col = state - self.cols * row
return (row+1,col+1)
else:
return (None,None)
def setMap(self,map):
self.Map = map
self.Map.gridsize = self.Map.gridsize * self.Map.grids
self.rows = int(self.Map.height / self.Map.grids) + 1
self.cols = int(self.Map.width / self.Map.grids) + 1
self.stateNumber = self.cols * self.rows
print(self.Map.height,self.Map.width)
print(self.rows,self.cols)
print(self.stateNumber)
print(self.Map.turtlebotPosition)
print(self.getStateNum(self.Map.getCell(self.Map.turtlebotPosition)))
def getPosition(self):
if self.Map == None :
return None
else:
return self.getStateNum(self.Map.getCell(self.Map.turtlebotPosition))
def doAction(self,action):
if action == 0 :
x = 0
y = 1
elif action == 1 :
x = 0
y = -1
elif action == 2 :
x = -1
y = 0
elif action == 3 :
x = 1
y = 0
else:
x = y = 0
return x , y
#把机器人发来的动作转换成地图中的下一个目标位置,然后发送给move_base
#同时接收返回的数据,包括reward、当前位置等,然后通过一定的reward机制评定最后的reward,最后返回给机器人
def doSimulation(self,action):
x , y = self.doAction(action)
print("action ",self.actionmap[action])
state = self.Map.sendGoal(x,y)
if state != None :
state.Print()
# time.sleep(1)
state1 = self.getStateNum(state.curCell)
state2 = self.getStateNum(state.realCell)
realstate = self.getStateNum(state.realCell)
targetCell = self.getCellFromStateNum(self.Target)
diff1 = math.fabs(targetCell.X-state.curCell.X) + math.fabs(targetCell.Y-state.curCell.Y)
diff2 = math.fabs(targetCell.X-state.realCell.X) + math.fabs(targetCell.Y-state.realCell.Y)
addreward = 0
extraReward = 0.0
if diff2 >= diff1 :
extraReward += 10
#原地不懂,惩罚,表明可能遇到了障碍物
if state1 == realstate :
extraReward += 10
#离目标越近,reward越高
if math.fabs( targetCell.X - state.curCell.X ) > math.fabs(targetCell.X - state.realCell.X) :
addreward += 20
if math.fabs(targetCell.X - state.realCell.X) <= 2 :
addreward += 11
self.changeRewardOnPath(40)
elif math.fabs(targetCell.X - state.realCell.X) <= 3 :
addreward += 9
self.changeRewardOnPath(30)
elif math.fabs(targetCell.X - state.realCell.X) <= 4 :
addreward += 7
self.changeRewardOnPath(20)
elif math.fabs(targetCell.X - state.realCell.X) <= 5 :
addreward += 5
self.changeRewardOnPath(15)
elif math.fabs(targetCell.X - state.realCell.X) <= 6 :
addreward += 5
self.changeRewardOnPath(10)
if math.fabs( targetCell.X - state.curCell.X ) < math.fabs(targetCell.X - state.realCell.X) :
addrewar | ll.Y ) > math.fabs(targetCell.Y - state.realCell.Y) :
addreward += 20
if math.fabs(targetCell.Y - state.realCell.Y) <= 1 :
addreward += 5
if math.fabs( targetCell.Y - state.curCell.Y ) < math.fabs(targetCell.Y - state.realCell.Y) :
addreward -40
state.reward -= extraReward
state.reward += addreward
print("a reward ",state.reward)
self.path.append([state1,action,state2,state.reward,state.c])
flag = self.checkGoal(state2)
if flag :
for i in range(len(self.path)):
self.path[i][3] += 10
if flag :
return realstate , False , self.path , state.reward , action
else:
return realstate , True , self.path , state.reward , action
def changeRewardOnPath(self,reward,rate=0.5,axis=0):
for i in range(len(self.path)-1,-1,-1):
cell1 = self.getCellFromStateNum(self.path[i][0])
cell2 = self.getCellFromStateNum(self.path[i][2])
targetCell = self.getCellFromStateNum(self.Target)
if math.fabs( targetCell.X - cell1.X ) > math.fabs(targetCell.X - cell2.X) :
self.path[i][3] += reward
if reward > 5 :
reward = reward * rate
else:
reward = 5
def checkGoal(self,state):
if state == self.Target :
print("get target ",self.getCellFromStateNum(self.Target))
return True
return False
def clearPath(self):
self.path = []
#根据地图中的cell获取状态编号
def getStateNum(self,cell):
num = cell.Y * self.cols + cell.X
return num
#根据状态编号获取地图中cell
def getCellFromStateNum(self,num):
y = num / self.cols
x = num - self.cols * y
cell = Cell(x,y)
return cell
def getPositionStateNum(self,pos):
if self.Map != None :
print(self.getStateNum(self.Map.getCell(pos)))
# mapspace = Map()
# world = World()
# def getMap(data):
# global mapspace
# data = data.data
# mapspace.setMap(data)
# def getMapMetaData(data):
# global mapspace
# mapspace.setMapParameters(data)
# # mapspace.Print()
# def getPosition(data):
# # print(data)
# global mapspace , world
# mapspace.setPosition(data.pose.pose.position)
# world.getPositionStateNum(data.pose.pose.position)
# if __name__ == '__main__' :
# # world = World()
# # print(world.cols)
# # state = 2
# # print(world.state_to_coordinate(state))
# # print(world.simulation(state,1))
# # print(world.state_to_coordinate(state))
# # print(world.state_to_coordinate(3))
# posePub = rospy.Publisher("/move_base_simple/goal",PoseStamped,queue_size=10)
# modelpositionPub = rospy.Publisher("/gazebo/set_model_state",ModelState,queue_size=10)
# rospy.init_node("test",anonymous=False)
# rospy.Subscriber("/map",OccupancyGrid,callback=getMap)
# rospy.Subscriber("/map_metadata",MapMetaData,getMapMetaData)
# rospy.Subscriber("/amcl_pose",PoseWithCovarianceStamped,getPosition)
# mapspace.init()
# # sim = rospy.get_param('/use_sim_time')
# # print(sim)
# while not mapspace.ready() :
# pass
# print("ready")
# mapspace.getNewMap()
# world.setMap(mapspace)
# # times = 0
# # while True:
# # state = mapspace.sendGoal(1,0)
# # if state != None :
# # state.Print()
# # print("send goal %d" % times)
# # times += 1
# # time.sleep(1)
# # if times > 10 :
# # break
# # pass
# rospy.spin() | d -= 50
if math.fabs( targetCell.Y - state.curCe | conditional_block |
world.py | #-*- coding:utf-8 -*-
import roslib
import rospy
import math
import time
import actionlib
from actionlib_msgs.msg import *
from move_base_msgs.msg import MoveBaseAction, MoveBaseGoal
from nav_msgs.msg import OccupancyGrid
from nav_msgs.msg import MapMetaData
from geometry_msgs.msg import Pose , Point , PoseWithCovarianceStamped , PoseStamped , Twist
from gazebo_msgs.msg import ModelState
from test import Map,Cell
class World(object):
def __init__(self):
self.map = [
['-','-','g','-'],
['.','.','.','.'],
['.','b','-','.'],
['.','.','.','.'],
]
self.Map = None
self.path = []
self.Target = None
self.rows = len(self.map)
self.cols = len(self.map[0])
self.traps = [0,1,3,10]
self.walls = [9]
self.targets = [2]
self.legalstate = filter(lambda x:not ( x in self.traps or x in self.targets or x in self.walls ) , [ i for i in range(self.rows * self.cols)])
# print(self.legalstate)
self.actionmap = {
0:'up',
1:'down',
2:'left',
3:'right'
}
self.targetReward = 1.0
self.trapReward = -1.0
self.wallReward = -0.5
self.norReward = -0.1
self.map_to_state , self.state_to_map = self.init()
self.stateNumber = len(self.map_to_state)
self.legalstate = map(lambda x:self.map_to_state[x],self.legalstate)
# print(self.legalstate)
def init(self):
map_to_state = {}
state_to_map = {}
mapindex = 0
stateindex = 0
for i in range(self.rows) :
for j in range(self.cols) :
if self.map[i][j] != 'b' :
map_to_state[mapindex] = stateindex
state_to_map[stateindex] = mapindex
stateindex += 1
mapindex += 1
return map_to_state , state_to_map
print(len(state_to_map))
print(map_to_state)
print(state_to_map)
def simulation(self,state,action):
if state not in self.state_to_map :
print("exception: illegal state")
exit(0)
m_state = self.state_to_map[state]
nextstate = self.__execute(m_state,action)
c = 0
r = 0.0
if self.isLegalState(nextstate):
c , r = self.getReward(nextstate)
else:
r = self.norReward
c = 0
nextstate = m_state
if nextstate in self.walls :
nextstate = m_state
return [state,action,self.map_to_state[nextstate],r,c]
def __execute(self,state,action):
if action == 0 :
nextstate = state - self.rows
elif action == 1 :
nextstate = state + self.rows
elif action == 2 :
nextstate = state - 1
elif action == 3 :
nextstate = state + 1
else:
nextstate = state
return nextstate
def isTarget(self,state):
if state < 0 or state >= self.rows * self.cols:
return False
state = self.state_to_map[state]
row = state / self.cols
col = state - row * self.cols
if self.map[row][col] == 'g':
return True
return False
def getReward(self,state):
if state in self.targets :
return (-1,self.targetReward)
elif state in self.traps :
return (-1,self.trapReward)
elif state in self.walls :
return (0,self.wallReward)
else:
return (0,self.norReward)
def isLegalState(self,state):
if state >= 0 and state <= self.rows * self.cols - 1 :
return True
else:
return False
def state_to_coordinate(self,state):
if self.isLegalState(state) :
row = state / self.cols
col = state - self.cols * row
return (row+1,col+1)
else:
return (None,None)
def setMap(self,map):
self.Map = map
self.Map.gridsize = self.Map.gridsize * self.Map.grids
self.rows = int(self.Map.height / self.Map.grids) + 1
self.cols = int(self.Map.width / self.Map.grids) + 1
self.stateNumber = self.cols * self.rows
print(self.Map.height,self.Map.width)
print(self.rows,self.cols)
print(self.stateNumber)
print(self.Map.turtlebotPosition)
print(self.getStateNum(self.Map.getCell(self.Map.turtlebotPosition)))
def getPosition(self):
if self.Map == None :
return None
else:
return self.getStateNum(self.Map.getCell(self.Map.turtlebotPosition))
def doAction(self,action):
if action == 0 :
x = 0
y = 1
elif action == 1 :
x = 0
y = -1
elif action == 2 :
x = -1
y = 0
elif action == 3 :
x = 1
y = 0
else:
x = y = 0
return x , y
#把机器人发来的动作转换成地图中的下一个目标位置,然后发送给move_base
#同时接收返回的数据,包括reward、当前位置等,然后通过一定的reward机制评定最后的reward,最后返回给机器人
def doSimulation(self,action):
x , y = self.doAction(action)
print("action ",self.actionmap[action])
state = self.Map.sendGoal(x,y)
if state != None :
state.Print()
# time.sleep(1)
state1 = self.getStateNum(state.curCell)
state2 = self.getStateNum(state.realCell)
realstate = self.getStateNum(state.realCell)
targetCell = self.getCellFromStateNum(self.Target)
diff1 = math.fabs(targetCell.X-state.curCell.X) + math.fabs(targetCell.Y-state.curCell.Y)
diff2 = math.fabs(targetCell.X-state.realCell.X) + math.fabs(targetCell.Y-state.realCell.Y)
addreward = 0
extraReward = 0.0
if diff2 >= diff1 :
extraReward += 10
#原地不懂,惩罚,表明可能遇到了障碍物
if state1 == realstate :
extraReward += 10
#离目标越近,reward越高
if math.fabs( targetCell.X - state.curCell.X ) > math.fabs(targetCell.X - state.realCell.X) :
addreward += 20
if math.fabs(targetCell.X - state.realCell.X) <= 2 :
addreward += 11
self.changeRewardOnPath(40)
elif math.fabs(targetCell.X - state.realCell.X) <= 3 :
addreward += 9
self.changeRewardOnPath(30)
elif math.fabs(targetCell.X - state.realCell.X) <= 4 :
addreward += 7
self.changeRewardOnPath(20)
elif math.fabs(targetCell.X - state.realCell.X) <= 5 :
addreward += 5
self.changeRewardOnPath(15)
elif math.fabs(targetCell.X - state.realCell.X) <= 6 :
addreward += 5
self.changeRewardOnPath(10)
if math.fabs( targetCell.X - state.curCell.X ) < math.fabs(targetCell.X - state.realCell.X) :
addreward -= 50
if math.fabs( targetCell.Y - state.curCell.Y ) > math.fabs(targetCell.Y - state.realCell.Y) :
addreward += 20
if math.fabs(targetCell.Y - state.realCell.Y) <= 1 :
addreward += 5
if math.fabs( targetCell.Y - state.curCell.Y ) < math.fabs(targetCell.Y - state.realCell.Y) :
addreward -40
state.reward -= extraReward
state.reward += addreward
print("a reward ",state.reward)
self.path.append([state1,action,state2,state.reward,state.c])
flag = self.checkGoal(state2)
if flag :
for i in range(len(self.path)):
self.path[i][3] += 10
if flag :
return realstate , False , self.path , state.reward , action
else:
return realstate , True , self.path , state.reward , action
def changeRewardOnPath(self,reward,rate=0.5,axis=0):
for i in range(len(self.path)-1,-1,-1):
cell1 = self.getCellFromStateNum(self.path[i][0])
cell2 = self.getCellFromStateNum(self.path[i][2])
targetCell = self.getCellFromStateNum(self.Target)
if math.fabs( targetCell.X - cell1.X ) > math.fabs(targetCell.X - cell2.X) :
self.path[i][3] += reward
if reward > 5 :
reward = reward * rate
else:
reward = 5
def checkGoal(self,state):
if state == self.Target :
print("get target ",self.getCellFromStateNum(self.Target))
return True
return False
def clearPath(self):
self.path = []
#根据地图中的cell获取状态编号
def getStateNum(self,cell):
num = cell.Y * self.cols + cell.X
return num
#根据状态编号获取地图中cell
def getCellFromStateNum(self,num):
y = num / self.cols
x = num - self.cols * y
cell = Cell(x,y)
ret | if self.Map != None :
print(self.getStateNum(self.Map.getCell(pos)))
# mapspace = Map()
# world = World()
# def getMap(data):
# global mapspace
# data = data.data
# mapspace.setMap(data)
# def getMapMetaData(data):
# global mapspace
# mapspace.setMapParameters(data)
# # mapspace.Print()
# def getPosition(data):
# # print(data)
# global mapspace , world
# mapspace.setPosition(data.pose.pose.position)
# world.getPositionStateNum(data.pose.pose.position)
# if __name__ == '__main__' :
# # world = World()
# # print(world.cols)
# # state = 2
# # print(world.state_to_coordinate(state))
# # print(world.simulation(state,1))
# # print(world.state_to_coordinate(state))
# # print(world.state_to_coordinate(3))
# posePub = rospy.Publisher("/move_base_simple/goal",PoseStamped,queue_size=10)
# modelpositionPub = rospy.Publisher("/gazebo/set_model_state",ModelState,queue_size=10)
# rospy.init_node("test",anonymous=False)
# rospy.Subscriber("/map",OccupancyGrid,callback=getMap)
# rospy.Subscriber("/map_metadata",MapMetaData,getMapMetaData)
# rospy.Subscriber("/amcl_pose",PoseWithCovarianceStamped,getPosition)
# mapspace.init()
# # sim = rospy.get_param('/use_sim_time')
# # print(sim)
# while not mapspace.ready() :
# pass
# print("ready")
# mapspace.getNewMap()
# world.setMap(mapspace)
# # times = 0
# # while True:
# # state = mapspace.sendGoal(1,0)
# # if state != None :
# # state.Print()
# # print("send goal %d" % times)
# # times += 1
# # time.sleep(1)
# # if times > 10 :
# # break
# # pass
# rospy.spin() | urn cell
def getPositionStateNum(self,pos):
| identifier_body |
sheetDocument.ts | import _ from "lodash";
import {CM_TO_PX} from "../constants";
import {roundNumber} from "./utils";
// eslint-disable-next-line
import StaffToken from "./staffToken";
// eslint-disable-next-line
import * as LilyNotation from "../lilyNotation";
interface SheetMarkingData {
id: string;
text: string;
x: number;
y: number;
cls: string;
}
export interface SheetMeasure {
index: number;
tokens: StaffToken[];
headX: number;
lineX?: number;
matchedTokens?: StaffToken[]; // for baking mode
noteRange: {
begin: number,
end: number,
};
class?: {[key: string]: boolean};
};
export interface SheetStaff {
measures: SheetMeasure[];
tokens: StaffToken[];
markings?: Partial<SheetMarkingData>[];
// the third staff line Y coordinate value
// The third staff line Y supposed to be zero, but regarding to the line stroke width,
// there is some error for original values in SVG document (which erased by coordinate rounding).
yRoundOffset?: number; // 0.0657 for default
x: number;
y: number;
top?: number;
headWidth?: number;
};
export interface SheetSystem {
index?: number;
pageIndex?: number;
measureIndices?: [number, number][]; // [end_x, index]
staves: SheetStaff[];
tokens: StaffToken[];
x: number;
y: number;
width?: number;
top: number;
bottom: number;
};
export interface SheetPage {
width: string;
height: string;
viewBox: {
x: number,
y: number,
width: number,
height: number,
};
systems: SheetSystem[];
tokens: StaffToken[];
hidden?: boolean;
// DEPRECATED
rows?: SheetSystem[];
};
/*const ALTER_PREFIXES = {
[-2]: "\u266D\u266D",
[-1]: "\u266D",
[0]: "\u266E",
[1]: "\u266F",
[2]: "\uD834\uDD2A",
};*/
// char codes defined in music font
const ALTER_PREFIXES = {
[-2]: "\ue02a",
[-1]: "\ue021",
[0]: "\ue01d",
[1]: "\ue013",
[2]: "\ue01c",
};
let sheetMarkingIndex = 0;
class SheetMarking {
alter?: number;
index: number; // as v-for key
id?: string;
text?: string;
x?: number;
y?: number;
cls?: string;
constructor (fields: Partial<SheetMarkingData>) {
this.index = sheetMarkingIndex++;
Object.assign(this, fields);
}
get alterText (): string {
return Number.isInteger(this.alter) ? ALTER_PREFIXES[this.alter] : null;
}
};
const parseUnitExp = exp => {
if (/[\d.]+mm/.test(exp)) {
const [value] = exp.match(/[\d.]+/);
return Number(value) * 0.1 * CM_TO_PX;
}
return Number(exp);
};
type MeasureLocationTable = {[key: number]: {[key: number]: number}};
const cc = <T>(arrays: T[][]): T[] => [].concat(...arrays);
class SheetDocument {
pages: SheetPage[];
constructor (fields: Partial<SheetDocument>, {initialize = true} = {}) {
Object.assign(this, fields);
if (initialize)
this.updateTokenIndex();
}
get systems (): SheetSystem[] {
return [].concat(...this.pages.map(page => page.systems));
}
// DEPRECATED
get rows (): SheetSystem[] {
return this.systems;
}
get trackCount (): number{
return Math.max(...this.systems.map(system => system.staves.length), 0);
}
get pageSize (): {width: number, height: number} {
const page = this.pages && this.pages[0];
if (!page)
return null;
return {
width: parseUnitExp(page.width),
height: parseUnitExp(page.height),
};
}
updateTokenIndex () {
// remove null pages for broken document
this.pages = this.pages.filter(page => page);
this.pages.forEach((page, index) => page.systems.forEach(system => system.pageIndex = index));
let rowMeasureIndex = 1;
this.systems.forEach((system, index) => {
system.index = index;
system.width = system.tokens.concat(...system.staves.map(staff => staff.tokens))
.reduce((max, token) => Math.max(max, token.x), 0);
system.measureIndices = [];
system.staves = system.staves.filter(s => s);
system.staves.forEach((staff, t) => {
staff.measures.forEach((measure, i) => {
measure.index = rowMeasureIndex + i;
measure.class = {};
measure.tokens.forEach(token => {
token.system = index;
token.measure = measure.index;
token.endX = measure.noteRange.end;
});
measure.lineX = measure.lineX || 0;
if (i < staff.measures.length - 1)
staff.measures[i + 1].lineX = measure.noteRange.end;
if (t === 0)
system.measureIndices.push([measure.noteRange.end, measure.index]);
});
staff.markings = [];
staff.yRoundOffset = 0;
const line = staff.tokens.find(token => token.is("STAFF_LINE"));
if (line)
staff.yRoundOffset = line.y - line.ry;
});
rowMeasureIndex += Math.max(...system.staves.map(staff => staff.measures.length));
});
}
updateMatchedTokens (matchedIds: Set<string>) {
this.systems.forEach(system => {
system.staves.forEach(staff =>
staff.measures.forEach(measure => {
measure.matchedTokens = measure.tokens.filter(token => token.href && matchedIds.has(token.href));
if (!staff.yRoundOffset) {
const token = measure.matchedTokens[0];
if (token)
staff.yRoundOffset = token.y - token.ry;
}
}));
});
}
addMarking (systemIndex: number, staffIndex: number, data: Partial<SheetMarkingData>): SheetMarking {
const system = this.systems[systemIndex];
if (!system) {
console.warn("system index out of range:", systemIndex, this.systems.length);
return;
}
const staff = system.staves[staffIndex];
if (!staff) {
console.warn("staff index out of range:", staffIndex, system.staves.length);
return;
}
const marking = new SheetMarking(data);
staff.markings.push(marking);
return marking;
}
removeMarking (id: string) {
this.systems.forEach(system => system.staves.forEach(staff =>
staff.markings = staff.markings.filter(marking => marking.id !== id)));
}
clearMarkings () {
this.systems.forEach(system => system.staves.forEach(staff => staff.markings = []));
}
toJSON (): object {
return {
__prototype: "SheetDocument",
pages: this.pages,
};
}
getLocationTable (): MeasureLocationTable {
const table = {};
this.systems.forEach(system => system.staves.forEach(staff => staff.measures.forEach(measure => {
measure.tokens.forEach(token => {
if (token.href) {
const location = token.href.match(/\d+/g);
if (location) {
const [line, column] = location.map(Number);
table[line] = table[line] || {};
table[line][column] = Number.isFinite(table[line][column]) ? Math.min(table[line][column], measure.index) : measure.index;
}
else
console.warn("invalid href:", token.href);
}
});
})));
return table;
}
lookupMeasureIndex (systemIndex: number, x: number): number {
const system = this.systems[systemIndex];
if (!system || !system.measureIndices)
return null;
const [_, index] = system.measureIndices.find(([end]) => x < end) || [null, null];
return index;
}
tokensInSystem (systemIndex: number): StaffToken[] {
const system = this.systems[systemIndex];
if (!system)
return null;
return system.staves.reduce((tokens, staff) => {
const translate = token => token.translate({x: staff.x, y: staff.y});
tokens.push(...staff.tokens.map(translate));
staff.measures.forEach(measure => tokens.push(...measure.tokens.map(translate)));
return tokens;
}, [...system.tokens]);
}
tokensInPage (pageIndex: number, {withPageTokens = false} = {}): StaffToken[] {
const page = this.pages[pageIndex];
if (!page)
return null;
return page.systems.reduce((tokens, system) => {
tokens.push(...this.tokensInSystem(system.index).map(token => token.translate({x: system.x, y: system.y})));
return tokens;
}, withPageTokens ? [...page.tokens] : []);
}
fitPageViewbox ({margin = 5, verticalCropOnly = false, pageTokens = false} = {}) {
if (!this.pages || !this.pages.length)
return;
const svgScale = this.pageSize.width / this.pages[0].viewBox.width;
this.pages.forEach((page, i) => {
const rects = page.systems.filter(system => Number.isFinite(system.x + system.width + system.y + system.top + system.bottom))
.map(system => [system.x, system.x + system.width, system.y + system.top, system.y + system.bottom ]);
const tokens = this.tokensInPage(i, {withPageTokens: pageTokens}) || [];
const tokenXs = tokens.map(token => token.x).filter(Number.isFinite);
const tokenYs = tokens.map(token => token.y).filter(Number.isFinite);
//console.debug("tokens:", i, tokens, tokenXs, tokenYs);
if (!rects.length)
return;
| const bottom = Math.max(...rects.map(rect => rect[3]), ...tokenYs);
const x = verticalCropOnly ? page.viewBox.x : left - margin;
const y = (verticalCropOnly && i === 0) ? page.viewBox.y : top - margin;
const width = verticalCropOnly ? page.viewBox.width : right - left + margin * 2;
const height = (verticalCropOnly && i === 0) ? bottom + margin - y : bottom - top + margin * 2;
page.viewBox = {x, y, width, height};
page.width = (page.viewBox.width * svgScale).toString();
page.height = (page.viewBox.height * svgScale).toString();
});
}
getTokensOf (symbol: string): StaffToken[] {
return this.systems.reduce((tokens, system) => {
system.staves.forEach(staff => staff.measures.forEach(measure =>
tokens.push(...measure.tokens.filter(token => token.is(symbol)))));
return tokens;
}, []);
}
getNoteHeads (): StaffToken[] {
return this.getTokensOf("NOTEHEAD");
}
getNotes (): StaffToken[] {
return this.getTokensOf("NOTE");
}
getTokenMap (): Map<string, StaffToken> {
return this.systems.reduce((tokenMap, system) => {
system.staves.forEach(staff => staff.measures.forEach(measure => measure.tokens
.filter(token => token.href)
.forEach(token => tokenMap.set(token.href, token))));
return tokenMap;
}, new Map<string, StaffToken>());
}
findTokensAround (token: StaffToken, indices: number[]): StaffToken[] {
const system = this.systems[token.system];
if (system) {
const tokens = [
...system.tokens,
...cc(system.staves.map(staff => [
...staff.tokens,
...cc(staff.measures.map(measure => measure.tokens)),
])),
];
return tokens.filter(token => indices.includes(token.index));
}
return null;
}
findTokenAround (token: StaffToken, index: number): StaffToken {
const results = this.findTokensAround(token, [index]);
return results && results[0];
}
alignTokensWithNotation (notation: LilyNotation.Notation, {partial = false, assignFlags = false} = {}) {
const shortId = (href: string): string => href.split(":").slice(0, 2).join(":");
const noteTokens = this.getNotes();
const tokenMap = noteTokens.reduce((map, token) => {
const sid = token.href && shortId(token.href);
const tokens = map.get(sid) || [];
// shift column for command chord element
if (/^\\/.test(token.source)) {
const spaceCapture = token.source.match(/(?<=\s+)(\S|$)/);
if (spaceCapture) {
const [line, column] = token.href.match(/\d+/g).map(Number);
map.set(`${line}:${column + spaceCapture.index}`, [token]);
return map;
}
else
console.warn("unresolved command chord element:", token.source, token);
}
tokens.push(token);
token.href && map.set(sid, tokens);
return map;
}, new Map<string, StaffToken[]>());
//console.assert(tokenMap.size === noteTokens.length, "tokens noteTokens count dismatch:", tokenMap.size, noteTokens.length);
const tokenTickMap = new Map<StaffToken, {measureTick: number, tick: number}>();
// assign tick & track
notation.measures.forEach((measure, mi) => {
const pendingStems = new Map<StaffToken, StaffToken>(); // stem -> beam
measure.notes.forEach(note => {
const tokens = tokenMap.get(shortId(note.id));
if (tokens) {
tokens.forEach(token => {
token.href = note.id;
if (!Number.isFinite(token.tick)) {
tokenTickMap.set(token, {measureTick: measure.tick, tick: measure.tick + note.tick});
token.pitch = note.pitch;
token.track = note.track;
if (token.stems) {
const stems = this.findTokensAround(token, token.stems);
if (stems) {
const stem = stems.find(stem => stem.division === note.division && !Number.isFinite(stem.track));
if (stem) {
stem.track = note.track;
if (stem.beam >= 0) {
const beam = this.findTokenAround(stem, stem.beam);
if (stems.length < 2 || stems[0].division !== stems[1].division)
beam.track = stem.track;
}
}
else if (!stems.find(stem => stem.division === note.division))
console.warn("missed stem:", mi, token.href, note.division, token.stems, stems.map(stem => stem.division));
stems.forEach(stem => {
tokenTickMap.set(stem, {measureTick: measure.tick, tick: measure.tick + note.tick});
if (stems.length > 1 && stem.beam >= 0) {
const beam = this.findTokenAround(stem, stem.beam);
if (beam)
pendingStems.set(stem, beam);
}
});
}
else
console.warn("stems token missing:", token.system, token.stems, mi, token.href);
}
}
});
}
else if (!partial)
note.overlapped = true;
});
//if (pendingStems.size)
// console.log("pendingStems:", mi, [...pendingStems].map(s => s.index));
for (const [stem, beam] of pendingStems) {
if (Number.isFinite(beam.track))
stem.track = beam.track;
}
});
const tokenTickMapKeys = Array.from(tokenTickMap.keys());
this.systems.forEach(system => {
system.staves.forEach(staff => staff.measures.forEach(measure => {
const tokens = measure.tokens.filter(token => tokenTickMapKeys.includes(token));
const meastureTick = tokens.reduce((tick, token) => Math.min(tokenTickMap.get(token).measureTick, tick), Infinity);
tokens.forEach(token => token.tick = tokenTickMap.get(token).tick - meastureTick);
}));
});
if (assignFlags)
this.assignFlagsTrack();
}
assignFlagsTrack () {
const flags = this.getTokensOf("FLAG");
flags.forEach(flag => {
if (Number.isFinite(flag.stem)) {
const stem = this.findTokenAround(flag, flag.stem);
if (stem && Number.isFinite(stem.track))
flag.track = stem.track;
}
});
}
pruneForBakingMode () {
const round = x => roundNumber(x, 1e-4);
this.pages.forEach(page => {
page.tokens = [];
page.systems.forEach(system => {
system.tokens = [];
system.measureIndices = system.measureIndices && system.measureIndices.map(([x, i]) => [round(x), i]);
system.staves.forEach(staff => {
staff.tokens = [];
staff.yRoundOffset = round(staff.yRoundOffset);
delete staff.top;
delete staff.headWidth;
staff.measures.forEach(measure => {
measure.headX = round(measure.headX);
measure.lineX = round(measure.lineX);
measure.noteRange = {
begin: round(measure.noteRange.begin),
end: round(measure.noteRange.end),
};
measure.tokens = measure.matchedTokens.map(token => new StaffToken(_.pick(token, [
"x", "y", "symbol", "href", "scale", "tied",
])));
delete measure.matchedTokens;
});
});
});
});
}
appendLinkedTokensForStaves (): void {
const doneTokens = new Set();
const appendLink = (staff: SheetStaff, oldStaff: SheetStaff, token: StaffToken): void => {
if (doneTokens.has(token.index))
return;
//console.log("appendLink:", staff, oldStaff, token);
const dy = staff.y - oldStaff.y;
const measure = staff.measures.find(measure => measure.noteRange.end >= token.x);
if (measure) {
const newToken = new StaffToken({...token, symbols: new Set(), y: token.y - dy, ry: token.ry - dy});
token.addSymbol("ACROSS_STAVES");
newToken.addSymbol("ACROSS_STAVES");
newToken.addSymbol("DUPLICATED");
measure.tokens.push(newToken);
}
else
console.warn("appendLink failed, because no fit measure:", staff.measures, token);
doneTokens.add(token.index);
};
this.pages.forEach(page => {
const tokens: StaffToken[] = (page.systems
.map(system => system.staves
.map(staff => staff.measures
.map(measure => measure.tokens))) as any).flat(3);
const tokenStaffTable: Record<number, SheetStaff> = page.systems
.reduce((table, system) => system.staves
.reduce((table, staff) => staff.measures
.reduce((table, measure) => measure.tokens
.reduce((table, token) => {
table[token.index] = staff;
return table;
}, table), table), table), {});
//console.log("tokenStaffTable:", tokenStaffTable);
tokens.forEach(token => {
if (token.stems) {
const staff = tokenStaffTable[token.index];
token.stems.forEach(stem => {
if (tokenStaffTable[stem] !== staff)
appendLink(tokenStaffTable[stem], staff, token);
});
}
});
});
}
};
export default SheetDocument; | const left = Math.min(...rects.map(rect => rect[0]), ...tokenXs);
const right = Math.max(...rects.map(rect => rect[1]), ...tokenXs);
const top = Math.min(...rects.map(rect => rect[2]), ...tokenYs); | random_line_split |
sheetDocument.ts |
import _ from "lodash";
import {CM_TO_PX} from "../constants";
import {roundNumber} from "./utils";
// eslint-disable-next-line
import StaffToken from "./staffToken";
// eslint-disable-next-line
import * as LilyNotation from "../lilyNotation";
interface SheetMarkingData {
id: string;
text: string;
x: number;
y: number;
cls: string;
}
export interface SheetMeasure {
index: number;
tokens: StaffToken[];
headX: number;
lineX?: number;
matchedTokens?: StaffToken[]; // for baking mode
noteRange: {
begin: number,
end: number,
};
class?: {[key: string]: boolean};
};
export interface SheetStaff {
measures: SheetMeasure[];
tokens: StaffToken[];
markings?: Partial<SheetMarkingData>[];
// the third staff line Y coordinate value
// The third staff line Y supposed to be zero, but regarding to the line stroke width,
// there is some error for original values in SVG document (which erased by coordinate rounding).
yRoundOffset?: number; // 0.0657 for default
x: number;
y: number;
top?: number;
headWidth?: number;
};
export interface SheetSystem {
index?: number;
pageIndex?: number;
measureIndices?: [number, number][]; // [end_x, index]
staves: SheetStaff[];
tokens: StaffToken[];
x: number;
y: number;
width?: number;
top: number;
bottom: number;
};
export interface SheetPage {
width: string;
height: string;
viewBox: {
x: number,
y: number,
width: number,
height: number,
};
systems: SheetSystem[];
tokens: StaffToken[];
hidden?: boolean;
// DEPRECATED
rows?: SheetSystem[];
};
/*const ALTER_PREFIXES = {
[-2]: "\u266D\u266D",
[-1]: "\u266D",
[0]: "\u266E",
[1]: "\u266F",
[2]: "\uD834\uDD2A",
};*/
// char codes defined in music font
const ALTER_PREFIXES = {
[-2]: "\ue02a",
[-1]: "\ue021",
[0]: "\ue01d",
[1]: "\ue013",
[2]: "\ue01c",
};
let sheetMarkingIndex = 0;
class SheetMarking {
alter?: number;
index: number; // as v-for key
id?: string;
text?: string;
x?: number;
y?: number;
cls?: string;
constructor (fields: Partial<SheetMarkingData>) {
this.index = sheetMarkingIndex++;
Object.assign(this, fields);
}
get alterText (): string {
return Number.isInteger(this.alter) ? ALTER_PREFIXES[this.alter] : null;
}
};
const parseUnitExp = exp => {
if (/[\d.]+mm/.test(exp)) {
const [value] = exp.match(/[\d.]+/);
return Number(value) * 0.1 * CM_TO_PX;
}
return Number(exp);
};
type MeasureLocationTable = {[key: number]: {[key: number]: number}};
const cc = <T>(arrays: T[][]): T[] => [].concat(...arrays);
class SheetDocument {
pages: SheetPage[];
constructor (fields: Partial<SheetDocument>, {initialize = true} = {}) {
Object.assign(this, fields);
if (initialize)
this.updateTokenIndex();
}
get systems (): SheetSystem[] {
return [].concat(...this.pages.map(page => page.systems));
}
// DEPRECATED
get rows (): SheetSystem[] {
return this.systems;
}
get | (): number{
return Math.max(...this.systems.map(system => system.staves.length), 0);
}
get pageSize (): {width: number, height: number} {
const page = this.pages && this.pages[0];
if (!page)
return null;
return {
width: parseUnitExp(page.width),
height: parseUnitExp(page.height),
};
}
updateTokenIndex () {
// remove null pages for broken document
this.pages = this.pages.filter(page => page);
this.pages.forEach((page, index) => page.systems.forEach(system => system.pageIndex = index));
let rowMeasureIndex = 1;
this.systems.forEach((system, index) => {
system.index = index;
system.width = system.tokens.concat(...system.staves.map(staff => staff.tokens))
.reduce((max, token) => Math.max(max, token.x), 0);
system.measureIndices = [];
system.staves = system.staves.filter(s => s);
system.staves.forEach((staff, t) => {
staff.measures.forEach((measure, i) => {
measure.index = rowMeasureIndex + i;
measure.class = {};
measure.tokens.forEach(token => {
token.system = index;
token.measure = measure.index;
token.endX = measure.noteRange.end;
});
measure.lineX = measure.lineX || 0;
if (i < staff.measures.length - 1)
staff.measures[i + 1].lineX = measure.noteRange.end;
if (t === 0)
system.measureIndices.push([measure.noteRange.end, measure.index]);
});
staff.markings = [];
staff.yRoundOffset = 0;
const line = staff.tokens.find(token => token.is("STAFF_LINE"));
if (line)
staff.yRoundOffset = line.y - line.ry;
});
rowMeasureIndex += Math.max(...system.staves.map(staff => staff.measures.length));
});
}
updateMatchedTokens (matchedIds: Set<string>) {
this.systems.forEach(system => {
system.staves.forEach(staff =>
staff.measures.forEach(measure => {
measure.matchedTokens = measure.tokens.filter(token => token.href && matchedIds.has(token.href));
if (!staff.yRoundOffset) {
const token = measure.matchedTokens[0];
if (token)
staff.yRoundOffset = token.y - token.ry;
}
}));
});
}
addMarking (systemIndex: number, staffIndex: number, data: Partial<SheetMarkingData>): SheetMarking {
const system = this.systems[systemIndex];
if (!system) {
console.warn("system index out of range:", systemIndex, this.systems.length);
return;
}
const staff = system.staves[staffIndex];
if (!staff) {
console.warn("staff index out of range:", staffIndex, system.staves.length);
return;
}
const marking = new SheetMarking(data);
staff.markings.push(marking);
return marking;
}
removeMarking (id: string) {
this.systems.forEach(system => system.staves.forEach(staff =>
staff.markings = staff.markings.filter(marking => marking.id !== id)));
}
clearMarkings () {
this.systems.forEach(system => system.staves.forEach(staff => staff.markings = []));
}
toJSON (): object {
return {
__prototype: "SheetDocument",
pages: this.pages,
};
}
getLocationTable (): MeasureLocationTable {
const table = {};
this.systems.forEach(system => system.staves.forEach(staff => staff.measures.forEach(measure => {
measure.tokens.forEach(token => {
if (token.href) {
const location = token.href.match(/\d+/g);
if (location) {
const [line, column] = location.map(Number);
table[line] = table[line] || {};
table[line][column] = Number.isFinite(table[line][column]) ? Math.min(table[line][column], measure.index) : measure.index;
}
else
console.warn("invalid href:", token.href);
}
});
})));
return table;
}
lookupMeasureIndex (systemIndex: number, x: number): number {
const system = this.systems[systemIndex];
if (!system || !system.measureIndices)
return null;
const [_, index] = system.measureIndices.find(([end]) => x < end) || [null, null];
return index;
}
tokensInSystem (systemIndex: number): StaffToken[] {
const system = this.systems[systemIndex];
if (!system)
return null;
return system.staves.reduce((tokens, staff) => {
const translate = token => token.translate({x: staff.x, y: staff.y});
tokens.push(...staff.tokens.map(translate));
staff.measures.forEach(measure => tokens.push(...measure.tokens.map(translate)));
return tokens;
}, [...system.tokens]);
}
tokensInPage (pageIndex: number, {withPageTokens = false} = {}): StaffToken[] {
const page = this.pages[pageIndex];
if (!page)
return null;
return page.systems.reduce((tokens, system) => {
tokens.push(...this.tokensInSystem(system.index).map(token => token.translate({x: system.x, y: system.y})));
return tokens;
}, withPageTokens ? [...page.tokens] : []);
}
fitPageViewbox ({margin = 5, verticalCropOnly = false, pageTokens = false} = {}) {
if (!this.pages || !this.pages.length)
return;
const svgScale = this.pageSize.width / this.pages[0].viewBox.width;
this.pages.forEach((page, i) => {
const rects = page.systems.filter(system => Number.isFinite(system.x + system.width + system.y + system.top + system.bottom))
.map(system => [system.x, system.x + system.width, system.y + system.top, system.y + system.bottom ]);
const tokens = this.tokensInPage(i, {withPageTokens: pageTokens}) || [];
const tokenXs = tokens.map(token => token.x).filter(Number.isFinite);
const tokenYs = tokens.map(token => token.y).filter(Number.isFinite);
//console.debug("tokens:", i, tokens, tokenXs, tokenYs);
if (!rects.length)
return;
const left = Math.min(...rects.map(rect => rect[0]), ...tokenXs);
const right = Math.max(...rects.map(rect => rect[1]), ...tokenXs);
const top = Math.min(...rects.map(rect => rect[2]), ...tokenYs);
const bottom = Math.max(...rects.map(rect => rect[3]), ...tokenYs);
const x = verticalCropOnly ? page.viewBox.x : left - margin;
const y = (verticalCropOnly && i === 0) ? page.viewBox.y : top - margin;
const width = verticalCropOnly ? page.viewBox.width : right - left + margin * 2;
const height = (verticalCropOnly && i === 0) ? bottom + margin - y : bottom - top + margin * 2;
page.viewBox = {x, y, width, height};
page.width = (page.viewBox.width * svgScale).toString();
page.height = (page.viewBox.height * svgScale).toString();
});
}
getTokensOf (symbol: string): StaffToken[] {
return this.systems.reduce((tokens, system) => {
system.staves.forEach(staff => staff.measures.forEach(measure =>
tokens.push(...measure.tokens.filter(token => token.is(symbol)))));
return tokens;
}, []);
}
getNoteHeads (): StaffToken[] {
return this.getTokensOf("NOTEHEAD");
}
getNotes (): StaffToken[] {
return this.getTokensOf("NOTE");
}
getTokenMap (): Map<string, StaffToken> {
return this.systems.reduce((tokenMap, system) => {
system.staves.forEach(staff => staff.measures.forEach(measure => measure.tokens
.filter(token => token.href)
.forEach(token => tokenMap.set(token.href, token))));
return tokenMap;
}, new Map<string, StaffToken>());
}
findTokensAround (token: StaffToken, indices: number[]): StaffToken[] {
const system = this.systems[token.system];
if (system) {
const tokens = [
...system.tokens,
...cc(system.staves.map(staff => [
...staff.tokens,
...cc(staff.measures.map(measure => measure.tokens)),
])),
];
return tokens.filter(token => indices.includes(token.index));
}
return null;
}
findTokenAround (token: StaffToken, index: number): StaffToken {
const results = this.findTokensAround(token, [index]);
return results && results[0];
}
alignTokensWithNotation (notation: LilyNotation.Notation, {partial = false, assignFlags = false} = {}) {
const shortId = (href: string): string => href.split(":").slice(0, 2).join(":");
const noteTokens = this.getNotes();
const tokenMap = noteTokens.reduce((map, token) => {
const sid = token.href && shortId(token.href);
const tokens = map.get(sid) || [];
// shift column for command chord element
if (/^\\/.test(token.source)) {
const spaceCapture = token.source.match(/(?<=\s+)(\S|$)/);
if (spaceCapture) {
const [line, column] = token.href.match(/\d+/g).map(Number);
map.set(`${line}:${column + spaceCapture.index}`, [token]);
return map;
}
else
console.warn("unresolved command chord element:", token.source, token);
}
tokens.push(token);
token.href && map.set(sid, tokens);
return map;
}, new Map<string, StaffToken[]>());
//console.assert(tokenMap.size === noteTokens.length, "tokens noteTokens count dismatch:", tokenMap.size, noteTokens.length);
const tokenTickMap = new Map<StaffToken, {measureTick: number, tick: number}>();
// assign tick & track
notation.measures.forEach((measure, mi) => {
const pendingStems = new Map<StaffToken, StaffToken>(); // stem -> beam
measure.notes.forEach(note => {
const tokens = tokenMap.get(shortId(note.id));
if (tokens) {
tokens.forEach(token => {
token.href = note.id;
if (!Number.isFinite(token.tick)) {
tokenTickMap.set(token, {measureTick: measure.tick, tick: measure.tick + note.tick});
token.pitch = note.pitch;
token.track = note.track;
if (token.stems) {
const stems = this.findTokensAround(token, token.stems);
if (stems) {
const stem = stems.find(stem => stem.division === note.division && !Number.isFinite(stem.track));
if (stem) {
stem.track = note.track;
if (stem.beam >= 0) {
const beam = this.findTokenAround(stem, stem.beam);
if (stems.length < 2 || stems[0].division !== stems[1].division)
beam.track = stem.track;
}
}
else if (!stems.find(stem => stem.division === note.division))
console.warn("missed stem:", mi, token.href, note.division, token.stems, stems.map(stem => stem.division));
stems.forEach(stem => {
tokenTickMap.set(stem, {measureTick: measure.tick, tick: measure.tick + note.tick});
if (stems.length > 1 && stem.beam >= 0) {
const beam = this.findTokenAround(stem, stem.beam);
if (beam)
pendingStems.set(stem, beam);
}
});
}
else
console.warn("stems token missing:", token.system, token.stems, mi, token.href);
}
}
});
}
else if (!partial)
note.overlapped = true;
});
//if (pendingStems.size)
// console.log("pendingStems:", mi, [...pendingStems].map(s => s.index));
for (const [stem, beam] of pendingStems) {
if (Number.isFinite(beam.track))
stem.track = beam.track;
}
});
const tokenTickMapKeys = Array.from(tokenTickMap.keys());
this.systems.forEach(system => {
system.staves.forEach(staff => staff.measures.forEach(measure => {
const tokens = measure.tokens.filter(token => tokenTickMapKeys.includes(token));
const meastureTick = tokens.reduce((tick, token) => Math.min(tokenTickMap.get(token).measureTick, tick), Infinity);
tokens.forEach(token => token.tick = tokenTickMap.get(token).tick - meastureTick);
}));
});
if (assignFlags)
this.assignFlagsTrack();
}
assignFlagsTrack () {
const flags = this.getTokensOf("FLAG");
flags.forEach(flag => {
if (Number.isFinite(flag.stem)) {
const stem = this.findTokenAround(flag, flag.stem);
if (stem && Number.isFinite(stem.track))
flag.track = stem.track;
}
});
}
pruneForBakingMode () {
const round = x => roundNumber(x, 1e-4);
this.pages.forEach(page => {
page.tokens = [];
page.systems.forEach(system => {
system.tokens = [];
system.measureIndices = system.measureIndices && system.measureIndices.map(([x, i]) => [round(x), i]);
system.staves.forEach(staff => {
staff.tokens = [];
staff.yRoundOffset = round(staff.yRoundOffset);
delete staff.top;
delete staff.headWidth;
staff.measures.forEach(measure => {
measure.headX = round(measure.headX);
measure.lineX = round(measure.lineX);
measure.noteRange = {
begin: round(measure.noteRange.begin),
end: round(measure.noteRange.end),
};
measure.tokens = measure.matchedTokens.map(token => new StaffToken(_.pick(token, [
"x", "y", "symbol", "href", "scale", "tied",
])));
delete measure.matchedTokens;
});
});
});
});
}
appendLinkedTokensForStaves (): void {
const doneTokens = new Set();
const appendLink = (staff: SheetStaff, oldStaff: SheetStaff, token: StaffToken): void => {
if (doneTokens.has(token.index))
return;
//console.log("appendLink:", staff, oldStaff, token);
const dy = staff.y - oldStaff.y;
const measure = staff.measures.find(measure => measure.noteRange.end >= token.x);
if (measure) {
const newToken = new StaffToken({...token, symbols: new Set(), y: token.y - dy, ry: token.ry - dy});
token.addSymbol("ACROSS_STAVES");
newToken.addSymbol("ACROSS_STAVES");
newToken.addSymbol("DUPLICATED");
measure.tokens.push(newToken);
}
else
console.warn("appendLink failed, because no fit measure:", staff.measures, token);
doneTokens.add(token.index);
};
this.pages.forEach(page => {
const tokens: StaffToken[] = (page.systems
.map(system => system.staves
.map(staff => staff.measures
.map(measure => measure.tokens))) as any).flat(3);
const tokenStaffTable: Record<number, SheetStaff> = page.systems
.reduce((table, system) => system.staves
.reduce((table, staff) => staff.measures
.reduce((table, measure) => measure.tokens
.reduce((table, token) => {
table[token.index] = staff;
return table;
}, table), table), table), {});
//console.log("tokenStaffTable:", tokenStaffTable);
tokens.forEach(token => {
if (token.stems) {
const staff = tokenStaffTable[token.index];
token.stems.forEach(stem => {
if (tokenStaffTable[stem] !== staff)
appendLink(tokenStaffTable[stem], staff, token);
});
}
});
});
}
};
export default SheetDocument;
| trackCount | identifier_name |
sw_06_cv_functions.py | import cv2
import numpy as np
from numpy.lib.stride_tricks import as_strided
import traceback
import warnings
import numpy
# import rospy
## Software Exercise 6: Choose your category (1 or 2) and replace the cv2 code by your own!
## CATEGORY 1
def inRange(hsv_image, low_range, high_range):
return cv2.inRange(hsv_image, low_range, high_range)
def bitwise_or(bitwise1, bitwise2):
return cv2.bitwise_or(bitwise1, bitwise2)
def bitwise_and(bitwise1, bitwise2):
return cv2.bitwise_and(bitwise1, bitwise2)
def getStructuringElement(shape, size):
return cv2.getStructuringElement(shape, size)
def dilate(bitwise, kernel):
return cv2.dilate(bitwise, kernel)
## CATEGORY 2
def Canny(image, threshold1, threshold2, apertureSize=3):
if apertureSize != 3:
warnings.warn(UserWarning("Using apertureSize of 3, even though a different value was passed."))
apertureSize = 3
try:
dx, dy, edge_gradients = get_image_gradients(image)
gradient_directions = snap_angles(np.arctan2(dy, dx))
if len(image.shape) == 3 and image.shape[2] == 3:
# convert image to grayscale if it isn't already.
image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
filtered_image = non_maximum_suppression(image, edge_gradients, gradient_directions)
result = hysteresis_thresholding(filtered_image, edge_gradients, threshold1, threshold2)
expected = cv2.Canny(image, threshold1, threshold2, apertureSize=3)
# print(result.shape, expected.shape)
# print(set(result.flat), set(expected.flat))
# print(np.mean(result), np.mean(expected))
# print(np.count_nonzero(result), np.count_nonzero(expected))
# print(np.median(result), np.median(expected))
# print(np.max(result), np.max(expected))
return expected
return result
except Exception as e:
traceback.print_exc()
def get_image_gradients(image):
num_channels = image.shape[-1] if len(image.shape) == 3 else 1
if num_channels == 1:
image = image[:,:, np.newaxis]
dxs = np.zeros_like(image)
dys = np.zeros_like(image)
image = image.astype(float)
for channel in range(num_channels):
image[..., channel] = gaussian_blurring(image[..., channel], std=1, kernel_size=5)
dx, dy = image_derivatives(image[..., channel])
dxs[..., channel] = dx
dys[..., channel] = dy
## TODO: comment this out, just testing to check if the sobel operation is the issue
# sobel_x = cv2.Sobel(image, cv2.CV_8U, 1, 0, scale=1, ksize=3)
# sobel_y = cv2.Sobel(image, cv2.CV_8U, 0, 1, scale=1, ksize=3)
# dxs, dys = sobel_x, sobel_y
edge_gradients = np.sqrt(dxs ** 2 + dys ** 2)
# We use the grad magnitude and dx and dy's from the channel with the highest gradient.
max_grad_indices = np.argmax(edge_gradients, axis=-1)
edge_gradients = np.max(edge_gradients, axis=-1)
mask = np.zeros_like(image, dtype=bool)
mask[max_grad_indices] = True
dxs &= mask
dx = np.sum(dxs, axis=-1)
dys &= mask
dy = np.sum(dys, axis=-1)
return dx, dy, edge_gradients
def hysteresis_thresholding(image, image_gradients, min_val, max_val):
"""
Perform hysteresis thresholding using some bitwise magic.
"""
print("BEFORE HYSTERISIS THRESHOLDING:", image)
print("gradients:", image_gradients)
largest_gradient_value = np.max(image_gradients)
while largest_gradient_value < max_val:
print("Largest gradient value:", largest_gradient_value)
warnings.warn(UserWarning("Image has no edge gradients above upper threshold, increasing all gradients values!"))
# return np.zeros_like(image)
image_gradients *= 1.5
largest_gradient_value = np.max(image_gradients)
# print("Largest gradient value:", largest_gradient_value)
# the set of all 'strong' indices.
strong_indices = indices_where(image_gradients >= max_val)
off_indices = indices_where(image_gradients < min_val)
weak_indices = indices_where((min_val <= image_gradients) & (image_gradients < max_val))
image_height = image.shape[0]
image_width = image.shape[1]
# get the neighbours of all strong edges.
# convert their neighbours with weak edges to strong edges.
to_explore = np.zeros_like(image_gradients, dtype=bool)
to_explore[index_with(strong_indices)] = True
explored = np.zeros_like(image_gradients, dtype=bool)
strong = np.zeros_like(image_gradients, dtype=bool)
strong[index_with(strong_indices)] = True
# print("strong:", strong)
weak = np.zeros_like(image_gradients, dtype=bool)
weak[index_with(weak_indices)] = True
unexplored_indices = aggregate(np.nonzero(to_explore))
# print("unexplored (initial):", [str(v) for v in unexplored])
# print("weak indices (initial):", [str(v) for v in weak_indices])
# print("off indices (initial):", [str(v) for v in off_indices])
already_explored = np.zeros_like(to_explore)
while len(unexplored_indices) > 0:
# print("exploring indices ", [str(v) for v in indices])
# print(indices)
neighbours = neighbourhood(unexplored_indices, image_width, image_height)
is_neighbour = np.zeros_like(weak)
is_neighbour[index_with(neighbours)] = True
is_weak_neighbour = is_neighbour & weak
weak_neighbours = aggregate(np.nonzero(is_weak_neighbour))
# weak_neighbours = common_rows_between(neighbours, weak_indices)
# print("The neighbours of (", ",".join(str(pixel) for pixel in indices), ") are ", neighbours)
# print("weak neighbours:", [str(v) for v in weak_neighbours])
strong[index_with(weak_neighbours)] = True
weak[index_with(weak_neighbours)] = False
# mark that we need to explore these:
already_explored[index_with(unexplored_indices)] = True
# explore the indices of the weak neighbours, if they haven't been explored already.
to_explore[index_with(weak_neighbours)] = True
# do not re-explore already explored indices.
to_explore &= ~already_explored
unexplored_indices = aggregate(np.nonzero(to_explore))
out = np.zeros_like(image_gradients)
out[~strong] = 0
out[strong] = 255
print("AFTER HYSTERISIS THRESHOLDING:", out)
return out
def aggregate(list_of_indices):
|
def indices_where(condition):
return np.concatenate(np.dstack(np.where(condition)))
def index_with(list_of_indices):
return list_of_indices[:, 0], list_of_indices[:, 1]
def neighbourhood(index, image_width, image_height):
"""Returns the coordinates of the neighbours of a given coordinate or list of coordinates.
Arguments:
index {np.ndarray} -- either a list of coordinates (as an ndarray) or a coordinate itself, in the form (i, j)
NOTE: the pixels neighbours are clipped of (image_height-1, )
Returns:
np.ndarray -- ndarray of shape [?, 2], which contains the indices of the neighbouring pixels
"""
neighbourhoods = np.concatenate(np.dstack((np.indices([3,3]) - 1)))
if len(index.shape) == 2:
neighbourhoods = neighbourhoods[:, np.newaxis, :]
neighbours_and_itself = index + neighbourhoods
keep = np.ones(9, dtype=bool)
keep[4] = False # drop the point itself, but keep the neighbours.
neighbours = neighbours_and_itself[keep]
if len(index.shape) == 2:
neighbours = np.stack(neighbours, axis=1)
mask = np.ones_like(neighbours, dtype=bool)
# remove all neighbours that have either a negative value in them
negative = np.where(neighbours < 0)
mask[negative] = False
# or a value equal to image_height in x
greater_than_image_height = np.where(neighbours[..., 0] >= image_height)
mask[greater_than_image_height] = False
# or image_width in z
greater_than_image_width = np.where(neighbours[..., 1] >= image_height)
mask[greater_than_image_width] = False
# or that correspond to an index in 'index'
tiled = np.expand_dims(index, 1)
tiled = np.tile(tiled, (1, neighbours.shape[1], 1))
equal_to_index = np.equal(neighbours, tiled)
equal_to_index = np.all(equal_to_index, axis=-1)
mask[equal_to_index] = False
mask = np.all(mask, axis=-1)
# print(mask)
# for i, (m, n) in enumerate(zip(mask, neighbours)):
# if len(index.shape) == 2:
# for keep, (i, j) in zip(m, n):
# print("point", i, j, "is good:", keep)
# else:
# keep = m
# i, j = n
# print("point", i, j, "is good:", keep)
neighbours = neighbours[mask]
# get rid of duplicates:
neighbours = np.unique(neighbours, axis=0)
return neighbours
# # print(image[row, col])
# min_x = max(i-1, 0)
# max_x = min(i+1, image_w-1)
# min_y = max(j-1, 0)
# max_y = min(j+1, image_h-1)
# indices = set(
# (x, y)
# for x in range(min_x, max_x + 1)
# for y in range(min_y, max_y + 1)
# )
# print(indices)
# indices.discard((i, j))
# return indices
# # return np.array(indices)
def common_rows_between(array_1, array_2):
"""TAKEN FROM https://stackoverflow.com/questions/8317022/get-intersecting-rows-across-two-2d-numpy-arrays
Arguments:
array_1 {np.ndarray} -- a 2d array
array_2 {np.ndarray} -- another 2d array
Returns:
np.ndarray -- a 2d array containing the common rows in both array_1 and array_2.
"""
nrows, ncols = array_1.shape
dtype={
'names': ['f{}'.format(i) for i in range(ncols)],
'formats': ncols * [array_1.dtype]
}
C = np.intersect1d(array_1.view(dtype), array_2.view(dtype))
# This last bit is optional if you're okay with "C" being a structured array...
C = C.view(array_1.dtype).reshape(-1, ncols)
return C
def non_maximum_suppression(image, image_gradients, gradient_directions):
"""Non-maximum suppression
To be honest, I'm very proud of this piece of code. No for-loops were needed.
Arguments:
image {[type]} -- the image to preform non-maximum suppresion on.
gradient_directions {[type]} -- the gradient directions
"""
print("Before non-maximum suppression:", image)
# Get where to check depending on the "direction"
direction_offset_x = np.round(np.cos(gradient_directions)).astype(int)
direction_offset_y = np.round(np.sin(gradient_directions)).astype(int)
direction_offset = np.dstack((direction_offset_x, direction_offset_y))
# the (i, j) indices of all points in the image.
row, col = np.indices(image.shape)
# in order not to cause any indexing errors, we create a
# padded version of the image with the edge values duplicated.
# a pixel at (row, col) in the image is located at (row+1, col+1) in the padded image.
image_ = np.pad(image, 1, mode="edge")
row_, col_ = row + 1, col + 1
# get the image pixels before and after each pixel in the image.
pixel_middle = image[row, col]
pixel_forward = image_[row_ + direction_offset_x, col_ + direction_offset_y]
pixel_backward = image_[row_ - direction_offset_x, col_ - direction_offset_y]
higher_than_forward = pixel_middle > pixel_forward
higher_than_backward = pixel_middle > pixel_backward
is_local_maximum = higher_than_backward & higher_than_forward
out = np.copy(image)
out[~is_local_maximum] = 0
print("AFTER non-maximum suppression: ", out)
return out
def snap_angles(angles):
"""Snaps a given set of angles to one of the horizontal, vertical, or one of the two diagonal orientations.
Arguments:
angles -- an array of anges in radians
"""
pi_over_four = np.pi / 4
return np.round(angles / pi_over_four) * pi_over_four
def image_derivatives(image):
""" Computes the Sobel X and Y operators for this image.
Loosely based on https://en.wikipedia.org/wiki/Sobel_operator
Arguments:
image {[type]} -- [description]
Returns:
[type] -- [description]
"""
sobel_sign = np.array([[-1, 0, 1]])
sobel_mag = np.array([[1, 2, 1]])
temp1 = conv2d(image, sobel_sign)
image_dx = conv2d(temp1, sobel_mag.T)
temp2 = conv2d(image, sobel_mag)
image_dy = conv2d(temp2, -sobel_sign.T)
return image_dx, image_dy
# save these for comparison
image_dx_1, image_dy_1 = image_dx, image_dy
# Slower alternative (from OpenCV docs):
sobel_x = np.array([
[-1, 0, 1],
[-2, 0, 2],
[-1, 0, 1],
])
image_dx = conv2d(image, sobel_x)
image_dy = conv2d(image, -sobel_x.T)
assert np.all(np.isclose(image_dy, image_dy_1))
assert np.all(np.isclose(image_dx, image_dx_1))
return image_dx, image_dy
def conv2d(x, kernel, stride=1, padding="auto", padding_mode="constant"):
"""
TAKEN AND ADAPTED FROM https://stackoverflow.com/questions/54962004/implement-max-mean-poolingwith-stride-with-numpy
ALSO INSPIRED FROM https://cs231n.github.io/convolutional-networks/
2D Pooling
Parameters:
A: input 2D array
kernel: int, the size of the window
stride: int, the stride of the window
padding: int or string, implicit zero paddings on both sides of the input
"""
# Padding
assert len(kernel.shape) == 2, "kernel should be 2d."
assert kernel.shape[0] % 2 == 1 and kernel.shape[1] % 2 == 1, "only odd-sized kernels are allowed"
kernel_size = kernel.shape[0]
if padding == "auto":
padding = np.array(kernel.shape) // 2
x = np.pad(x, padding, mode=padding_mode)
# Window view of X
output_shape = ((x.shape[0] - kernel.shape[0])//stride + 1,
(x.shape[1] - kernel.shape[1])//stride + 1)
x_w = as_strided(
x,
shape=output_shape + kernel.shape,
strides = (
stride*x.strides[0],
stride*x.strides[1]
) + x.strides
)
# ADAPTATION BELOW:
# patches is [#patches, k, k]
patches = x_w.reshape(-1, *kernel.shape)
flattened_kernel = kernel.flat
flattened_patches = patches.reshape([patches.shape[0], -1])
return np.dot(flattened_patches, flattened_kernel).reshape(output_shape)
def separable_conv2d(x, kernel_1d, stride=1, padding="auto", padding_mode="edge"):
assert len(kernel_1d.shape) == 1, kernel_1d
k = kernel_1d.shape[0]
k1 = kernel_1d.reshape([1, k])
result_1 = conv2d(x, k1, stride, padding, padding_mode)
k2 = k1.T
result_2 = conv2d(result_1, k2, stride, padding, padding_mode)
return result_2
def gaussian_kernel_1d(std=1, kernel_size=5):
x = np.arange(-(kernel_size//2), (kernel_size//2)+1, dtype=float)
g = np.exp(- (x**2 / (2 * std**2))) / (np.sqrt(2 * np.pi) * std)
# normalize the sum to 1
g = g / np.sum(g)
return g
def gaussian_blurring(image, std=1, kernel_size=5):
# # print(kernel)
# kernel = np.expand_dims(kernel, axis=0)
# kernel = kernel.T @ kernel
# kernel /= np.sum(kernel)
# print("BEFORE GAUSSIAN BLURRING:\n", image)
kernel = gaussian_kernel_1d(std, kernel_size)
image1 = separable_conv2d(image, kernel, padding_mode="constant")
# print("AFTER GAUSSIAN BLURRING1:\n", image1)
return image1
# slower alternative from openCV documentation:
kernel = 1 / 159 * np.array([
[2, 4, 5, 4, 2],
[4, 9, 12, 9, 4],
[5, 12, 15, 12, 5],
[4, 9, 12, 9, 4],
[2, 4, 5, 4, 2]
])
image2 = conv2d(image, kernel)
print("AFTER GAUSSIAN BLURRING2:\n", image2)
return image2
def gaussian_derivative_filtering(image, std=1, kernel_size=5):
kernel = gaussian_derivative_1d(std, kernel_size)
return separable_conv2d(image, kernel)
def gaussian_derivative_1d(sigma, kernel_size):
"""
ADAPTED FROM https://github.com/scipy/scipy/blob/5681835ec51b728fa0ea6237d46aa8032b9e1400/scipy/ndimage/filters.py#L136
Computes a 1D Gaussian derivative kernel's
"""
# #0th order kernel.
phi_x = gaussian_kernel_1d(sigma, kernel_size)
# f(x) = q(x) * phi(x) = q(x) * exp(p(x))
# f'(x) = (q'(x) + q(x) * p'(x)) * phi(x)
# p'(x) = -1 / sigma ** 2
# Implement q'(x) + q(x) * p'(x) as a matrix operator and apply to the
# coefficients of q(x)
q = np.zeros(2)
q[0] = 1
D = np.diag([1], 1) # D @ q(x) = q'(x)
sigma2 = sigma * sigma
P = np.diag(np.ones(1)/-sigma2, -1) # P @ q(x) = q(x) * p'(x)
Q_deriv = D + P
q = Q_deriv.dot(q)
x = np.arange(-(kernel_size//2), kernel_size//2 + 1)
exponent_range = np.arange(2)
q = (x[:, None] ** exponent_range).dot(q)
return q * phi_x
## CATEGORY 3 (This is a bonus!)
def HoughLinesP(image, rho, theta, threshold, lines, minLineLength, maxLineGap):
return cv2.HoughLinesP(image, rho, theta, threshold, lines, minLineLength, maxLineGap)
# X = np.random.rand(80, 160, 3)
X = np.array([
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 255, 255, 255, 7],
[5, 0, 23, 3, 6],
[1, 2, 3, 4, 8],
])
X = np.tile(X[..., np.newaxis], (10, 10, 3))
X[:,:,1] = X[:,:,0] * 0.5
X[:,:,2] = X[:,:,0] * 0.3
print(X.shape)
bob = Canny(X.astype(np.uint8), 75, 200)
print(bob)
| return np.concatenate(np.dstack(list_of_indices)) | identifier_body |
sw_06_cv_functions.py | import cv2
import numpy as np
from numpy.lib.stride_tricks import as_strided
import traceback
import warnings
import numpy
# import rospy
## Software Exercise 6: Choose your category (1 or 2) and replace the cv2 code by your own!
## CATEGORY 1
def inRange(hsv_image, low_range, high_range):
return cv2.inRange(hsv_image, low_range, high_range)
def bitwise_or(bitwise1, bitwise2):
return cv2.bitwise_or(bitwise1, bitwise2)
def bitwise_and(bitwise1, bitwise2):
return cv2.bitwise_and(bitwise1, bitwise2)
def getStructuringElement(shape, size):
return cv2.getStructuringElement(shape, size)
def dilate(bitwise, kernel):
return cv2.dilate(bitwise, kernel)
## CATEGORY 2
def Canny(image, threshold1, threshold2, apertureSize=3):
if apertureSize != 3:
warnings.warn(UserWarning("Using apertureSize of 3, even though a different value was passed."))
apertureSize = 3
try:
dx, dy, edge_gradients = get_image_gradients(image)
gradient_directions = snap_angles(np.arctan2(dy, dx))
if len(image.shape) == 3 and image.shape[2] == 3:
# convert image to grayscale if it isn't already.
image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
filtered_image = non_maximum_suppression(image, edge_gradients, gradient_directions)
result = hysteresis_thresholding(filtered_image, edge_gradients, threshold1, threshold2)
expected = cv2.Canny(image, threshold1, threshold2, apertureSize=3)
# print(result.shape, expected.shape)
# print(set(result.flat), set(expected.flat))
# print(np.mean(result), np.mean(expected))
# print(np.count_nonzero(result), np.count_nonzero(expected))
# print(np.median(result), np.median(expected))
# print(np.max(result), np.max(expected))
return expected
return result
except Exception as e:
traceback.print_exc()
def get_image_gradients(image):
num_channels = image.shape[-1] if len(image.shape) == 3 else 1
if num_channels == 1:
image = image[:,:, np.newaxis]
dxs = np.zeros_like(image)
dys = np.zeros_like(image)
image = image.astype(float)
for channel in range(num_channels):
image[..., channel] = gaussian_blurring(image[..., channel], std=1, kernel_size=5)
dx, dy = image_derivatives(image[..., channel])
dxs[..., channel] = dx
dys[..., channel] = dy
## TODO: comment this out, just testing to check if the sobel operation is the issue
# sobel_x = cv2.Sobel(image, cv2.CV_8U, 1, 0, scale=1, ksize=3)
# sobel_y = cv2.Sobel(image, cv2.CV_8U, 0, 1, scale=1, ksize=3)
# dxs, dys = sobel_x, sobel_y
edge_gradients = np.sqrt(dxs ** 2 + dys ** 2)
# We use the grad magnitude and dx and dy's from the channel with the highest gradient.
max_grad_indices = np.argmax(edge_gradients, axis=-1)
edge_gradients = np.max(edge_gradients, axis=-1)
mask = np.zeros_like(image, dtype=bool)
mask[max_grad_indices] = True
dxs &= mask
dx = np.sum(dxs, axis=-1)
dys &= mask
dy = np.sum(dys, axis=-1)
return dx, dy, edge_gradients
def hysteresis_thresholding(image, image_gradients, min_val, max_val):
"""
Perform hysteresis thresholding using some bitwise magic.
"""
print("BEFORE HYSTERISIS THRESHOLDING:", image)
print("gradients:", image_gradients)
largest_gradient_value = np.max(image_gradients)
while largest_gradient_value < max_val:
print("Largest gradient value:", largest_gradient_value)
warnings.warn(UserWarning("Image has no edge gradients above upper threshold, increasing all gradients values!"))
# return np.zeros_like(image)
image_gradients *= 1.5
largest_gradient_value = np.max(image_gradients)
# print("Largest gradient value:", largest_gradient_value)
# the set of all 'strong' indices.
strong_indices = indices_where(image_gradients >= max_val)
off_indices = indices_where(image_gradients < min_val)
weak_indices = indices_where((min_val <= image_gradients) & (image_gradients < max_val))
image_height = image.shape[0]
image_width = image.shape[1]
# get the neighbours of all strong edges.
# convert their neighbours with weak edges to strong edges.
to_explore = np.zeros_like(image_gradients, dtype=bool)
to_explore[index_with(strong_indices)] = True
explored = np.zeros_like(image_gradients, dtype=bool)
strong = np.zeros_like(image_gradients, dtype=bool)
strong[index_with(strong_indices)] = True
# print("strong:", strong)
weak = np.zeros_like(image_gradients, dtype=bool)
weak[index_with(weak_indices)] = True
unexplored_indices = aggregate(np.nonzero(to_explore))
# print("unexplored (initial):", [str(v) for v in unexplored])
# print("weak indices (initial):", [str(v) for v in weak_indices])
# print("off indices (initial):", [str(v) for v in off_indices])
already_explored = np.zeros_like(to_explore)
while len(unexplored_indices) > 0:
# print("exploring indices ", [str(v) for v in indices])
# print(indices)
neighbours = neighbourhood(unexplored_indices, image_width, image_height)
is_neighbour = np.zeros_like(weak)
is_neighbour[index_with(neighbours)] = True
is_weak_neighbour = is_neighbour & weak
weak_neighbours = aggregate(np.nonzero(is_weak_neighbour))
# weak_neighbours = common_rows_between(neighbours, weak_indices)
# print("The neighbours of (", ",".join(str(pixel) for pixel in indices), ") are ", neighbours)
# print("weak neighbours:", [str(v) for v in weak_neighbours])
strong[index_with(weak_neighbours)] = True
weak[index_with(weak_neighbours)] = False
# mark that we need to explore these:
already_explored[index_with(unexplored_indices)] = True
# explore the indices of the weak neighbours, if they haven't been explored already.
to_explore[index_with(weak_neighbours)] = True
# do not re-explore already explored indices.
to_explore &= ~already_explored
unexplored_indices = aggregate(np.nonzero(to_explore))
out = np.zeros_like(image_gradients)
out[~strong] = 0
out[strong] = 255
print("AFTER HYSTERISIS THRESHOLDING:", out)
return out
def | (list_of_indices):
return np.concatenate(np.dstack(list_of_indices))
def indices_where(condition):
return np.concatenate(np.dstack(np.where(condition)))
def index_with(list_of_indices):
return list_of_indices[:, 0], list_of_indices[:, 1]
def neighbourhood(index, image_width, image_height):
"""Returns the coordinates of the neighbours of a given coordinate or list of coordinates.
Arguments:
index {np.ndarray} -- either a list of coordinates (as an ndarray) or a coordinate itself, in the form (i, j)
NOTE: the pixels neighbours are clipped of (image_height-1, )
Returns:
np.ndarray -- ndarray of shape [?, 2], which contains the indices of the neighbouring pixels
"""
neighbourhoods = np.concatenate(np.dstack((np.indices([3,3]) - 1)))
if len(index.shape) == 2:
neighbourhoods = neighbourhoods[:, np.newaxis, :]
neighbours_and_itself = index + neighbourhoods
keep = np.ones(9, dtype=bool)
keep[4] = False # drop the point itself, but keep the neighbours.
neighbours = neighbours_and_itself[keep]
if len(index.shape) == 2:
neighbours = np.stack(neighbours, axis=1)
mask = np.ones_like(neighbours, dtype=bool)
# remove all neighbours that have either a negative value in them
negative = np.where(neighbours < 0)
mask[negative] = False
# or a value equal to image_height in x
greater_than_image_height = np.where(neighbours[..., 0] >= image_height)
mask[greater_than_image_height] = False
# or image_width in z
greater_than_image_width = np.where(neighbours[..., 1] >= image_height)
mask[greater_than_image_width] = False
# or that correspond to an index in 'index'
tiled = np.expand_dims(index, 1)
tiled = np.tile(tiled, (1, neighbours.shape[1], 1))
equal_to_index = np.equal(neighbours, tiled)
equal_to_index = np.all(equal_to_index, axis=-1)
mask[equal_to_index] = False
mask = np.all(mask, axis=-1)
# print(mask)
# for i, (m, n) in enumerate(zip(mask, neighbours)):
# if len(index.shape) == 2:
# for keep, (i, j) in zip(m, n):
# print("point", i, j, "is good:", keep)
# else:
# keep = m
# i, j = n
# print("point", i, j, "is good:", keep)
neighbours = neighbours[mask]
# get rid of duplicates:
neighbours = np.unique(neighbours, axis=0)
return neighbours
# # print(image[row, col])
# min_x = max(i-1, 0)
# max_x = min(i+1, image_w-1)
# min_y = max(j-1, 0)
# max_y = min(j+1, image_h-1)
# indices = set(
# (x, y)
# for x in range(min_x, max_x + 1)
# for y in range(min_y, max_y + 1)
# )
# print(indices)
# indices.discard((i, j))
# return indices
# # return np.array(indices)
def common_rows_between(array_1, array_2):
"""TAKEN FROM https://stackoverflow.com/questions/8317022/get-intersecting-rows-across-two-2d-numpy-arrays
Arguments:
array_1 {np.ndarray} -- a 2d array
array_2 {np.ndarray} -- another 2d array
Returns:
np.ndarray -- a 2d array containing the common rows in both array_1 and array_2.
"""
nrows, ncols = array_1.shape
dtype={
'names': ['f{}'.format(i) for i in range(ncols)],
'formats': ncols * [array_1.dtype]
}
C = np.intersect1d(array_1.view(dtype), array_2.view(dtype))
# This last bit is optional if you're okay with "C" being a structured array...
C = C.view(array_1.dtype).reshape(-1, ncols)
return C
def non_maximum_suppression(image, image_gradients, gradient_directions):
"""Non-maximum suppression
To be honest, I'm very proud of this piece of code. No for-loops were needed.
Arguments:
image {[type]} -- the image to preform non-maximum suppresion on.
gradient_directions {[type]} -- the gradient directions
"""
print("Before non-maximum suppression:", image)
# Get where to check depending on the "direction"
direction_offset_x = np.round(np.cos(gradient_directions)).astype(int)
direction_offset_y = np.round(np.sin(gradient_directions)).astype(int)
direction_offset = np.dstack((direction_offset_x, direction_offset_y))
# the (i, j) indices of all points in the image.
row, col = np.indices(image.shape)
# in order not to cause any indexing errors, we create a
# padded version of the image with the edge values duplicated.
# a pixel at (row, col) in the image is located at (row+1, col+1) in the padded image.
image_ = np.pad(image, 1, mode="edge")
row_, col_ = row + 1, col + 1
# get the image pixels before and after each pixel in the image.
pixel_middle = image[row, col]
pixel_forward = image_[row_ + direction_offset_x, col_ + direction_offset_y]
pixel_backward = image_[row_ - direction_offset_x, col_ - direction_offset_y]
higher_than_forward = pixel_middle > pixel_forward
higher_than_backward = pixel_middle > pixel_backward
is_local_maximum = higher_than_backward & higher_than_forward
out = np.copy(image)
out[~is_local_maximum] = 0
print("AFTER non-maximum suppression: ", out)
return out
def snap_angles(angles):
"""Snaps a given set of angles to one of the horizontal, vertical, or one of the two diagonal orientations.
Arguments:
angles -- an array of anges in radians
"""
pi_over_four = np.pi / 4
return np.round(angles / pi_over_four) * pi_over_four
def image_derivatives(image):
""" Computes the Sobel X and Y operators for this image.
Loosely based on https://en.wikipedia.org/wiki/Sobel_operator
Arguments:
image {[type]} -- [description]
Returns:
[type] -- [description]
"""
sobel_sign = np.array([[-1, 0, 1]])
sobel_mag = np.array([[1, 2, 1]])
temp1 = conv2d(image, sobel_sign)
image_dx = conv2d(temp1, sobel_mag.T)
temp2 = conv2d(image, sobel_mag)
image_dy = conv2d(temp2, -sobel_sign.T)
return image_dx, image_dy
# save these for comparison
image_dx_1, image_dy_1 = image_dx, image_dy
# Slower alternative (from OpenCV docs):
sobel_x = np.array([
[-1, 0, 1],
[-2, 0, 2],
[-1, 0, 1],
])
image_dx = conv2d(image, sobel_x)
image_dy = conv2d(image, -sobel_x.T)
assert np.all(np.isclose(image_dy, image_dy_1))
assert np.all(np.isclose(image_dx, image_dx_1))
return image_dx, image_dy
def conv2d(x, kernel, stride=1, padding="auto", padding_mode="constant"):
"""
TAKEN AND ADAPTED FROM https://stackoverflow.com/questions/54962004/implement-max-mean-poolingwith-stride-with-numpy
ALSO INSPIRED FROM https://cs231n.github.io/convolutional-networks/
2D Pooling
Parameters:
A: input 2D array
kernel: int, the size of the window
stride: int, the stride of the window
padding: int or string, implicit zero paddings on both sides of the input
"""
# Padding
assert len(kernel.shape) == 2, "kernel should be 2d."
assert kernel.shape[0] % 2 == 1 and kernel.shape[1] % 2 == 1, "only odd-sized kernels are allowed"
kernel_size = kernel.shape[0]
if padding == "auto":
padding = np.array(kernel.shape) // 2
x = np.pad(x, padding, mode=padding_mode)
# Window view of X
output_shape = ((x.shape[0] - kernel.shape[0])//stride + 1,
(x.shape[1] - kernel.shape[1])//stride + 1)
x_w = as_strided(
x,
shape=output_shape + kernel.shape,
strides = (
stride*x.strides[0],
stride*x.strides[1]
) + x.strides
)
# ADAPTATION BELOW:
# patches is [#patches, k, k]
patches = x_w.reshape(-1, *kernel.shape)
flattened_kernel = kernel.flat
flattened_patches = patches.reshape([patches.shape[0], -1])
return np.dot(flattened_patches, flattened_kernel).reshape(output_shape)
def separable_conv2d(x, kernel_1d, stride=1, padding="auto", padding_mode="edge"):
assert len(kernel_1d.shape) == 1, kernel_1d
k = kernel_1d.shape[0]
k1 = kernel_1d.reshape([1, k])
result_1 = conv2d(x, k1, stride, padding, padding_mode)
k2 = k1.T
result_2 = conv2d(result_1, k2, stride, padding, padding_mode)
return result_2
def gaussian_kernel_1d(std=1, kernel_size=5):
x = np.arange(-(kernel_size//2), (kernel_size//2)+1, dtype=float)
g = np.exp(- (x**2 / (2 * std**2))) / (np.sqrt(2 * np.pi) * std)
# normalize the sum to 1
g = g / np.sum(g)
return g
def gaussian_blurring(image, std=1, kernel_size=5):
# # print(kernel)
# kernel = np.expand_dims(kernel, axis=0)
# kernel = kernel.T @ kernel
# kernel /= np.sum(kernel)
# print("BEFORE GAUSSIAN BLURRING:\n", image)
kernel = gaussian_kernel_1d(std, kernel_size)
image1 = separable_conv2d(image, kernel, padding_mode="constant")
# print("AFTER GAUSSIAN BLURRING1:\n", image1)
return image1
# slower alternative from openCV documentation:
kernel = 1 / 159 * np.array([
[2, 4, 5, 4, 2],
[4, 9, 12, 9, 4],
[5, 12, 15, 12, 5],
[4, 9, 12, 9, 4],
[2, 4, 5, 4, 2]
])
image2 = conv2d(image, kernel)
print("AFTER GAUSSIAN BLURRING2:\n", image2)
return image2
def gaussian_derivative_filtering(image, std=1, kernel_size=5):
kernel = gaussian_derivative_1d(std, kernel_size)
return separable_conv2d(image, kernel)
def gaussian_derivative_1d(sigma, kernel_size):
"""
ADAPTED FROM https://github.com/scipy/scipy/blob/5681835ec51b728fa0ea6237d46aa8032b9e1400/scipy/ndimage/filters.py#L136
Computes a 1D Gaussian derivative kernel's
"""
# #0th order kernel.
phi_x = gaussian_kernel_1d(sigma, kernel_size)
# f(x) = q(x) * phi(x) = q(x) * exp(p(x))
# f'(x) = (q'(x) + q(x) * p'(x)) * phi(x)
# p'(x) = -1 / sigma ** 2
# Implement q'(x) + q(x) * p'(x) as a matrix operator and apply to the
# coefficients of q(x)
q = np.zeros(2)
q[0] = 1
D = np.diag([1], 1) # D @ q(x) = q'(x)
sigma2 = sigma * sigma
P = np.diag(np.ones(1)/-sigma2, -1) # P @ q(x) = q(x) * p'(x)
Q_deriv = D + P
q = Q_deriv.dot(q)
x = np.arange(-(kernel_size//2), kernel_size//2 + 1)
exponent_range = np.arange(2)
q = (x[:, None] ** exponent_range).dot(q)
return q * phi_x
## CATEGORY 3 (This is a bonus!)
def HoughLinesP(image, rho, theta, threshold, lines, minLineLength, maxLineGap):
return cv2.HoughLinesP(image, rho, theta, threshold, lines, minLineLength, maxLineGap)
# X = np.random.rand(80, 160, 3)
X = np.array([
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 255, 255, 255, 7],
[5, 0, 23, 3, 6],
[1, 2, 3, 4, 8],
])
X = np.tile(X[..., np.newaxis], (10, 10, 3))
X[:,:,1] = X[:,:,0] * 0.5
X[:,:,2] = X[:,:,0] * 0.3
print(X.shape)
bob = Canny(X.astype(np.uint8), 75, 200)
print(bob)
| aggregate | identifier_name |
sw_06_cv_functions.py | import cv2
import numpy as np
from numpy.lib.stride_tricks import as_strided
import traceback
import warnings
import numpy
# import rospy
## Software Exercise 6: Choose your category (1 or 2) and replace the cv2 code by your own!
## CATEGORY 1
def inRange(hsv_image, low_range, high_range):
return cv2.inRange(hsv_image, low_range, high_range)
def bitwise_or(bitwise1, bitwise2):
return cv2.bitwise_or(bitwise1, bitwise2)
def bitwise_and(bitwise1, bitwise2):
return cv2.bitwise_and(bitwise1, bitwise2)
def getStructuringElement(shape, size):
return cv2.getStructuringElement(shape, size)
def dilate(bitwise, kernel):
return cv2.dilate(bitwise, kernel)
## CATEGORY 2
def Canny(image, threshold1, threshold2, apertureSize=3):
if apertureSize != 3:
warnings.warn(UserWarning("Using apertureSize of 3, even though a different value was passed."))
apertureSize = 3
try:
dx, dy, edge_gradients = get_image_gradients(image)
gradient_directions = snap_angles(np.arctan2(dy, dx))
if len(image.shape) == 3 and image.shape[2] == 3:
# convert image to grayscale if it isn't already.
image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
filtered_image = non_maximum_suppression(image, edge_gradients, gradient_directions)
result = hysteresis_thresholding(filtered_image, edge_gradients, threshold1, threshold2)
expected = cv2.Canny(image, threshold1, threshold2, apertureSize=3)
# print(result.shape, expected.shape)
# print(set(result.flat), set(expected.flat))
# print(np.mean(result), np.mean(expected))
# print(np.count_nonzero(result), np.count_nonzero(expected))
# print(np.median(result), np.median(expected))
# print(np.max(result), np.max(expected))
return expected
return result
except Exception as e:
traceback.print_exc()
def get_image_gradients(image):
num_channels = image.shape[-1] if len(image.shape) == 3 else 1
if num_channels == 1:
image = image[:,:, np.newaxis]
dxs = np.zeros_like(image)
dys = np.zeros_like(image)
image = image.astype(float)
for channel in range(num_channels):
image[..., channel] = gaussian_blurring(image[..., channel], std=1, kernel_size=5)
dx, dy = image_derivatives(image[..., channel])
dxs[..., channel] = dx
dys[..., channel] = dy
## TODO: comment this out, just testing to check if the sobel operation is the issue
# sobel_x = cv2.Sobel(image, cv2.CV_8U, 1, 0, scale=1, ksize=3)
# sobel_y = cv2.Sobel(image, cv2.CV_8U, 0, 1, scale=1, ksize=3)
# dxs, dys = sobel_x, sobel_y
edge_gradients = np.sqrt(dxs ** 2 + dys ** 2)
# We use the grad magnitude and dx and dy's from the channel with the highest gradient.
max_grad_indices = np.argmax(edge_gradients, axis=-1)
edge_gradients = np.max(edge_gradients, axis=-1)
mask = np.zeros_like(image, dtype=bool)
mask[max_grad_indices] = True
dxs &= mask
dx = np.sum(dxs, axis=-1)
dys &= mask
dy = np.sum(dys, axis=-1)
return dx, dy, edge_gradients
def hysteresis_thresholding(image, image_gradients, min_val, max_val):
"""
Perform hysteresis thresholding using some bitwise magic.
"""
print("BEFORE HYSTERISIS THRESHOLDING:", image)
print("gradients:", image_gradients)
largest_gradient_value = np.max(image_gradients)
while largest_gradient_value < max_val:
print("Largest gradient value:", largest_gradient_value)
warnings.warn(UserWarning("Image has no edge gradients above upper threshold, increasing all gradients values!"))
# return np.zeros_like(image)
image_gradients *= 1.5
largest_gradient_value = np.max(image_gradients)
# print("Largest gradient value:", largest_gradient_value)
# the set of all 'strong' indices.
strong_indices = indices_where(image_gradients >= max_val)
off_indices = indices_where(image_gradients < min_val)
weak_indices = indices_where((min_val <= image_gradients) & (image_gradients < max_val))
image_height = image.shape[0]
image_width = image.shape[1]
# get the neighbours of all strong edges.
# convert their neighbours with weak edges to strong edges.
to_explore = np.zeros_like(image_gradients, dtype=bool)
to_explore[index_with(strong_indices)] = True
explored = np.zeros_like(image_gradients, dtype=bool)
strong = np.zeros_like(image_gradients, dtype=bool)
strong[index_with(strong_indices)] = True
# print("strong:", strong)
weak = np.zeros_like(image_gradients, dtype=bool)
weak[index_with(weak_indices)] = True
unexplored_indices = aggregate(np.nonzero(to_explore))
# print("unexplored (initial):", [str(v) for v in unexplored])
# print("weak indices (initial):", [str(v) for v in weak_indices])
# print("off indices (initial):", [str(v) for v in off_indices])
already_explored = np.zeros_like(to_explore)
while len(unexplored_indices) > 0:
# print("exploring indices ", [str(v) for v in indices])
# print(indices)
neighbours = neighbourhood(unexplored_indices, image_width, image_height)
is_neighbour = np.zeros_like(weak)
is_neighbour[index_with(neighbours)] = True
is_weak_neighbour = is_neighbour & weak
weak_neighbours = aggregate(np.nonzero(is_weak_neighbour))
# weak_neighbours = common_rows_between(neighbours, weak_indices)
# print("The neighbours of (", ",".join(str(pixel) for pixel in indices), ") are ", neighbours)
# print("weak neighbours:", [str(v) for v in weak_neighbours])
strong[index_with(weak_neighbours)] = True
weak[index_with(weak_neighbours)] = False
# mark that we need to explore these:
already_explored[index_with(unexplored_indices)] = True
# explore the indices of the weak neighbours, if they haven't been explored already.
to_explore[index_with(weak_neighbours)] = True
# do not re-explore already explored indices.
to_explore &= ~already_explored
unexplored_indices = aggregate(np.nonzero(to_explore))
out = np.zeros_like(image_gradients)
out[~strong] = 0
out[strong] = 255
print("AFTER HYSTERISIS THRESHOLDING:", out)
return out
def aggregate(list_of_indices):
return np.concatenate(np.dstack(list_of_indices))
def indices_where(condition):
return np.concatenate(np.dstack(np.where(condition)))
def index_with(list_of_indices):
return list_of_indices[:, 0], list_of_indices[:, 1]
def neighbourhood(index, image_width, image_height):
"""Returns the coordinates of the neighbours of a given coordinate or list of coordinates.
Arguments:
index {np.ndarray} -- either a list of coordinates (as an ndarray) or a coordinate itself, in the form (i, j)
NOTE: the pixels neighbours are clipped of (image_height-1, )
Returns:
np.ndarray -- ndarray of shape [?, 2], which contains the indices of the neighbouring pixels
"""
neighbourhoods = np.concatenate(np.dstack((np.indices([3,3]) - 1)))
if len(index.shape) == 2:
neighbourhoods = neighbourhoods[:, np.newaxis, :]
neighbours_and_itself = index + neighbourhoods
keep = np.ones(9, dtype=bool)
keep[4] = False # drop the point itself, but keep the neighbours.
neighbours = neighbours_and_itself[keep]
if len(index.shape) == 2:
neighbours = np.stack(neighbours, axis=1)
mask = np.ones_like(neighbours, dtype=bool)
# remove all neighbours that have either a negative value in them
negative = np.where(neighbours < 0)
mask[negative] = False
# or a value equal to image_height in x
greater_than_image_height = np.where(neighbours[..., 0] >= image_height)
mask[greater_than_image_height] = False
# or image_width in z
greater_than_image_width = np.where(neighbours[..., 1] >= image_height)
mask[greater_than_image_width] = False
# or that correspond to an index in 'index'
tiled = np.expand_dims(index, 1)
tiled = np.tile(tiled, (1, neighbours.shape[1], 1))
equal_to_index = np.equal(neighbours, tiled)
equal_to_index = np.all(equal_to_index, axis=-1)
mask[equal_to_index] = False
mask = np.all(mask, axis=-1)
# print(mask)
# for i, (m, n) in enumerate(zip(mask, neighbours)):
# if len(index.shape) == 2:
# for keep, (i, j) in zip(m, n):
# print("point", i, j, "is good:", keep)
# else:
# keep = m
# i, j = n
# print("point", i, j, "is good:", keep)
neighbours = neighbours[mask]
# get rid of duplicates:
neighbours = np.unique(neighbours, axis=0)
return neighbours
# # print(image[row, col])
# min_x = max(i-1, 0)
# max_x = min(i+1, image_w-1)
# min_y = max(j-1, 0)
# max_y = min(j+1, image_h-1)
# indices = set(
# (x, y)
# for x in range(min_x, max_x + 1)
# for y in range(min_y, max_y + 1)
# )
# print(indices)
# indices.discard((i, j))
# return indices
# # return np.array(indices)
def common_rows_between(array_1, array_2):
"""TAKEN FROM https://stackoverflow.com/questions/8317022/get-intersecting-rows-across-two-2d-numpy-arrays
Arguments:
array_1 {np.ndarray} -- a 2d array
array_2 {np.ndarray} -- another 2d array
Returns:
np.ndarray -- a 2d array containing the common rows in both array_1 and array_2.
"""
nrows, ncols = array_1.shape
dtype={
'names': ['f{}'.format(i) for i in range(ncols)],
'formats': ncols * [array_1.dtype]
}
C = np.intersect1d(array_1.view(dtype), array_2.view(dtype))
# This last bit is optional if you're okay with "C" being a structured array...
C = C.view(array_1.dtype).reshape(-1, ncols)
return C
def non_maximum_suppression(image, image_gradients, gradient_directions):
"""Non-maximum suppression
To be honest, I'm very proud of this piece of code. No for-loops were needed.
Arguments:
image {[type]} -- the image to preform non-maximum suppresion on.
gradient_directions {[type]} -- the gradient directions
"""
print("Before non-maximum suppression:", image)
# Get where to check depending on the "direction"
direction_offset_x = np.round(np.cos(gradient_directions)).astype(int)
direction_offset_y = np.round(np.sin(gradient_directions)).astype(int)
direction_offset = np.dstack((direction_offset_x, direction_offset_y))
# the (i, j) indices of all points in the image.
row, col = np.indices(image.shape)
# in order not to cause any indexing errors, we create a
# padded version of the image with the edge values duplicated.
# a pixel at (row, col) in the image is located at (row+1, col+1) in the padded image.
image_ = np.pad(image, 1, mode="edge")
row_, col_ = row + 1, col + 1
# get the image pixels before and after each pixel in the image.
pixel_middle = image[row, col]
pixel_forward = image_[row_ + direction_offset_x, col_ + direction_offset_y]
pixel_backward = image_[row_ - direction_offset_x, col_ - direction_offset_y]
higher_than_forward = pixel_middle > pixel_forward
higher_than_backward = pixel_middle > pixel_backward
is_local_maximum = higher_than_backward & higher_than_forward
out = np.copy(image)
out[~is_local_maximum] = 0
print("AFTER non-maximum suppression: ", out)
return out
def snap_angles(angles):
"""Snaps a given set of angles to one of the horizontal, vertical, or one of the two diagonal orientations.
Arguments:
angles -- an array of anges in radians
"""
pi_over_four = np.pi / 4
return np.round(angles / pi_over_four) * pi_over_four
def image_derivatives(image):
""" Computes the Sobel X and Y operators for this image.
Loosely based on https://en.wikipedia.org/wiki/Sobel_operator
Arguments:
image {[type]} -- [description]
Returns:
[type] -- [description]
"""
sobel_sign = np.array([[-1, 0, 1]])
sobel_mag = np.array([[1, 2, 1]])
temp1 = conv2d(image, sobel_sign)
image_dx = conv2d(temp1, sobel_mag.T)
temp2 = conv2d(image, sobel_mag)
image_dy = conv2d(temp2, -sobel_sign.T)
return image_dx, image_dy
# save these for comparison
image_dx_1, image_dy_1 = image_dx, image_dy
# Slower alternative (from OpenCV docs):
sobel_x = np.array([
[-1, 0, 1],
[-2, 0, 2],
[-1, 0, 1],
])
image_dx = conv2d(image, sobel_x)
image_dy = conv2d(image, -sobel_x.T)
assert np.all(np.isclose(image_dy, image_dy_1))
assert np.all(np.isclose(image_dx, image_dx_1))
return image_dx, image_dy
def conv2d(x, kernel, stride=1, padding="auto", padding_mode="constant"):
"""
TAKEN AND ADAPTED FROM https://stackoverflow.com/questions/54962004/implement-max-mean-poolingwith-stride-with-numpy
ALSO INSPIRED FROM https://cs231n.github.io/convolutional-networks/
2D Pooling
Parameters:
A: input 2D array
kernel: int, the size of the window
stride: int, the stride of the window
padding: int or string, implicit zero paddings on both sides of the input
"""
# Padding
assert len(kernel.shape) == 2, "kernel should be 2d."
assert kernel.shape[0] % 2 == 1 and kernel.shape[1] % 2 == 1, "only odd-sized kernels are allowed"
kernel_size = kernel.shape[0]
if padding == "auto":
|
x = np.pad(x, padding, mode=padding_mode)
# Window view of X
output_shape = ((x.shape[0] - kernel.shape[0])//stride + 1,
(x.shape[1] - kernel.shape[1])//stride + 1)
x_w = as_strided(
x,
shape=output_shape + kernel.shape,
strides = (
stride*x.strides[0],
stride*x.strides[1]
) + x.strides
)
# ADAPTATION BELOW:
# patches is [#patches, k, k]
patches = x_w.reshape(-1, *kernel.shape)
flattened_kernel = kernel.flat
flattened_patches = patches.reshape([patches.shape[0], -1])
return np.dot(flattened_patches, flattened_kernel).reshape(output_shape)
def separable_conv2d(x, kernel_1d, stride=1, padding="auto", padding_mode="edge"):
assert len(kernel_1d.shape) == 1, kernel_1d
k = kernel_1d.shape[0]
k1 = kernel_1d.reshape([1, k])
result_1 = conv2d(x, k1, stride, padding, padding_mode)
k2 = k1.T
result_2 = conv2d(result_1, k2, stride, padding, padding_mode)
return result_2
def gaussian_kernel_1d(std=1, kernel_size=5):
x = np.arange(-(kernel_size//2), (kernel_size//2)+1, dtype=float)
g = np.exp(- (x**2 / (2 * std**2))) / (np.sqrt(2 * np.pi) * std)
# normalize the sum to 1
g = g / np.sum(g)
return g
def gaussian_blurring(image, std=1, kernel_size=5):
# # print(kernel)
# kernel = np.expand_dims(kernel, axis=0)
# kernel = kernel.T @ kernel
# kernel /= np.sum(kernel)
# print("BEFORE GAUSSIAN BLURRING:\n", image)
kernel = gaussian_kernel_1d(std, kernel_size)
image1 = separable_conv2d(image, kernel, padding_mode="constant")
# print("AFTER GAUSSIAN BLURRING1:\n", image1)
return image1
# slower alternative from openCV documentation:
kernel = 1 / 159 * np.array([
[2, 4, 5, 4, 2],
[4, 9, 12, 9, 4],
[5, 12, 15, 12, 5],
[4, 9, 12, 9, 4],
[2, 4, 5, 4, 2]
])
image2 = conv2d(image, kernel)
print("AFTER GAUSSIAN BLURRING2:\n", image2)
return image2
def gaussian_derivative_filtering(image, std=1, kernel_size=5):
kernel = gaussian_derivative_1d(std, kernel_size)
return separable_conv2d(image, kernel)
def gaussian_derivative_1d(sigma, kernel_size):
"""
ADAPTED FROM https://github.com/scipy/scipy/blob/5681835ec51b728fa0ea6237d46aa8032b9e1400/scipy/ndimage/filters.py#L136
Computes a 1D Gaussian derivative kernel's
"""
# #0th order kernel.
phi_x = gaussian_kernel_1d(sigma, kernel_size)
# f(x) = q(x) * phi(x) = q(x) * exp(p(x))
# f'(x) = (q'(x) + q(x) * p'(x)) * phi(x)
# p'(x) = -1 / sigma ** 2
# Implement q'(x) + q(x) * p'(x) as a matrix operator and apply to the
# coefficients of q(x)
q = np.zeros(2)
q[0] = 1
D = np.diag([1], 1) # D @ q(x) = q'(x)
sigma2 = sigma * sigma
P = np.diag(np.ones(1)/-sigma2, -1) # P @ q(x) = q(x) * p'(x)
Q_deriv = D + P
q = Q_deriv.dot(q)
x = np.arange(-(kernel_size//2), kernel_size//2 + 1)
exponent_range = np.arange(2)
q = (x[:, None] ** exponent_range).dot(q)
return q * phi_x
## CATEGORY 3 (This is a bonus!)
def HoughLinesP(image, rho, theta, threshold, lines, minLineLength, maxLineGap):
return cv2.HoughLinesP(image, rho, theta, threshold, lines, minLineLength, maxLineGap)
# X = np.random.rand(80, 160, 3)
X = np.array([
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 255, 255, 255, 7],
[5, 0, 23, 3, 6],
[1, 2, 3, 4, 8],
])
X = np.tile(X[..., np.newaxis], (10, 10, 3))
X[:,:,1] = X[:,:,0] * 0.5
X[:,:,2] = X[:,:,0] * 0.3
print(X.shape)
bob = Canny(X.astype(np.uint8), 75, 200)
print(bob)
| padding = np.array(kernel.shape) // 2 | conditional_block |
sw_06_cv_functions.py | import cv2
import numpy as np
from numpy.lib.stride_tricks import as_strided
import traceback
import warnings
import numpy
# import rospy
## Software Exercise 6: Choose your category (1 or 2) and replace the cv2 code by your own!
## CATEGORY 1
def inRange(hsv_image, low_range, high_range):
return cv2.inRange(hsv_image, low_range, high_range)
def bitwise_or(bitwise1, bitwise2):
return cv2.bitwise_or(bitwise1, bitwise2)
def bitwise_and(bitwise1, bitwise2):
return cv2.bitwise_and(bitwise1, bitwise2)
def getStructuringElement(shape, size):
return cv2.getStructuringElement(shape, size)
def dilate(bitwise, kernel):
return cv2.dilate(bitwise, kernel)
## CATEGORY 2
def Canny(image, threshold1, threshold2, apertureSize=3):
if apertureSize != 3:
warnings.warn(UserWarning("Using apertureSize of 3, even though a different value was passed."))
apertureSize = 3
try:
dx, dy, edge_gradients = get_image_gradients(image)
gradient_directions = snap_angles(np.arctan2(dy, dx))
if len(image.shape) == 3 and image.shape[2] == 3:
# convert image to grayscale if it isn't already.
image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
filtered_image = non_maximum_suppression(image, edge_gradients, gradient_directions)
result = hysteresis_thresholding(filtered_image, edge_gradients, threshold1, threshold2)
expected = cv2.Canny(image, threshold1, threshold2, apertureSize=3)
# print(result.shape, expected.shape)
# print(set(result.flat), set(expected.flat))
# print(np.mean(result), np.mean(expected))
# print(np.count_nonzero(result), np.count_nonzero(expected))
# print(np.median(result), np.median(expected))
# print(np.max(result), np.max(expected))
return expected
return result
except Exception as e:
traceback.print_exc()
def get_image_gradients(image):
num_channels = image.shape[-1] if len(image.shape) == 3 else 1
if num_channels == 1:
image = image[:,:, np.newaxis]
dxs = np.zeros_like(image)
dys = np.zeros_like(image)
image = image.astype(float)
for channel in range(num_channels):
image[..., channel] = gaussian_blurring(image[..., channel], std=1, kernel_size=5)
dx, dy = image_derivatives(image[..., channel])
dxs[..., channel] = dx
dys[..., channel] = dy
## TODO: comment this out, just testing to check if the sobel operation is the issue
# sobel_x = cv2.Sobel(image, cv2.CV_8U, 1, 0, scale=1, ksize=3)
# sobel_y = cv2.Sobel(image, cv2.CV_8U, 0, 1, scale=1, ksize=3)
# dxs, dys = sobel_x, sobel_y
edge_gradients = np.sqrt(dxs ** 2 + dys ** 2)
# We use the grad magnitude and dx and dy's from the channel with the highest gradient.
max_grad_indices = np.argmax(edge_gradients, axis=-1)
edge_gradients = np.max(edge_gradients, axis=-1)
mask = np.zeros_like(image, dtype=bool)
mask[max_grad_indices] = True
dxs &= mask
dx = np.sum(dxs, axis=-1)
dys &= mask
dy = np.sum(dys, axis=-1)
return dx, dy, edge_gradients
def hysteresis_thresholding(image, image_gradients, min_val, max_val):
"""
Perform hysteresis thresholding using some bitwise magic.
"""
print("BEFORE HYSTERISIS THRESHOLDING:", image)
print("gradients:", image_gradients)
largest_gradient_value = np.max(image_gradients)
while largest_gradient_value < max_val:
print("Largest gradient value:", largest_gradient_value)
warnings.warn(UserWarning("Image has no edge gradients above upper threshold, increasing all gradients values!"))
# return np.zeros_like(image)
image_gradients *= 1.5
largest_gradient_value = np.max(image_gradients)
# print("Largest gradient value:", largest_gradient_value)
# the set of all 'strong' indices.
strong_indices = indices_where(image_gradients >= max_val)
off_indices = indices_where(image_gradients < min_val)
weak_indices = indices_where((min_val <= image_gradients) & (image_gradients < max_val))
image_height = image.shape[0]
image_width = image.shape[1]
# get the neighbours of all strong edges.
# convert their neighbours with weak edges to strong edges.
to_explore = np.zeros_like(image_gradients, dtype=bool)
to_explore[index_with(strong_indices)] = True
explored = np.zeros_like(image_gradients, dtype=bool)
strong = np.zeros_like(image_gradients, dtype=bool)
strong[index_with(strong_indices)] = True
# print("strong:", strong)
weak = np.zeros_like(image_gradients, dtype=bool)
weak[index_with(weak_indices)] = True
unexplored_indices = aggregate(np.nonzero(to_explore))
# print("unexplored (initial):", [str(v) for v in unexplored])
# print("weak indices (initial):", [str(v) for v in weak_indices])
# print("off indices (initial):", [str(v) for v in off_indices])
already_explored = np.zeros_like(to_explore)
while len(unexplored_indices) > 0:
# print("exploring indices ", [str(v) for v in indices])
# print(indices)
neighbours = neighbourhood(unexplored_indices, image_width, image_height)
is_neighbour = np.zeros_like(weak)
is_neighbour[index_with(neighbours)] = True
is_weak_neighbour = is_neighbour & weak
weak_neighbours = aggregate(np.nonzero(is_weak_neighbour))
# weak_neighbours = common_rows_between(neighbours, weak_indices)
# print("The neighbours of (", ",".join(str(pixel) for pixel in indices), ") are ", neighbours)
# print("weak neighbours:", [str(v) for v in weak_neighbours])
strong[index_with(weak_neighbours)] = True
weak[index_with(weak_neighbours)] = False
# mark that we need to explore these:
already_explored[index_with(unexplored_indices)] = True
# explore the indices of the weak neighbours, if they haven't been explored already.
to_explore[index_with(weak_neighbours)] = True
# do not re-explore already explored indices.
to_explore &= ~already_explored
unexplored_indices = aggregate(np.nonzero(to_explore))
out = np.zeros_like(image_gradients)
out[~strong] = 0
out[strong] = 255
print("AFTER HYSTERISIS THRESHOLDING:", out)
return out
def aggregate(list_of_indices):
return np.concatenate(np.dstack(list_of_indices))
def indices_where(condition):
return np.concatenate(np.dstack(np.where(condition)))
def index_with(list_of_indices):
return list_of_indices[:, 0], list_of_indices[:, 1]
def neighbourhood(index, image_width, image_height):
"""Returns the coordinates of the neighbours of a given coordinate or list of coordinates.
Arguments:
index {np.ndarray} -- either a list of coordinates (as an ndarray) or a coordinate itself, in the form (i, j)
| NOTE: the pixels neighbours are clipped of (image_height-1, )
Returns:
np.ndarray -- ndarray of shape [?, 2], which contains the indices of the neighbouring pixels
"""
neighbourhoods = np.concatenate(np.dstack((np.indices([3,3]) - 1)))
if len(index.shape) == 2:
neighbourhoods = neighbourhoods[:, np.newaxis, :]
neighbours_and_itself = index + neighbourhoods
keep = np.ones(9, dtype=bool)
keep[4] = False # drop the point itself, but keep the neighbours.
neighbours = neighbours_and_itself[keep]
if len(index.shape) == 2:
neighbours = np.stack(neighbours, axis=1)
mask = np.ones_like(neighbours, dtype=bool)
# remove all neighbours that have either a negative value in them
negative = np.where(neighbours < 0)
mask[negative] = False
# or a value equal to image_height in x
greater_than_image_height = np.where(neighbours[..., 0] >= image_height)
mask[greater_than_image_height] = False
# or image_width in z
greater_than_image_width = np.where(neighbours[..., 1] >= image_height)
mask[greater_than_image_width] = False
# or that correspond to an index in 'index'
tiled = np.expand_dims(index, 1)
tiled = np.tile(tiled, (1, neighbours.shape[1], 1))
equal_to_index = np.equal(neighbours, tiled)
equal_to_index = np.all(equal_to_index, axis=-1)
mask[equal_to_index] = False
mask = np.all(mask, axis=-1)
# print(mask)
# for i, (m, n) in enumerate(zip(mask, neighbours)):
# if len(index.shape) == 2:
# for keep, (i, j) in zip(m, n):
# print("point", i, j, "is good:", keep)
# else:
# keep = m
# i, j = n
# print("point", i, j, "is good:", keep)
neighbours = neighbours[mask]
# get rid of duplicates:
neighbours = np.unique(neighbours, axis=0)
return neighbours
# # print(image[row, col])
# min_x = max(i-1, 0)
# max_x = min(i+1, image_w-1)
# min_y = max(j-1, 0)
# max_y = min(j+1, image_h-1)
# indices = set(
# (x, y)
# for x in range(min_x, max_x + 1)
# for y in range(min_y, max_y + 1)
# )
# print(indices)
# indices.discard((i, j))
# return indices
# # return np.array(indices)
def common_rows_between(array_1, array_2):
"""TAKEN FROM https://stackoverflow.com/questions/8317022/get-intersecting-rows-across-two-2d-numpy-arrays
Arguments:
array_1 {np.ndarray} -- a 2d array
array_2 {np.ndarray} -- another 2d array
Returns:
np.ndarray -- a 2d array containing the common rows in both array_1 and array_2.
"""
nrows, ncols = array_1.shape
dtype={
'names': ['f{}'.format(i) for i in range(ncols)],
'formats': ncols * [array_1.dtype]
}
C = np.intersect1d(array_1.view(dtype), array_2.view(dtype))
# This last bit is optional if you're okay with "C" being a structured array...
C = C.view(array_1.dtype).reshape(-1, ncols)
return C
def non_maximum_suppression(image, image_gradients, gradient_directions):
"""Non-maximum suppression
To be honest, I'm very proud of this piece of code. No for-loops were needed.
Arguments:
image {[type]} -- the image to preform non-maximum suppresion on.
gradient_directions {[type]} -- the gradient directions
"""
print("Before non-maximum suppression:", image)
# Get where to check depending on the "direction"
direction_offset_x = np.round(np.cos(gradient_directions)).astype(int)
direction_offset_y = np.round(np.sin(gradient_directions)).astype(int)
direction_offset = np.dstack((direction_offset_x, direction_offset_y))
# the (i, j) indices of all points in the image.
row, col = np.indices(image.shape)
# in order not to cause any indexing errors, we create a
# padded version of the image with the edge values duplicated.
# a pixel at (row, col) in the image is located at (row+1, col+1) in the padded image.
image_ = np.pad(image, 1, mode="edge")
row_, col_ = row + 1, col + 1
# get the image pixels before and after each pixel in the image.
pixel_middle = image[row, col]
pixel_forward = image_[row_ + direction_offset_x, col_ + direction_offset_y]
pixel_backward = image_[row_ - direction_offset_x, col_ - direction_offset_y]
higher_than_forward = pixel_middle > pixel_forward
higher_than_backward = pixel_middle > pixel_backward
is_local_maximum = higher_than_backward & higher_than_forward
out = np.copy(image)
out[~is_local_maximum] = 0
print("AFTER non-maximum suppression: ", out)
return out
def snap_angles(angles):
"""Snaps a given set of angles to one of the horizontal, vertical, or one of the two diagonal orientations.
Arguments:
angles -- an array of anges in radians
"""
pi_over_four = np.pi / 4
return np.round(angles / pi_over_four) * pi_over_four
def image_derivatives(image):
""" Computes the Sobel X and Y operators for this image.
Loosely based on https://en.wikipedia.org/wiki/Sobel_operator
Arguments:
image {[type]} -- [description]
Returns:
[type] -- [description]
"""
sobel_sign = np.array([[-1, 0, 1]])
sobel_mag = np.array([[1, 2, 1]])
temp1 = conv2d(image, sobel_sign)
image_dx = conv2d(temp1, sobel_mag.T)
temp2 = conv2d(image, sobel_mag)
image_dy = conv2d(temp2, -sobel_sign.T)
return image_dx, image_dy
# save these for comparison
image_dx_1, image_dy_1 = image_dx, image_dy
# Slower alternative (from OpenCV docs):
sobel_x = np.array([
[-1, 0, 1],
[-2, 0, 2],
[-1, 0, 1],
])
image_dx = conv2d(image, sobel_x)
image_dy = conv2d(image, -sobel_x.T)
assert np.all(np.isclose(image_dy, image_dy_1))
assert np.all(np.isclose(image_dx, image_dx_1))
return image_dx, image_dy
def conv2d(x, kernel, stride=1, padding="auto", padding_mode="constant"):
"""
TAKEN AND ADAPTED FROM https://stackoverflow.com/questions/54962004/implement-max-mean-poolingwith-stride-with-numpy
ALSO INSPIRED FROM https://cs231n.github.io/convolutional-networks/
2D Pooling
Parameters:
A: input 2D array
kernel: int, the size of the window
stride: int, the stride of the window
padding: int or string, implicit zero paddings on both sides of the input
"""
# Padding
assert len(kernel.shape) == 2, "kernel should be 2d."
assert kernel.shape[0] % 2 == 1 and kernel.shape[1] % 2 == 1, "only odd-sized kernels are allowed"
kernel_size = kernel.shape[0]
if padding == "auto":
padding = np.array(kernel.shape) // 2
x = np.pad(x, padding, mode=padding_mode)
# Window view of X
output_shape = ((x.shape[0] - kernel.shape[0])//stride + 1,
(x.shape[1] - kernel.shape[1])//stride + 1)
x_w = as_strided(
x,
shape=output_shape + kernel.shape,
strides = (
stride*x.strides[0],
stride*x.strides[1]
) + x.strides
)
# ADAPTATION BELOW:
# patches is [#patches, k, k]
patches = x_w.reshape(-1, *kernel.shape)
flattened_kernel = kernel.flat
flattened_patches = patches.reshape([patches.shape[0], -1])
return np.dot(flattened_patches, flattened_kernel).reshape(output_shape)
def separable_conv2d(x, kernel_1d, stride=1, padding="auto", padding_mode="edge"):
assert len(kernel_1d.shape) == 1, kernel_1d
k = kernel_1d.shape[0]
k1 = kernel_1d.reshape([1, k])
result_1 = conv2d(x, k1, stride, padding, padding_mode)
k2 = k1.T
result_2 = conv2d(result_1, k2, stride, padding, padding_mode)
return result_2
def gaussian_kernel_1d(std=1, kernel_size=5):
x = np.arange(-(kernel_size//2), (kernel_size//2)+1, dtype=float)
g = np.exp(- (x**2 / (2 * std**2))) / (np.sqrt(2 * np.pi) * std)
# normalize the sum to 1
g = g / np.sum(g)
return g
def gaussian_blurring(image, std=1, kernel_size=5):
# # print(kernel)
# kernel = np.expand_dims(kernel, axis=0)
# kernel = kernel.T @ kernel
# kernel /= np.sum(kernel)
# print("BEFORE GAUSSIAN BLURRING:\n", image)
kernel = gaussian_kernel_1d(std, kernel_size)
image1 = separable_conv2d(image, kernel, padding_mode="constant")
# print("AFTER GAUSSIAN BLURRING1:\n", image1)
return image1
# slower alternative from openCV documentation:
kernel = 1 / 159 * np.array([
[2, 4, 5, 4, 2],
[4, 9, 12, 9, 4],
[5, 12, 15, 12, 5],
[4, 9, 12, 9, 4],
[2, 4, 5, 4, 2]
])
image2 = conv2d(image, kernel)
print("AFTER GAUSSIAN BLURRING2:\n", image2)
return image2
def gaussian_derivative_filtering(image, std=1, kernel_size=5):
kernel = gaussian_derivative_1d(std, kernel_size)
return separable_conv2d(image, kernel)
def gaussian_derivative_1d(sigma, kernel_size):
"""
ADAPTED FROM https://github.com/scipy/scipy/blob/5681835ec51b728fa0ea6237d46aa8032b9e1400/scipy/ndimage/filters.py#L136
Computes a 1D Gaussian derivative kernel's
"""
# #0th order kernel.
phi_x = gaussian_kernel_1d(sigma, kernel_size)
# f(x) = q(x) * phi(x) = q(x) * exp(p(x))
# f'(x) = (q'(x) + q(x) * p'(x)) * phi(x)
# p'(x) = -1 / sigma ** 2
# Implement q'(x) + q(x) * p'(x) as a matrix operator and apply to the
# coefficients of q(x)
q = np.zeros(2)
q[0] = 1
D = np.diag([1], 1) # D @ q(x) = q'(x)
sigma2 = sigma * sigma
P = np.diag(np.ones(1)/-sigma2, -1) # P @ q(x) = q(x) * p'(x)
Q_deriv = D + P
q = Q_deriv.dot(q)
x = np.arange(-(kernel_size//2), kernel_size//2 + 1)
exponent_range = np.arange(2)
q = (x[:, None] ** exponent_range).dot(q)
return q * phi_x
## CATEGORY 3 (This is a bonus!)
def HoughLinesP(image, rho, theta, threshold, lines, minLineLength, maxLineGap):
return cv2.HoughLinesP(image, rho, theta, threshold, lines, minLineLength, maxLineGap)
# X = np.random.rand(80, 160, 3)
X = np.array([
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 255, 255, 255, 7],
[5, 0, 23, 3, 6],
[1, 2, 3, 4, 8],
])
X = np.tile(X[..., np.newaxis], (10, 10, 3))
X[:,:,1] = X[:,:,0] * 0.5
X[:,:,2] = X[:,:,0] * 0.3
print(X.shape)
bob = Canny(X.astype(np.uint8), 75, 200)
print(bob) | random_line_split | |
apitest.go | package apitest
import (
"bufio"
"bytes"
"encoding/json"
"fmt"
"github.com/PaesslerAG/jsonpath"
"github.com/stretchr/testify/assert"
"net/http"
"net/http/httptest"
"net/http/httputil"
"strings"
"testing"
)
// APITest is the top level struct holding the test spec
type APITest struct {
handler http.Handler
request *Request
response *Response
observer Observe
t *testing.T
}
// New creates a new api test with the given http.Handler
func New(handler http.Handler) *Request {
apiTest := &APITest{}
request := &Request{apiTest: apiTest}
response := &Response{apiTest: apiTest}
apiTest.request = request
apiTest.response = response
apiTest.handler = handler
return apiTest.request
}
// Observer will be called by with the request and response on completion
type Observe func(*http.Response, *http.Request)
// Request is the user defined request that will be invoked on the handler under test
type Request struct {
method string
url string
body string
query map[string]string
queryCollection map[string][]string
headers map[string]string
cookies map[string]string
basicAuth string
apiTest *APITest
}
type pair struct {
l string
r string
}
var DumpHttp Observe = func(res *http.Response, req *http.Request) {
requestDump, err := httputil.DumpRequest(req, true)
if err == nil {
fmt.Println("--> http request dump\n\n" + string(requestDump))
}
responseDump, err := httputil.DumpResponse(res, true)
if err == nil {
fmt.Println("<-- http response dump:\n\n" + string(responseDump))
}
}
// Observe is a builder method for setting the observer
func (r *Request) Observe(observer Observe) *Request {
r.apiTest.observer = observer
return r
}
// Method is a builder method for setting the http method of the request
func (r *Request) Method(method string) *Request {
r.method = method
return r
}
// URL is a builder method for setting the url of the request
func (r *Request) URL(url string) *Request {
r.url = url
return r
}
// Get is a convenience method for setting the request as http.MethodGet
func (r *Request) Get(url string) *Request {
r.method = http.MethodGet
r.url = url
return r
}
// Post is a convenience method for setting the request as http.MethodPost
func (r *Request) Post(url string) *Request {
r.method = http.MethodPost
r.url = url
return r
}
// Put is a convenience method for setting the request as http.MethodPut
func (r *Request) Put(url string) *Request {
r.method = http.MethodPut
r.url = url
return r
}
// Delete is a convenience method for setting the request as http.MethodDelete
func (r *Request) Delete(url string) *Request {
r.method = http.MethodDelete
r.url = url
return r
}
// Patch is a convenience method for setting the request as http.MethodPatch
func (r *Request) Patch(url string) *Request {
r.method = http.MethodPatch
r.url = url
return r
}
// Body is a builder method to set the request body
func (r *Request) Body(b string) *Request {
r.body = b
return r
}
// Query is a builder method to set the request query parameters.
// This can be used in combination with request.QueryCollection
func (r *Request) Query(q map[string]string) *Request {
r.query = q
return r
}
// Query is a builder method to set the request query parameters
// This can be used in combination with request.Query
func (r *Request) QueryCollection(q map[string][]string) *Request {
r.queryCollection = q
return r
}
// Headers is a builder method to set the request headers
func (r *Request) Headers(h map[string]string) *Request {
r.headers = h
return r
}
// Headers is a builder method to set the request headers
func (r *Request) Cookies(c map[string]string) *Request {
r.cookies = c
return r
}
// BasicAuth is a builder method to sets basic auth on the request.
// The credentials should be provided delimited by a colon, e.g. "username:password"
func (r *Request) BasicAuth(auth string) *Request {
r.basicAuth = auth
return r
}
// Expect marks the request spec as complete and following code will define the expected response
func (r *Request) Expect(t *testing.T) *Response {
r.apiTest.t = t
return r.apiTest.response
}
// Response is the user defined expected response from the application under test
type Response struct {
status int
body string
headers map[string]string
cookies map[string]string
cookiesPresent []string
cookiesNotPresent []string
httpCookies []http.Cookie
jsonPathExpression string
jsonPathAssert func(interface{})
apiTest *APITest
assert Assert
}
// Assert is a user defined custom assertion function
type Assert func(*http.Response, *http.Request) error
// Body is the expected response body
func (r *Response) Body(b string) *Response {
r.body = b
return r
}
// Cookies is the expected response cookies
func (r *Response) Cookies(cookies map[string]string) *Response {
r.cookies = cookies
return r
}
// HttpCookies is the expected response cookies
func (r *Response) HttpCookies(cookies []http.Cookie) *Response {
r.httpCookies = cookies
return r
}
// CookiePresent is used to assert that a cookie is present in the response,
// regardless of its value
func (r *Response) CookiePresent(cookieName string) *Response {
r.cookiesPresent = append(r.cookiesPresent, cookieName)
return r
}
| func (r *Response) CookieNotPresent(cookieName string) *Response {
r.cookiesNotPresent = append(r.cookiesNotPresent, cookieName)
return r
}
// Headers is the expected response headers
func (r *Response) Headers(headers map[string]string) *Response {
r.headers = headers
return r
}
// Status is the expected response http status code
func (r *Response) Status(s int) *Response {
r.status = s
return r
}
// Assert allows the consumer to provide a user defined function containing their own
// custom assertions
func (r *Response) Assert(fn func(*http.Response, *http.Request) error) *Response {
r.assert = fn
return r.apiTest.response
}
// JSONPath provides support for jsonpath expectations as defined by https://goessner.net/articles/JsonPath/
func (r *Response) JSONPath(expression string, assert func(interface{})) *Response {
r.jsonPathExpression = expression
r.jsonPathAssert = assert
return r.apiTest.response
}
// End runs the test and all defined assertions
func (r *Response) End() {
r.apiTest.run()
}
func (a *APITest) run() {
res, req := a.runTest()
if a.observer != nil {
a.observer(res.Result(), req)
}
a.assertResponse(res)
a.assertHeaders(res)
a.assertCookies(res)
a.assertJSONPath(res)
if a.response.assert != nil {
err := a.response.assert(res.Result(), req)
if err != nil {
a.t.Fatal(err.Error())
}
}
}
func (a *APITest) runTest() (*httptest.ResponseRecorder, *http.Request) {
req := a.buildRequestFromTestCase()
res := httptest.NewRecorder()
a.handler.ServeHTTP(res, req)
return res, req
}
func (a *APITest) buildRequestFromTestCase() *http.Request {
req, _ := http.NewRequest(a.request.method, a.request.url, bytes.NewBufferString(a.request.body))
query := req.URL.Query()
if a.request.queryCollection != nil {
for _, param := range buildQueryCollection(a.request.queryCollection) {
query.Add(param.l, param.r)
}
}
if a.request.query != nil {
for k, v := range a.request.query {
query.Add(k, v)
}
}
if len(query) > 0 {
req.URL.RawQuery = query.Encode()
}
for k, v := range a.request.headers {
req.Header.Set(k, v)
}
for k, v := range a.request.cookies {
cookie := &http.Cookie{Name: k, Value: v}
req.AddCookie(cookie)
}
if a.request.basicAuth != "" {
parts := strings.Split(a.request.basicAuth, ":")
req.SetBasicAuth(parts[0], parts[1])
}
return req
}
func buildQueryCollection(params map[string][]string) []pair {
if len(params) == 0 {
return []pair{}
}
var pairs []pair
for k, v := range params {
for _, paramValue := range v {
pairs = append(pairs, pair{l: k, r: paramValue})
}
}
return pairs
}
func (a *APITest) assertResponse(res *httptest.ResponseRecorder) {
if a.response.status != 0 {
assert.Equal(a.t, a.response.status, res.Code)
}
if a.response.body != "" {
if isJSON(a.response.body) {
assert.JSONEq(a.t, a.response.body, res.Body.String())
} else {
assert.Equal(a.t, a.response.body, res.Body.String())
}
}
}
func (a *APITest) assertCookies(response *httptest.ResponseRecorder) {
if a.response.cookies != nil {
for name, value := range a.response.cookies {
foundCookie := false
for _, cookie := range getResponseCookies(response) {
if cookie.Name == name && cookie.Value == value {
foundCookie = true
}
}
assert.Equal(a.t, true, foundCookie, "Cookie not found - "+name)
}
}
if len(a.response.cookiesPresent) > 0 {
for _, cookieName := range a.response.cookiesPresent {
foundCookie := false
for _, cookie := range getResponseCookies(response) {
if cookie.Name == cookieName {
foundCookie = true
}
}
assert.True(a.t, foundCookie, "Cookie not found - "+cookieName)
}
}
if len(a.response.cookiesNotPresent) > 0 {
for _, cookieName := range a.response.cookiesNotPresent {
foundCookie := false
for _, cookie := range getResponseCookies(response) {
if cookie.Name == cookieName {
foundCookie = true
}
}
assert.False(a.t, foundCookie, "Cookie found - "+cookieName)
}
}
if len(a.response.httpCookies) > 0 {
for _, httpCookie := range a.response.httpCookies {
foundCookie := false
for _, cookie := range getResponseCookies(response) {
if compareHttpCookies(cookie, &httpCookie) {
foundCookie = true
}
}
assert.True(a.t, foundCookie, "Cookie not found - "+httpCookie.Name)
}
}
}
// only compare a subset of fields for flexibility
func compareHttpCookies(l *http.Cookie, r *http.Cookie) bool {
return l.Name == r.Name &&
l.Value == r.Value &&
l.Domain == r.Domain &&
l.Expires == r.Expires &&
l.MaxAge == r.MaxAge &&
l.Secure == r.Secure &&
l.HttpOnly == r.HttpOnly &&
l.SameSite == r.SameSite
}
func getResponseCookies(response *httptest.ResponseRecorder) []*http.Cookie {
for _, rawCookieString := range response.Result().Header["Set-Cookie"] {
rawRequest := fmt.Sprintf("GET / HTTP/1.0\r\nCookie: %s\r\n\r\n", rawCookieString)
req, err := http.ReadRequest(bufio.NewReader(strings.NewReader(rawRequest)))
if err != nil {
panic("failed to parse response cookies. error: " + err.Error())
}
return req.Cookies()
}
return []*http.Cookie{}
}
func (a *APITest) assertHeaders(res *httptest.ResponseRecorder) {
if a.response.headers != nil {
for k, v := range a.response.headers {
header := res.Header().Get(k)
assert.Equal(a.t, v, header, fmt.Sprintf("'%s' header should be equal", k))
}
}
}
func (a *APITest) assertJSONPath(res *httptest.ResponseRecorder) {
if a.response.jsonPathExpression != "" {
v := interface{}(nil)
err := json.Unmarshal(res.Body.Bytes(), &v)
value, err := jsonpath.Get(a.response.jsonPathExpression, v)
if err != nil {
assert.Nil(a.t, err)
}
a.response.jsonPathAssert(value.(interface{}))
}
}
func isJSON(s string) bool {
var js map[string]interface{}
return json.Unmarshal([]byte(s), &js) == nil
} | // CookieNotPresent is used to assert that a cookie is not present in the response | random_line_split |
apitest.go | package apitest
import (
"bufio"
"bytes"
"encoding/json"
"fmt"
"github.com/PaesslerAG/jsonpath"
"github.com/stretchr/testify/assert"
"net/http"
"net/http/httptest"
"net/http/httputil"
"strings"
"testing"
)
// APITest is the top level struct holding the test spec
type APITest struct {
handler http.Handler
request *Request
response *Response
observer Observe
t *testing.T
}
// New creates a new api test with the given http.Handler
func New(handler http.Handler) *Request {
apiTest := &APITest{}
request := &Request{apiTest: apiTest}
response := &Response{apiTest: apiTest}
apiTest.request = request
apiTest.response = response
apiTest.handler = handler
return apiTest.request
}
// Observer will be called by with the request and response on completion
type Observe func(*http.Response, *http.Request)
// Request is the user defined request that will be invoked on the handler under test
type Request struct {
method string
url string
body string
query map[string]string
queryCollection map[string][]string
headers map[string]string
cookies map[string]string
basicAuth string
apiTest *APITest
}
type pair struct {
l string
r string
}
var DumpHttp Observe = func(res *http.Response, req *http.Request) {
requestDump, err := httputil.DumpRequest(req, true)
if err == nil {
fmt.Println("--> http request dump\n\n" + string(requestDump))
}
responseDump, err := httputil.DumpResponse(res, true)
if err == nil {
fmt.Println("<-- http response dump:\n\n" + string(responseDump))
}
}
// Observe is a builder method for setting the observer
func (r *Request) Observe(observer Observe) *Request {
r.apiTest.observer = observer
return r
}
// Method is a builder method for setting the http method of the request
func (r *Request) Method(method string) *Request {
r.method = method
return r
}
// URL is a builder method for setting the url of the request
func (r *Request) URL(url string) *Request {
r.url = url
return r
}
// Get is a convenience method for setting the request as http.MethodGet
func (r *Request) Get(url string) *Request {
r.method = http.MethodGet
r.url = url
return r
}
// Post is a convenience method for setting the request as http.MethodPost
func (r *Request) | (url string) *Request {
r.method = http.MethodPost
r.url = url
return r
}
// Put is a convenience method for setting the request as http.MethodPut
func (r *Request) Put(url string) *Request {
r.method = http.MethodPut
r.url = url
return r
}
// Delete is a convenience method for setting the request as http.MethodDelete
func (r *Request) Delete(url string) *Request {
r.method = http.MethodDelete
r.url = url
return r
}
// Patch is a convenience method for setting the request as http.MethodPatch
func (r *Request) Patch(url string) *Request {
r.method = http.MethodPatch
r.url = url
return r
}
// Body is a builder method to set the request body
func (r *Request) Body(b string) *Request {
r.body = b
return r
}
// Query is a builder method to set the request query parameters.
// This can be used in combination with request.QueryCollection
func (r *Request) Query(q map[string]string) *Request {
r.query = q
return r
}
// Query is a builder method to set the request query parameters
// This can be used in combination with request.Query
func (r *Request) QueryCollection(q map[string][]string) *Request {
r.queryCollection = q
return r
}
// Headers is a builder method to set the request headers
func (r *Request) Headers(h map[string]string) *Request {
r.headers = h
return r
}
// Headers is a builder method to set the request headers
func (r *Request) Cookies(c map[string]string) *Request {
r.cookies = c
return r
}
// BasicAuth is a builder method to sets basic auth on the request.
// The credentials should be provided delimited by a colon, e.g. "username:password"
func (r *Request) BasicAuth(auth string) *Request {
r.basicAuth = auth
return r
}
// Expect marks the request spec as complete and following code will define the expected response
func (r *Request) Expect(t *testing.T) *Response {
r.apiTest.t = t
return r.apiTest.response
}
// Response is the user defined expected response from the application under test
type Response struct {
status int
body string
headers map[string]string
cookies map[string]string
cookiesPresent []string
cookiesNotPresent []string
httpCookies []http.Cookie
jsonPathExpression string
jsonPathAssert func(interface{})
apiTest *APITest
assert Assert
}
// Assert is a user defined custom assertion function
type Assert func(*http.Response, *http.Request) error
// Body is the expected response body
func (r *Response) Body(b string) *Response {
r.body = b
return r
}
// Cookies is the expected response cookies
func (r *Response) Cookies(cookies map[string]string) *Response {
r.cookies = cookies
return r
}
// HttpCookies is the expected response cookies
func (r *Response) HttpCookies(cookies []http.Cookie) *Response {
r.httpCookies = cookies
return r
}
// CookiePresent is used to assert that a cookie is present in the response,
// regardless of its value
func (r *Response) CookiePresent(cookieName string) *Response {
r.cookiesPresent = append(r.cookiesPresent, cookieName)
return r
}
// CookieNotPresent is used to assert that a cookie is not present in the response
func (r *Response) CookieNotPresent(cookieName string) *Response {
r.cookiesNotPresent = append(r.cookiesNotPresent, cookieName)
return r
}
// Headers is the expected response headers
func (r *Response) Headers(headers map[string]string) *Response {
r.headers = headers
return r
}
// Status is the expected response http status code
func (r *Response) Status(s int) *Response {
r.status = s
return r
}
// Assert allows the consumer to provide a user defined function containing their own
// custom assertions
func (r *Response) Assert(fn func(*http.Response, *http.Request) error) *Response {
r.assert = fn
return r.apiTest.response
}
// JSONPath provides support for jsonpath expectations as defined by https://goessner.net/articles/JsonPath/
func (r *Response) JSONPath(expression string, assert func(interface{})) *Response {
r.jsonPathExpression = expression
r.jsonPathAssert = assert
return r.apiTest.response
}
// End runs the test and all defined assertions
func (r *Response) End() {
r.apiTest.run()
}
func (a *APITest) run() {
res, req := a.runTest()
if a.observer != nil {
a.observer(res.Result(), req)
}
a.assertResponse(res)
a.assertHeaders(res)
a.assertCookies(res)
a.assertJSONPath(res)
if a.response.assert != nil {
err := a.response.assert(res.Result(), req)
if err != nil {
a.t.Fatal(err.Error())
}
}
}
func (a *APITest) runTest() (*httptest.ResponseRecorder, *http.Request) {
req := a.buildRequestFromTestCase()
res := httptest.NewRecorder()
a.handler.ServeHTTP(res, req)
return res, req
}
func (a *APITest) buildRequestFromTestCase() *http.Request {
req, _ := http.NewRequest(a.request.method, a.request.url, bytes.NewBufferString(a.request.body))
query := req.URL.Query()
if a.request.queryCollection != nil {
for _, param := range buildQueryCollection(a.request.queryCollection) {
query.Add(param.l, param.r)
}
}
if a.request.query != nil {
for k, v := range a.request.query {
query.Add(k, v)
}
}
if len(query) > 0 {
req.URL.RawQuery = query.Encode()
}
for k, v := range a.request.headers {
req.Header.Set(k, v)
}
for k, v := range a.request.cookies {
cookie := &http.Cookie{Name: k, Value: v}
req.AddCookie(cookie)
}
if a.request.basicAuth != "" {
parts := strings.Split(a.request.basicAuth, ":")
req.SetBasicAuth(parts[0], parts[1])
}
return req
}
// buildQueryCollection flattens a multi-valued parameter map into a flat
// list of key/value pairs, one pair per value.
func buildQueryCollection(params map[string][]string) []pair {
	pairs := []pair{}
	for key, values := range params {
		for _, value := range values {
			pairs = append(pairs, pair{l: key, r: value})
		}
	}
	return pairs
}
// assertResponse verifies the expected status code and body against the
// recorded response. JSON bodies are compared structurally, everything else
// byte for byte.
func (a *APITest) assertResponse(res *httptest.ResponseRecorder) {
	if a.response.status != 0 {
		assert.Equal(a.t, a.response.status, res.Code)
	}
	expectedBody := a.response.body
	if expectedBody == "" {
		return
	}
	actualBody := res.Body.String()
	if isJSON(expectedBody) {
		assert.JSONEq(a.t, expectedBody, actualBody)
		return
	}
	assert.Equal(a.t, expectedBody, actualBody)
}
// assertCookies checks every configured cookie expectation against the
// cookies actually set on the recorded response: exact name/value matches,
// presence by name, absence by name, and full http.Cookie comparisons.
func (a *APITest) assertCookies(response *httptest.ResponseRecorder) {
	// Parse the Set-Cookie headers once instead of once per expectation.
	responseCookies := getResponseCookies(response)
	for name, value := range a.response.cookies {
		foundCookie := false
		for _, cookie := range responseCookies {
			if cookie.Name == name && cookie.Value == value {
				foundCookie = true
			}
		}
		// assert.True for consistency with the other cookie assertions below
		// (was assert.Equal(a.t, true, ...)).
		assert.True(a.t, foundCookie, "Cookie not found - "+name)
	}
	for _, cookieName := range a.response.cookiesPresent {
		foundCookie := false
		for _, cookie := range responseCookies {
			if cookie.Name == cookieName {
				foundCookie = true
			}
		}
		assert.True(a.t, foundCookie, "Cookie not found - "+cookieName)
	}
	for _, cookieName := range a.response.cookiesNotPresent {
		foundCookie := false
		for _, cookie := range responseCookies {
			if cookie.Name == cookieName {
				foundCookie = true
			}
		}
		assert.False(a.t, foundCookie, "Cookie found - "+cookieName)
	}
	for _, httpCookie := range a.response.httpCookies {
		foundCookie := false
		for _, cookie := range responseCookies {
			// &httpCookie is only used within this iteration, so taking the
			// loop variable's address is safe.
			if compareHttpCookies(cookie, &httpCookie) {
				foundCookie = true
			}
		}
		assert.True(a.t, foundCookie, "Cookie not found - "+httpCookie.Name)
	}
}
// only compare a subset of fields for flexibility
func compareHttpCookies(l *http.Cookie, r *http.Cookie) bool {
return l.Name == r.Name &&
l.Value == r.Value &&
l.Domain == r.Domain &&
l.Expires == r.Expires &&
l.MaxAge == r.MaxAge &&
l.Secure == r.Secure &&
l.HttpOnly == r.HttpOnly &&
l.SameSite == r.SameSite
}
func getResponseCookies(response *httptest.ResponseRecorder) []*http.Cookie {
for _, rawCookieString := range response.Result().Header["Set-Cookie"] {
rawRequest := fmt.Sprintf("GET / HTTP/1.0\r\nCookie: %s\r\n\r\n", rawCookieString)
req, err := http.ReadRequest(bufio.NewReader(strings.NewReader(rawRequest)))
if err != nil {
panic("failed to parse response cookies. error: " + err.Error())
}
return req.Cookies()
}
return []*http.Cookie{}
}
// assertHeaders verifies that every expected header is present on the
// response with the expected value.
func (a *APITest) assertHeaders(res *httptest.ResponseRecorder) {
	// Ranging over a nil map is a no-op, so no explicit guard is needed.
	for name, want := range a.response.headers {
		got := res.Header().Get(name)
		assert.Equal(a.t, want, got, fmt.Sprintf("'%s' header should be equal", name))
	}
}
// assertJSONPath evaluates the configured jsonpath expression against the
// response body and hands the selected value to the user supplied assertion.
func (a *APITest) assertJSONPath(res *httptest.ResponseRecorder) {
	if a.response.jsonPathExpression == "" {
		return
	}
	var v interface{}
	if err := json.Unmarshal(res.Body.Bytes(), &v); err != nil {
		// Previously this error was silently shadowed by the jsonpath call.
		assert.Nil(a.t, err)
		return
	}
	value, err := jsonpath.Get(a.response.jsonPathExpression, v)
	if err != nil {
		assert.Nil(a.t, err)
		// Do not invoke the user assertion with an invalid value; the old
		// code fell through here and could panic inside the callback.
		return
	}
	a.response.jsonPathAssert(value)
}
// isJSON reports whether s parses as a JSON object (not an array or scalar,
// since the target is a map).
func isJSON(s string) bool {
	target := map[string]interface{}{}
	err := json.Unmarshal([]byte(s), &target)
	return err == nil
}
| Post | identifier_name |
apitest.go | package apitest
import (
"bufio"
"bytes"
"encoding/json"
"fmt"
"github.com/PaesslerAG/jsonpath"
"github.com/stretchr/testify/assert"
"net/http"
"net/http/httptest"
"net/http/httputil"
"strings"
"testing"
)
// APITest is the top level struct holding the test spec
type APITest struct {
handler http.Handler
request *Request
response *Response
observer Observe
t *testing.T
}
// New creates a new api test with the given http.Handler
func New(handler http.Handler) *Request {
apiTest := &APITest{}
request := &Request{apiTest: apiTest}
response := &Response{apiTest: apiTest}
apiTest.request = request
apiTest.response = response
apiTest.handler = handler
return apiTest.request
}
// Observer will be called by with the request and response on completion
type Observe func(*http.Response, *http.Request)
// Request is the user defined request that will be invoked on the handler under test
type Request struct {
method string
url string
body string
query map[string]string
queryCollection map[string][]string
headers map[string]string
cookies map[string]string
basicAuth string
apiTest *APITest
}
type pair struct {
l string
r string
}
var DumpHttp Observe = func(res *http.Response, req *http.Request) {
requestDump, err := httputil.DumpRequest(req, true)
if err == nil {
fmt.Println("--> http request dump\n\n" + string(requestDump))
}
responseDump, err := httputil.DumpResponse(res, true)
if err == nil {
fmt.Println("<-- http response dump:\n\n" + string(responseDump))
}
}
// Observe is a builder method for setting the observer
func (r *Request) Observe(observer Observe) *Request {
r.apiTest.observer = observer
return r
}
// Method is a builder method for setting the http method of the request
func (r *Request) Method(method string) *Request {
r.method = method
return r
}
// URL is a builder method for setting the url of the request
func (r *Request) URL(url string) *Request {
r.url = url
return r
}
// Get is a convenience method for setting the request as http.MethodGet
func (r *Request) Get(url string) *Request {
r.method = http.MethodGet
r.url = url
return r
}
// Post is a convenience method for setting the request as http.MethodPost
func (r *Request) Post(url string) *Request {
r.method = http.MethodPost
r.url = url
return r
}
// Put is a convenience method for setting the request as http.MethodPut
func (r *Request) Put(url string) *Request {
r.method = http.MethodPut
r.url = url
return r
}
// Delete is a convenience method for setting the request as http.MethodDelete
func (r *Request) Delete(url string) *Request {
r.method = http.MethodDelete
r.url = url
return r
}
// Patch is a convenience method for setting the request as http.MethodPatch
func (r *Request) Patch(url string) *Request {
r.method = http.MethodPatch
r.url = url
return r
}
// Body is a builder method to set the request body
func (r *Request) Body(b string) *Request {
r.body = b
return r
}
// Query is a builder method to set the request query parameters.
// This can be used in combination with request.QueryCollection
func (r *Request) Query(q map[string]string) *Request {
r.query = q
return r
}
// Query is a builder method to set the request query parameters
// This can be used in combination with request.Query
func (r *Request) QueryCollection(q map[string][]string) *Request {
r.queryCollection = q
return r
}
// Headers is a builder method to set the request headers
func (r *Request) Headers(h map[string]string) *Request {
r.headers = h
return r
}
// Headers is a builder method to set the request headers
func (r *Request) Cookies(c map[string]string) *Request {
r.cookies = c
return r
}
// BasicAuth is a builder method to sets basic auth on the request.
// The credentials should be provided delimited by a colon, e.g. "username:password"
func (r *Request) BasicAuth(auth string) *Request {
r.basicAuth = auth
return r
}
// Expect marks the request spec as complete and following code will define the expected response
func (r *Request) Expect(t *testing.T) *Response {
r.apiTest.t = t
return r.apiTest.response
}
// Response is the user defined expected response from the application under test
type Response struct {
status int
body string
headers map[string]string
cookies map[string]string
cookiesPresent []string
cookiesNotPresent []string
httpCookies []http.Cookie
jsonPathExpression string
jsonPathAssert func(interface{})
apiTest *APITest
assert Assert
}
// Assert is a user defined custom assertion function
type Assert func(*http.Response, *http.Request) error
// Body is the expected response body
func (r *Response) Body(b string) *Response {
r.body = b
return r
}
// Cookies is the expected response cookies
func (r *Response) Cookies(cookies map[string]string) *Response {
r.cookies = cookies
return r
}
// HttpCookies is the expected response cookies
func (r *Response) HttpCookies(cookies []http.Cookie) *Response {
r.httpCookies = cookies
return r
}
// CookiePresent is used to assert that a cookie is present in the response,
// regardless of its value
func (r *Response) CookiePresent(cookieName string) *Response {
r.cookiesPresent = append(r.cookiesPresent, cookieName)
return r
}
// CookieNotPresent is used to assert that a cookie is not present in the response
func (r *Response) CookieNotPresent(cookieName string) *Response {
r.cookiesNotPresent = append(r.cookiesNotPresent, cookieName)
return r
}
// Headers is the expected response headers
func (r *Response) Headers(headers map[string]string) *Response {
r.headers = headers
return r
}
// Status is the expected response http status code
func (r *Response) Status(s int) *Response {
r.status = s
return r
}
// Assert allows the consumer to provide a user defined function containing their own
// custom assertions
func (r *Response) Assert(fn func(*http.Response, *http.Request) error) *Response {
r.assert = fn
return r.apiTest.response
}
// JSONPath provides support for jsonpath expectations as defined by https://goessner.net/articles/JsonPath/
func (r *Response) JSONPath(expression string, assert func(interface{})) *Response {
r.jsonPathExpression = expression
r.jsonPathAssert = assert
return r.apiTest.response
}
// End runs the test and all defined assertions
func (r *Response) End() {
r.apiTest.run()
}
func (a *APITest) run() {
res, req := a.runTest()
if a.observer != nil {
a.observer(res.Result(), req)
}
a.assertResponse(res)
a.assertHeaders(res)
a.assertCookies(res)
a.assertJSONPath(res)
if a.response.assert != nil {
err := a.response.assert(res.Result(), req)
if err != nil {
a.t.Fatal(err.Error())
}
}
}
func (a *APITest) runTest() (*httptest.ResponseRecorder, *http.Request) {
req := a.buildRequestFromTestCase()
res := httptest.NewRecorder()
a.handler.ServeHTTP(res, req)
return res, req
}
func (a *APITest) buildRequestFromTestCase() *http.Request {
req, _ := http.NewRequest(a.request.method, a.request.url, bytes.NewBufferString(a.request.body))
query := req.URL.Query()
if a.request.queryCollection != nil {
for _, param := range buildQueryCollection(a.request.queryCollection) {
query.Add(param.l, param.r)
}
}
if a.request.query != nil {
for k, v := range a.request.query {
query.Add(k, v)
}
}
if len(query) > 0 {
req.URL.RawQuery = query.Encode()
}
for k, v := range a.request.headers {
req.Header.Set(k, v)
}
for k, v := range a.request.cookies {
cookie := &http.Cookie{Name: k, Value: v}
req.AddCookie(cookie)
}
if a.request.basicAuth != "" {
parts := strings.Split(a.request.basicAuth, ":")
req.SetBasicAuth(parts[0], parts[1])
}
return req
}
func buildQueryCollection(params map[string][]string) []pair {
if len(params) == 0 {
return []pair{}
}
var pairs []pair
for k, v := range params {
for _, paramValue := range v {
pairs = append(pairs, pair{l: k, r: paramValue})
}
}
return pairs
}
func (a *APITest) assertResponse(res *httptest.ResponseRecorder) {
if a.response.status != 0 {
assert.Equal(a.t, a.response.status, res.Code)
}
if a.response.body != "" {
if isJSON(a.response.body) {
assert.JSONEq(a.t, a.response.body, res.Body.String())
} else {
assert.Equal(a.t, a.response.body, res.Body.String())
}
}
}
func (a *APITest) assertCookies(response *httptest.ResponseRecorder) {
if a.response.cookies != nil {
for name, value := range a.response.cookies {
foundCookie := false
for _, cookie := range getResponseCookies(response) {
if cookie.Name == name && cookie.Value == value {
foundCookie = true
}
}
assert.Equal(a.t, true, foundCookie, "Cookie not found - "+name)
}
}
if len(a.response.cookiesPresent) > 0 {
for _, cookieName := range a.response.cookiesPresent {
foundCookie := false
for _, cookie := range getResponseCookies(response) {
if cookie.Name == cookieName {
foundCookie = true
}
}
assert.True(a.t, foundCookie, "Cookie not found - "+cookieName)
}
}
if len(a.response.cookiesNotPresent) > 0 {
for _, cookieName := range a.response.cookiesNotPresent {
foundCookie := false
for _, cookie := range getResponseCookies(response) {
if cookie.Name == cookieName {
foundCookie = true
}
}
assert.False(a.t, foundCookie, "Cookie found - "+cookieName)
}
}
if len(a.response.httpCookies) > 0 {
for _, httpCookie := range a.response.httpCookies {
foundCookie := false
for _, cookie := range getResponseCookies(response) {
if compareHttpCookies(cookie, &httpCookie) {
foundCookie = true
}
}
assert.True(a.t, foundCookie, "Cookie not found - "+httpCookie.Name)
}
}
}
// only compare a subset of fields for flexibility
func compareHttpCookies(l *http.Cookie, r *http.Cookie) bool {
return l.Name == r.Name &&
l.Value == r.Value &&
l.Domain == r.Domain &&
l.Expires == r.Expires &&
l.MaxAge == r.MaxAge &&
l.Secure == r.Secure &&
l.HttpOnly == r.HttpOnly &&
l.SameSite == r.SameSite
}
func getResponseCookies(response *httptest.ResponseRecorder) []*http.Cookie {
for _, rawCookieString := range response.Result().Header["Set-Cookie"] {
rawRequest := fmt.Sprintf("GET / HTTP/1.0\r\nCookie: %s\r\n\r\n", rawCookieString)
req, err := http.ReadRequest(bufio.NewReader(strings.NewReader(rawRequest)))
if err != nil {
panic("failed to parse response cookies. error: " + err.Error())
}
return req.Cookies()
}
return []*http.Cookie{}
}
func (a *APITest) assertHeaders(res *httptest.ResponseRecorder) {
if a.response.headers != nil {
for k, v := range a.response.headers {
header := res.Header().Get(k)
assert.Equal(a.t, v, header, fmt.Sprintf("'%s' header should be equal", k))
}
}
}
func (a *APITest) assertJSONPath(res *httptest.ResponseRecorder) {
if a.response.jsonPathExpression != "" {
v := interface{}(nil)
err := json.Unmarshal(res.Body.Bytes(), &v)
value, err := jsonpath.Get(a.response.jsonPathExpression, v)
if err != nil |
a.response.jsonPathAssert(value.(interface{}))
}
}
func isJSON(s string) bool {
var js map[string]interface{}
return json.Unmarshal([]byte(s), &js) == nil
}
| {
assert.Nil(a.t, err)
} | conditional_block |
apitest.go | package apitest
import (
"bufio"
"bytes"
"encoding/json"
"fmt"
"github.com/PaesslerAG/jsonpath"
"github.com/stretchr/testify/assert"
"net/http"
"net/http/httptest"
"net/http/httputil"
"strings"
"testing"
)
// APITest is the top level struct holding the test spec
type APITest struct {
handler http.Handler
request *Request
response *Response
observer Observe
t *testing.T
}
// New creates a new api test with the given http.Handler
func New(handler http.Handler) *Request {
apiTest := &APITest{}
request := &Request{apiTest: apiTest}
response := &Response{apiTest: apiTest}
apiTest.request = request
apiTest.response = response
apiTest.handler = handler
return apiTest.request
}
// Observer will be called by with the request and response on completion
type Observe func(*http.Response, *http.Request)
// Request is the user defined request that will be invoked on the handler under test
type Request struct {
method string
url string
body string
query map[string]string
queryCollection map[string][]string
headers map[string]string
cookies map[string]string
basicAuth string
apiTest *APITest
}
type pair struct {
l string
r string
}
var DumpHttp Observe = func(res *http.Response, req *http.Request) {
requestDump, err := httputil.DumpRequest(req, true)
if err == nil {
fmt.Println("--> http request dump\n\n" + string(requestDump))
}
responseDump, err := httputil.DumpResponse(res, true)
if err == nil {
fmt.Println("<-- http response dump:\n\n" + string(responseDump))
}
}
// Observe is a builder method for setting the observer
func (r *Request) Observe(observer Observe) *Request {
r.apiTest.observer = observer
return r
}
// Method is a builder method for setting the http method of the request
func (r *Request) Method(method string) *Request {
r.method = method
return r
}
// URL is a builder method for setting the url of the request
func (r *Request) URL(url string) *Request {
r.url = url
return r
}
// Get is a convenience method for setting the request as http.MethodGet
func (r *Request) Get(url string) *Request {
r.method = http.MethodGet
r.url = url
return r
}
// Post is a convenience method for setting the request as http.MethodPost
func (r *Request) Post(url string) *Request {
r.method = http.MethodPost
r.url = url
return r
}
// Put is a convenience method for setting the request as http.MethodPut
func (r *Request) Put(url string) *Request {
r.method = http.MethodPut
r.url = url
return r
}
// Delete is a convenience method for setting the request as http.MethodDelete
func (r *Request) Delete(url string) *Request {
r.method = http.MethodDelete
r.url = url
return r
}
// Patch is a convenience method for setting the request as http.MethodPatch
func (r *Request) Patch(url string) *Request {
r.method = http.MethodPatch
r.url = url
return r
}
// Body is a builder method to set the request body
func (r *Request) Body(b string) *Request {
r.body = b
return r
}
// Query is a builder method to set the request query parameters.
// This can be used in combination with request.QueryCollection
func (r *Request) Query(q map[string]string) *Request {
r.query = q
return r
}
// Query is a builder method to set the request query parameters
// This can be used in combination with request.Query
func (r *Request) QueryCollection(q map[string][]string) *Request {
r.queryCollection = q
return r
}
// Headers is a builder method to set the request headers
func (r *Request) Headers(h map[string]string) *Request {
r.headers = h
return r
}
// Headers is a builder method to set the request headers
func (r *Request) Cookies(c map[string]string) *Request {
r.cookies = c
return r
}
// BasicAuth is a builder method to sets basic auth on the request.
// The credentials should be provided delimited by a colon, e.g. "username:password"
func (r *Request) BasicAuth(auth string) *Request {
r.basicAuth = auth
return r
}
// Expect marks the request spec as complete and following code will define the expected response
func (r *Request) Expect(t *testing.T) *Response {
r.apiTest.t = t
return r.apiTest.response
}
// Response is the user defined expected response from the application under test
type Response struct {
status int
body string
headers map[string]string
cookies map[string]string
cookiesPresent []string
cookiesNotPresent []string
httpCookies []http.Cookie
jsonPathExpression string
jsonPathAssert func(interface{})
apiTest *APITest
assert Assert
}
// Assert is a user defined custom assertion function
type Assert func(*http.Response, *http.Request) error
// Body is the expected response body
func (r *Response) Body(b string) *Response {
r.body = b
return r
}
// Cookies is the expected response cookies
func (r *Response) Cookies(cookies map[string]string) *Response {
r.cookies = cookies
return r
}
// HttpCookies is the expected response cookies
func (r *Response) HttpCookies(cookies []http.Cookie) *Response {
r.httpCookies = cookies
return r
}
// CookiePresent is used to assert that a cookie is present in the response,
// regardless of its value
func (r *Response) CookiePresent(cookieName string) *Response {
r.cookiesPresent = append(r.cookiesPresent, cookieName)
return r
}
// CookieNotPresent is used to assert that a cookie is not present in the response
func (r *Response) CookieNotPresent(cookieName string) *Response {
r.cookiesNotPresent = append(r.cookiesNotPresent, cookieName)
return r
}
// Headers is the expected response headers
func (r *Response) Headers(headers map[string]string) *Response {
r.headers = headers
return r
}
// Status is the expected response http status code
func (r *Response) Status(s int) *Response {
r.status = s
return r
}
// Assert allows the consumer to provide a user defined function containing their own
// custom assertions
func (r *Response) Assert(fn func(*http.Response, *http.Request) error) *Response {
r.assert = fn
return r.apiTest.response
}
// JSONPath provides support for jsonpath expectations as defined by https://goessner.net/articles/JsonPath/
func (r *Response) JSONPath(expression string, assert func(interface{})) *Response {
r.jsonPathExpression = expression
r.jsonPathAssert = assert
return r.apiTest.response
}
// End runs the test and all defined assertions
func (r *Response) End() {
r.apiTest.run()
}
func (a *APITest) run() {
res, req := a.runTest()
if a.observer != nil {
a.observer(res.Result(), req)
}
a.assertResponse(res)
a.assertHeaders(res)
a.assertCookies(res)
a.assertJSONPath(res)
if a.response.assert != nil {
err := a.response.assert(res.Result(), req)
if err != nil {
a.t.Fatal(err.Error())
}
}
}
func (a *APITest) runTest() (*httptest.ResponseRecorder, *http.Request) {
req := a.buildRequestFromTestCase()
res := httptest.NewRecorder()
a.handler.ServeHTTP(res, req)
return res, req
}
func (a *APITest) buildRequestFromTestCase() *http.Request |
func buildQueryCollection(params map[string][]string) []pair {
if len(params) == 0 {
return []pair{}
}
var pairs []pair
for k, v := range params {
for _, paramValue := range v {
pairs = append(pairs, pair{l: k, r: paramValue})
}
}
return pairs
}
func (a *APITest) assertResponse(res *httptest.ResponseRecorder) {
if a.response.status != 0 {
assert.Equal(a.t, a.response.status, res.Code)
}
if a.response.body != "" {
if isJSON(a.response.body) {
assert.JSONEq(a.t, a.response.body, res.Body.String())
} else {
assert.Equal(a.t, a.response.body, res.Body.String())
}
}
}
func (a *APITest) assertCookies(response *httptest.ResponseRecorder) {
if a.response.cookies != nil {
for name, value := range a.response.cookies {
foundCookie := false
for _, cookie := range getResponseCookies(response) {
if cookie.Name == name && cookie.Value == value {
foundCookie = true
}
}
assert.Equal(a.t, true, foundCookie, "Cookie not found - "+name)
}
}
if len(a.response.cookiesPresent) > 0 {
for _, cookieName := range a.response.cookiesPresent {
foundCookie := false
for _, cookie := range getResponseCookies(response) {
if cookie.Name == cookieName {
foundCookie = true
}
}
assert.True(a.t, foundCookie, "Cookie not found - "+cookieName)
}
}
if len(a.response.cookiesNotPresent) > 0 {
for _, cookieName := range a.response.cookiesNotPresent {
foundCookie := false
for _, cookie := range getResponseCookies(response) {
if cookie.Name == cookieName {
foundCookie = true
}
}
assert.False(a.t, foundCookie, "Cookie found - "+cookieName)
}
}
if len(a.response.httpCookies) > 0 {
for _, httpCookie := range a.response.httpCookies {
foundCookie := false
for _, cookie := range getResponseCookies(response) {
if compareHttpCookies(cookie, &httpCookie) {
foundCookie = true
}
}
assert.True(a.t, foundCookie, "Cookie not found - "+httpCookie.Name)
}
}
}
// only compare a subset of fields for flexibility
func compareHttpCookies(l *http.Cookie, r *http.Cookie) bool {
return l.Name == r.Name &&
l.Value == r.Value &&
l.Domain == r.Domain &&
l.Expires == r.Expires &&
l.MaxAge == r.MaxAge &&
l.Secure == r.Secure &&
l.HttpOnly == r.HttpOnly &&
l.SameSite == r.SameSite
}
func getResponseCookies(response *httptest.ResponseRecorder) []*http.Cookie {
for _, rawCookieString := range response.Result().Header["Set-Cookie"] {
rawRequest := fmt.Sprintf("GET / HTTP/1.0\r\nCookie: %s\r\n\r\n", rawCookieString)
req, err := http.ReadRequest(bufio.NewReader(strings.NewReader(rawRequest)))
if err != nil {
panic("failed to parse response cookies. error: " + err.Error())
}
return req.Cookies()
}
return []*http.Cookie{}
}
func (a *APITest) assertHeaders(res *httptest.ResponseRecorder) {
if a.response.headers != nil {
for k, v := range a.response.headers {
header := res.Header().Get(k)
assert.Equal(a.t, v, header, fmt.Sprintf("'%s' header should be equal", k))
}
}
}
func (a *APITest) assertJSONPath(res *httptest.ResponseRecorder) {
if a.response.jsonPathExpression != "" {
v := interface{}(nil)
err := json.Unmarshal(res.Body.Bytes(), &v)
value, err := jsonpath.Get(a.response.jsonPathExpression, v)
if err != nil {
assert.Nil(a.t, err)
}
a.response.jsonPathAssert(value.(interface{}))
}
}
func isJSON(s string) bool {
var js map[string]interface{}
return json.Unmarshal([]byte(s), &js) == nil
}
| {
req, _ := http.NewRequest(a.request.method, a.request.url, bytes.NewBufferString(a.request.body))
query := req.URL.Query()
if a.request.queryCollection != nil {
for _, param := range buildQueryCollection(a.request.queryCollection) {
query.Add(param.l, param.r)
}
}
if a.request.query != nil {
for k, v := range a.request.query {
query.Add(k, v)
}
}
if len(query) > 0 {
req.URL.RawQuery = query.Encode()
}
for k, v := range a.request.headers {
req.Header.Set(k, v)
}
for k, v := range a.request.cookies {
cookie := &http.Cookie{Name: k, Value: v}
req.AddCookie(cookie)
}
if a.request.basicAuth != "" {
parts := strings.Split(a.request.basicAuth, ":")
req.SetBasicAuth(parts[0], parts[1])
}
return req
} | identifier_body |
raft.go | package raft
import (
"flag"
"fmt"
"log"
"math"
"math/rand"
"strings"
"time"
)
var hostfile string
var duration int
var verbose bool
// wonElection returns true when this agent has collected votes from a
// majority of voters for the proposed view (see haveMajority).
func (r *RaftNode) wonElection() bool {
	return haveMajority(r.votes, "ELECTION", r.verbose)
}
// haveMajority reports whether a strict majority of the hosts in votes voted
// yes. label is only used to tag the verbose log line (e.g. "ELECTION").
func haveMajority(votes map[HostID]bool, label string, verbose bool) bool {
	var sb strings.Builder
	nVoters := len(votes)
	// Strict majority: floor(n/2) + 1 yes-votes are required.
	nReq := int(math.Floor(float64(nVoters)/2)) + 1
	nFound := 0
	sb.WriteString("[")
	for hostID, votedYes := range votes {
		if votedYes {
			nFound++
		}
		sb.WriteString(fmt.Sprintf("|host %d, votedYes %t|", hostID, votedYes))
	}
	sb.WriteString("]")
	if verbose {
		log.Printf("Checking %s majority. nVoters: %d, nReq: %d, nFound: %d, Votes: %s", label, nVoters, nReq, nFound, sb.String())
	}
	return nFound >= nReq
}
// NOTE - important that for all the shiftTo...() functions, we must first set our state variable
// shiftToFollower moves this node to the follower state for term t under
// leaderID, and clears all leader-only volatile state and our vote.
func (r *RaftNode) shiftToFollower(t Term, leaderID HostID) {
	if r.verbose {
		log.Printf("############ SHIFT TO FOLLOWER, Term: %d, LeaderID: %d", t, leaderID)
	}
	r.state = follower
	r.CurrentTerm = t
	r.currentLeader = leaderID
	// nextIndex/matchIndex are leader-only bookkeeping; drop them.
	r.nextIndex = nil
	r.matchIndex = nil
	// -1 marks "voted for nobody" in this term.
	r.VotedFor = -1
}
// NOTE - We only become leader by doing shiftToCandidate() and voting for ourself
// Therefore, we know who we voted for.
// We have already adjusted our currentTerm (during shiftToCandidare())
// shiftToLeader moves this node to the leader state and reinitializes the
// per-peer replication indices, then (via defer) immediately sends a
// heartbeat AppendEntries round to assert leadership.
func (r *RaftNode) shiftToLeader() {
	defer r.heartbeatAppendEntriesRPC() // We need to confirm leadership with all nodes
	if r.verbose {
		log.Printf("############ SHIFT TO LEADER. Term: %d", r.CurrentTerm)
	}
	r.state = leader
	r.currentLeader = r.id
	// Reset leader volatile state:
	// nextIndex starts optimistically just past our log; matchIndex at 0.
	r.nextIndex = make(map[HostID]LogIndex)
	r.matchIndex = make(map[HostID]LogIndex)
	for peerID := range r.hosts {
		r.nextIndex[peerID] = r.getLastLogIndex() + 1
		r.matchIndex[peerID] = LogIndex(0)
	}
}
// election starts a new election: become candidate (incrementing the term
// and voting for ourselves) and request votes from all peers.
func (r *RaftNode) election() {
	r.shiftToCandidate()
	r.multiRequestVoteRPC()
}
// shiftToCandidate moves this node to the candidate state: reset election
// tickers, start a fresh tally with our own yes-vote, bump the term, and
// record that we voted for ourselves.
func (r *RaftNode) shiftToCandidate() {
	r.resetTickers()
	if r.verbose {
		log.Println("############ SHIFT TO CANDIDATE")
	}
	// Fresh tally: we vote yes for ourselves, every peer starts at no.
	r.votes = make(electionResults)
	r.votes[r.id] = true
	for hostID := range r.hosts {
		if hostID != r.id {
			r.votes[hostID] = false
		}
	}
	r.state = candidate
	r.CurrentTerm++
	r.VotedFor = r.id
}
// StoreClientData allows a client to send data to the raft cluster via RPC for storage
// We fill the reply struct with "success = true" if we are leader and store the data successfully.
// If we are not leader, we will reply with the id of another node, and the client
// must detect this and retry at that node.
// If we do not know or do not yet have a leader, we will reply with leader = -1 and
// client may choose to retry at us or another random node.
// TODO - need a version of StoreClientData that ensures some form of commitment after leader responds to a message?
func (r *RaftNode) StoreClientData(cd ClientDataStruct, response *ClientResponse) error {
	r.Lock()
	defer r.Unlock()
	if r.verbose {
		log.Println("############ StoreClientData()")
	}
	// NOTE - if we do not yet know leader, client will see response.leader = -1.
	// They should wait before recontact, and may recontact us or another random node
	// Deferred in LIFO order: executeLog runs before persistState on return.
	defer r.persistState()
	defer r.executeLog()
	response.Leader = r.currentLeader
	response.Success = false // by default, assume we will fail
	if r.state != leader {
		// Not leader: reply with our current leader hint and Success=false.
		return nil
	}
	// Try to short-circuit based on the client serial num
	// (deduplicates retries of a request we already processed).
	if haveNewer, prevReply := r.haveNewerSerialNum(cd.ClientID, cd.ClientSerialNum); haveNewer {
		response.Success = prevReply.Success
		// response.leader = prevReply.leader
		// NOTE - we do not want to notify about the previous leader, because if it is not us, the client will
		// just get confused and contact the wrong node next time
		// this situation only arises if the client's previous attempt was partially successful, but leader crashed before replying
		return nil
	}
	// We are the leader and this is a new entry. Attempt to replicate this to all peer logs
	response.Success = true
	entry := LogEntry{
		Term:            r.CurrentTerm,
		ClientData:      cd.Data,
		ClientID:        cd.ClientID,
		ClientSerialNum: cd.ClientSerialNum,
		ClientResponse: ClientResponse{
			Success: response.Success,
			Leader:  r.id}}
	r.append(entry)
	// Replicate asynchronously; the client reply does not wait for commitment.
	go r.heartbeatAppendEntriesRPC()
	return nil
}
// After sending updates to other nodes, we try to advance our commitIndex
// At the end, we try to execute log
// (Raft paper section 5.3/5.4: only entries from the current term are
// committed by counting replicas.)
func (r *RaftNode) updateCommitIndex() {
	// If there exists an N such that:
	//   1) N > commitIndex,
	//   2) a majority of matchIndex[i] >= N, and
	//   3) log[N].term == currentTerm
	// Then:
	//   set commitIndex = N
	for n := r.commitIndex + 1; n <= r.getLastLogIndex(); n++ {
		if r.Log[n].Term != r.CurrentTerm {
			if r.verbose {
				log.Printf("commitIndex %d ineligible because of log entry %s", n, r.Log[n].String())
			}
			continue
		}
		// Count ourselves plus every peer whose matchIndex has reached n.
		peersAtThisLevel := make(map[HostID]bool)
		for hostID := range r.hosts {
			if hostID == r.id {
				peersAtThisLevel[hostID] = true
			} else {
				peersAtThisLevel[hostID] = r.matchIndex[hostID] >= n
			}
		}
		if haveMajority(peersAtThisLevel, "COMMIT IDX", r.verbose) {
			r.commitIndex = n
		}
	}
}
// Based on our commit index, apply any log entries that are ready for commit
// This function should be idempotent and safe to apply often.
func (r *RaftNode) executeLog() {
	// Apply committed-but-unapplied entries to the state machine in log
	// order, advancing lastApplied exactly once per entry.
	for r.commitIndex > r.lastApplied {
		r.lastApplied++
		r.StateMachine.apply(r.Log[r.lastApplied])
	}
}
// AppendEntries is called by RPC from the leader to modify the log of a follower.
// TODO - some amount of duplicated logic in AppendEntries() and Vote()
// Returns false if entries were rejected, or true if accepted
func (r *RaftNode) | (ae AppendEntriesStruct, response *RPCResponse) error {
r.Lock()
defer r.Unlock()
if r.verbose {
log.Printf("AppendEntries(). ae: %s", ae.String())
log.Printf("My log: %s", r.Log.String())
}
response.Term = r.CurrentTerm
if ae.LeaderID == r.currentLeader {
if r.verbose {
log.Println("AppendEntries from leader - reset tickers")
}
r.resetTickers()
}
// Reply false if term < currentTerm
if ae.Term < r.CurrentTerm {
if r.verbose {
log.Println("AE from stale term")
}
response.Term = r.CurrentTerm
response.Success = false
return nil
}
// NOTE - shifting to follower each time might sound misleading, but keeps things uniform
r.shiftToFollower(ae.Term, ae.LeaderID)
// Reply false if log doesn't contain an entry at prevLogIndex whose term matches prevLogTerm
if int(ae.PrevLogIndex) >= len(r.Log) || // index out-of-bounds
r.Log[ae.PrevLogIndex].Term != ae.PrevLogTerm {
if r.verbose {
log.Println("my PrevLogTerm does not match theirs")
}
response.Term = r.CurrentTerm
response.Success = false
return nil
}
// If an existing entry conflicts with a new one (same index, but different terms),
// delete the existing entry and all that follow it
if r.verbose {
log.Println("Applying entries...")
}
offset := int(ae.PrevLogIndex) + 1
for i, entry := range ae.Entries {
if i+offset >= len(r.Log) { // We certainly have no conflict
if r.verbose {
log.Printf("Apply without conflict: index=%d", i+offset)
}
r.append(entry)
} else {
if r.Log[i+offset].Term != ae.Entries[i].Term { // We have conflicting entry
if r.verbose {
log.Printf("Conflict - delete suffix! (we have term=%d, they have term=%d). Delete our log from index=%d onwards.", r.Log[i+offset].Term, ae.Entries[i].Term, i+offset)
}
r.Log = r.Log[:i+offset] // delete the existing entry and all that follow it
r.append(entry) // append the current entry
log.Printf("\n\nLog: %s\n\n", stringOneLog(r.Log))
} else if r.Log[i+offset] != entry {
log.Printf("\nOURS: %s\n\nTHEIRS: %s", r.Log[i+offset].String(), entry.String())
panic("log safety violation occurred somewhere")
}
}
}
response.Success = true
lastIndex := r.getLastLogIndex()
// Now we need to decide how to set our local commit index
if ae.LeaderCommit > r.commitIndex {
r.commitIndex = min(lastIndex, ae.LeaderCommit)
}
r.executeLog()
r.persistState()
return nil
}
// CandidateLooksEligible allows a raft node to decide whether another host's log is sufficiently up-to-date to become leader
// Returns true if the incoming RequestVote shows that the peer is at least as up-to-date as we are
// See paper section 5.4
func (r *RaftNode) CandidateLooksEligible(candLastLogIdx LogIndex, candLastLogTerm Term) bool {
ourLastLogTerm := r.getLastLogTerm()
ourLastLogIdx := r.getLastLogIndex()
if r.verbose {
log.Printf("We have: lastLogTerm=%d, lastLogIdx=%d. They have: lastLogTerm=%d, lastLogIdx=%d", ourLastLogTerm, ourLastLogIdx, candLastLogTerm, candLastLogIdx)
}
if ourLastLogTerm == candLastLogTerm {
return candLastLogIdx >= ourLastLogIdx
}
return candLastLogTerm >= ourLastLogTerm
}
// Vote is called by RPC from a candidate. We can observe the following from the raft.github.io simulation:
// 1) If we get a requestVoteRPC from a future term, we immediately jump to that term and send our vote
// 2) If we are already collecting votes for the next election, and simultaneously get a request from another node to vote for them, we do NOT give them our vote
// (we've already voted for ourselves!)
// 3) if we've been offline, and wakeup and try to get votes: we get rejections, that also tell us the new term, and we immediately jump to that term as a follower
func (r *RaftNode) Vote(rv RequestVoteStruct, response *RPCResponse) error {
r.Lock()
defer r.Unlock()
if r.verbose {
log.Println("Vote()")
}
defer r.persistState()
response.Term = r.CurrentTerm
myLastLogTerm := r.getLastLogTerm()
myLastLogIdx := r.getLastLogIndex()
if r.verbose {
log.Printf("RequestVoteStruct: %s. \nMy node: term: %d, votedFor %d, lastLogTerm: %d, lastLogIdx: %d",
rv.String(), r.CurrentTerm, r.VotedFor, myLastLogTerm, myLastLogIdx)
}
looksEligible := r.CandidateLooksEligible(rv.LastLogIdx, rv.LastLogTerm)
if rv.Term > r.CurrentTerm {
r.shiftToFollower(rv.Term, HostID(-1)) // We do not yet know who is leader for this term
}
if rv.Term < r.CurrentTerm {
if r.verbose {
log.Println("RV from prior term - do not grant vote")
}
response.Success = false
} else if (r.VotedFor == -1 || r.VotedFor == rv.CandidateID) && looksEligible {
if r.verbose {
log.Println("Grant vote")
}
r.resetTickers()
response.Success = true
r.VotedFor = rv.CandidateID
} else {
if r.verbose {
log.Println("Do not grant vote")
}
response.Success = false
}
return nil
}
func (r *RaftNode) getLastLogIndex() LogIndex {
if len(r.Log) > 0 {
return LogIndex(len(r.Log) - 1)
}
return LogIndex(0)
}
func (r *RaftNode) getLastLogTerm() Term {
return Term(r.Log[int(r.getLastLogIndex())].Term)
}
func max(x LogIndex, y LogIndex) LogIndex {
if x > y {
return x
}
return y
}
func min(x LogIndex, y LogIndex) LogIndex {
if x < y {
return x
}
return y
}
func (r *RaftNode) QuitChan() chan bool {
return r.quitChan
}
// Start is the entrypoint for a raft node (constructed here or in a test) to begin the protocol
func (r *RaftNode) Start() {
go r.recvDaemon()
go r.protocol()
go r.quitter(duration)
<-r.quitChan
log.Println("FINISH EXPERIMENT...")
r.printResults()
}
// Main Raft protocol
func (r *RaftNode) protocol() {
r.resetTickers()
log.Printf("Begin Protocol. verbose: %t", r.verbose)
for {
select {
case m := <-r.incomingChan:
if m.response.Term > r.CurrentTerm {
if r.verbose {
log.Printf("Received reply from hostID %d with higher term: %d and leaderid: %d", m.hostID, m.response.Term, m.response.LeaderID)
}
r.shiftToFollower(m.response.Term, m.response.LeaderID)
}
switch m.msgType {
case vote:
if r.verbose {
log.Printf("processing vote reply, hostID=%d: response=%s", m.hostID, m.response.String())
}
r.Lock()
if r.state == candidate {
r.votes[m.hostID] = m.response.Success
if r.wonElection() {
r.shiftToLeader()
}
}
r.Unlock()
case appendEntries:
r.Lock()
if r.state == leader { // We might have been deposed
// Inspect response and update our tracking variables appropriately
if !m.response.Success {
// TODO - When responding to AppendEntries, the follower should return success if they do a new append, OR if they already have appended that entry
prev := r.nextIndex[m.hostID]
next := max(0, r.nextIndex[m.hostID]-1)
if r.verbose {
log.Printf("Decrement nextIndex for hostID %d from %d to %d", m.hostID, prev, next)
}
r.nextIndex[m.hostID] = next
} else {
prev := r.nextIndex[m.hostID]
next := prev + LogIndex(m.aeLength)
if r.verbose {
log.Printf("Increment nextIndex for hostID %d from %d to %d", m.hostID, prev, next)
}
r.matchIndex[m.hostID] = prev
r.nextIndex[m.hostID] = next
}
r.executeLog()
}
r.Unlock()
default:
panic(fmt.Sprintf("invalid msg type. %s", m.String()))
}
case <-r.heartbeatTicker.C: // Send append entries, either empty or full depending on the peer's log index
if r.state == leader {
r.Lock()
r.heartbeatAppendEntriesRPC()
r.updateCommitIndex()
r.executeLog()
r.Unlock()
}
case <-r.electionTicker.C: // Periodically time out and start a new election
if r.state == follower || r.state == candidate {
if r.verbose {
log.Println("TIMED OUT - STARTING ELECTION")
}
r.election()
}
case <-r.quitChan:
r.shutdown()
}
}
}
// Quit the protocol on a timer (to be run in separate goroutine)
func (r *RaftNode) quitter(quitTime int) {
for {
select {
case <-r.quitChan: // the node decided it should quit
return
case <-time.After(time.Duration(quitTime) * time.Second): // we decide the node should quit
r.quitChan <- true
return
}
}
}
func (r *RaftNode) resetElectionTicker() time.Duration {
var newTimeout time.Duration = selectElectionTimeout(r.id) * r.timeoutUnits
if r.verbose {
log.Printf("new election timeout: %s", newTimeout.String())
}
r.electionTicker = *time.NewTicker(newTimeout)
return newTimeout
}
func (r *RaftNode) resetHeartbeatTicker() time.Duration {
var newTimeout time.Duration = heartbeatTimeout * r.timeoutUnits
if r.verbose {
log.Printf("new heartbeat timeout: %s", newTimeout.String())
}
r.heartbeatTicker = *time.NewTicker(newTimeout)
return newTimeout
}
func (r *RaftNode) resetTickers() (time.Duration, time.Duration) {
electionTimeout := r.resetElectionTicker()
heartbeatTimeout := r.resetHeartbeatTicker()
return electionTimeout, heartbeatTimeout
}
func (r *RaftNode) shutdown() {
log.Println("RAFT NODE SHUTTING DOWN")
r.quitChan <- true
}
func init() {
flag.StringVar(&hostfile, "h", "hostfile.json", "name of hostfile")
flag.IntVar(&duration, "d", 30, "time until node shutdown")
flag.BoolVar(&verbose, "v", false, "verbose output")
rand.Seed(time.Now().UTC().UnixNano())
}
// Raft is the entrypoint function for the raft replicated state machine protocol
// It parses flags, resolves this node's identity and peers from the hostfile,
// constructs the node, and runs it until shutdown.
func Raft() {
	flag.Parse()
	hosts := make(HostMap)
	clients := make(ClientMap)
	quitChan := make(chan bool)
	// ResolveAllPeers presumably populates hosts/clients from the hostfile and
	// returns our numeric ID and receive port — confirm against its definition.
	intID, recvPort := ResolveAllPeers(hosts, clients, hostfile, true)
	id := HostID(intID)
	r := NewRaftNode(id, recvPort, hosts, clients, quitChan)
	log.Printf("RaftNode: %s", r.String())
	r.Start() // blocks until the node quits
}
| AppendEntries | identifier_name |
raft.go | package raft
import (
"flag"
"fmt"
"log"
"math"
"math/rand"
"strings"
"time"
)
var hostfile string
var duration int
var verbose bool
// returns true when an agent has a majority of votes for the proposed view
func (r *RaftNode) wonElection() bool {
return haveMajority(r.votes, "ELECTION", r.verbose)
}
func haveMajority(votes map[HostID]bool, label string, verbose bool) bool {
var sb strings.Builder
nVoters := len(votes)
nReq := int(math.Floor(float64(nVoters)/2)) + 1
nFound := 0
sb.WriteString("[")
for hostID, votedYes := range votes {
if votedYes {
nFound++
}
sb.WriteString(fmt.Sprintf("|host %d, votedYes %t|", hostID, votedYes))
}
sb.WriteString("]")
if verbose {
log.Printf("Checking %s majority. nVoters: %d, nReq: %d, nFound: %d, Votes: %s", label, nVoters, nReq, nFound, sb.String())
}
return nFound >= nReq
}
// NOTE - important that for all the shiftTo...() functions, we must first set our state variable
func (r *RaftNode) shiftToFollower(t Term, leaderID HostID) {
if r.verbose {
log.Printf("############ SHIFT TO FOLLOWER, Term: %d, LeaderID: %d", t, leaderID)
}
r.state = follower
r.CurrentTerm = t
r.currentLeader = leaderID
r.nextIndex = nil
r.matchIndex = nil
r.VotedFor = -1
}
// NOTE - We only become leader by doing shiftToCandidate() and voting for ourself
// Therefore, we know who we voted for.
// We have already adjusted our currentTerm (during shiftToCandidare())
func (r *RaftNode) shiftToLeader() {
defer r.heartbeatAppendEntriesRPC() // We need to confirm leadership with all nodes
if r.verbose {
log.Printf("############ SHIFT TO LEADER. Term: %d", r.CurrentTerm)
}
r.state = leader
r.currentLeader = r.id
// Reset leader volatile state
r.nextIndex = make(map[HostID]LogIndex)
r.matchIndex = make(map[HostID]LogIndex)
for peerID := range r.hosts {
r.nextIndex[peerID] = r.getLastLogIndex() + 1
r.matchIndex[peerID] = LogIndex(0)
}
}
func (r *RaftNode) election() {
r.shiftToCandidate()
r.multiRequestVoteRPC()
}
func (r *RaftNode) shiftToCandidate() {
r.resetTickers()
if r.verbose {
log.Println("############ SHIFT TO CANDIDATE")
}
r.votes = make(electionResults)
r.votes[r.id] = true
for hostID := range r.hosts {
if hostID != r.id {
r.votes[hostID] = false
}
}
r.state = candidate
r.CurrentTerm++
r.VotedFor = r.id
}
// StoreClientData allows a client to send data to the raft cluster via RPC for storage
// We fill the reply struct with "success = true" if we are leader and store the data successfully.
// If we are not leader, we will reply with the id of another node, and the client
// must detect this and retry at that node.
// If we do not know or do not yet have a leader, we will reply with leader = -1 and
// client may choose to retry at us or another random node.
// TODO - need a version of StoreClientData that ensures some form of commitment after leader responds to a message?
func (r *RaftNode) StoreClientData(cd ClientDataStruct, response *ClientResponse) error {
r.Lock()
defer r.Unlock()
if r.verbose {
log.Println("############ StoreClientData()")
}
// NOTE - if we do not yet know leader, client will see response.leader = -1.
// They should wait before recontact, and may recontact us or another random node
defer r.persistState()
defer r.executeLog()
response.Leader = r.currentLeader
response.Success = false // by default, assume we will fail
if r.state != leader {
return nil
}
// Try to short-circuit based on the client serial num
if haveNewer, prevReply := r.haveNewerSerialNum(cd.ClientID, cd.ClientSerialNum); haveNewer {
response.Success = prevReply.Success
// response.leader = prevReply.leader
// NOTE - we do not want to notify about the previous leader, because if it is not us, the client will
// just get confused and contact the wrong node next time
// this situation only arises if the client's previous attempt was partially successful, but leader crashed before replying
return nil
}
// We are the leader and this is a new entry. Attempt to replicate this to all peer logs
response.Success = true
entry := LogEntry{
Term: r.CurrentTerm,
ClientData: cd.Data,
ClientID: cd.ClientID,
ClientSerialNum: cd.ClientSerialNum,
ClientResponse: ClientResponse{
Success: response.Success,
Leader: r.id}}
r.append(entry)
go r.heartbeatAppendEntriesRPC()
return nil
}
// After sending updates to other nodes, we try to advance our commitIndex
// At the end, we try to execute log
func (r *RaftNode) updateCommitIndex() {
// If there exists an N such that:
// 1) N > commitIndex,
// 2) a majority of matchIndex[i] >= N, and
// 3) log[N].term == currentTerm | log.Printf("commitIndex %d ineligible because of log entry %s", n, r.Log[n].String())
}
continue
}
peersAtThisLevel := make(map[HostID]bool)
for hostID := range r.hosts {
if hostID == r.id {
peersAtThisLevel[hostID] = true
} else {
peersAtThisLevel[hostID] = r.matchIndex[hostID] >= n
}
}
if haveMajority(peersAtThisLevel, "COMMIT IDX", r.verbose) {
r.commitIndex = n
}
}
}
// Based on our commit index, apply any log entries that are ready for commit
// This function should be idempotent and safe to apply often.
func (r *RaftNode) executeLog() {
for r.commitIndex > r.lastApplied {
r.lastApplied++
r.StateMachine.apply(r.Log[r.lastApplied])
}
}
// AppendEntries is called by RPC from the leader to modify the log of a follower.
// TODO - some amount of duplicated logic in AppendEntries() and Vote()
// Returns false if entries were rejected, or true if accepted
func (r *RaftNode) AppendEntries(ae AppendEntriesStruct, response *RPCResponse) error {
r.Lock()
defer r.Unlock()
if r.verbose {
log.Printf("AppendEntries(). ae: %s", ae.String())
log.Printf("My log: %s", r.Log.String())
}
response.Term = r.CurrentTerm
if ae.LeaderID == r.currentLeader {
if r.verbose {
log.Println("AppendEntries from leader - reset tickers")
}
r.resetTickers()
}
// Reply false if term < currentTerm
if ae.Term < r.CurrentTerm {
if r.verbose {
log.Println("AE from stale term")
}
response.Term = r.CurrentTerm
response.Success = false
return nil
}
// NOTE - shifting to follower each time might sound misleading, but keeps things uniform
r.shiftToFollower(ae.Term, ae.LeaderID)
// Reply false if log doesn't contain an entry at prevLogIndex whose term matches prevLogTerm
if int(ae.PrevLogIndex) >= len(r.Log) || // index out-of-bounds
r.Log[ae.PrevLogIndex].Term != ae.PrevLogTerm {
if r.verbose {
log.Println("my PrevLogTerm does not match theirs")
}
response.Term = r.CurrentTerm
response.Success = false
return nil
}
// If an existing entry conflicts with a new one (same index, but different terms),
// delete the existing entry and all that follow it
if r.verbose {
log.Println("Applying entries...")
}
offset := int(ae.PrevLogIndex) + 1
for i, entry := range ae.Entries {
if i+offset >= len(r.Log) { // We certainly have no conflict
if r.verbose {
log.Printf("Apply without conflict: index=%d", i+offset)
}
r.append(entry)
} else {
if r.Log[i+offset].Term != ae.Entries[i].Term { // We have conflicting entry
if r.verbose {
log.Printf("Conflict - delete suffix! (we have term=%d, they have term=%d). Delete our log from index=%d onwards.", r.Log[i+offset].Term, ae.Entries[i].Term, i+offset)
}
r.Log = r.Log[:i+offset] // delete the existing entry and all that follow it
r.append(entry) // append the current entry
log.Printf("\n\nLog: %s\n\n", stringOneLog(r.Log))
} else if r.Log[i+offset] != entry {
log.Printf("\nOURS: %s\n\nTHEIRS: %s", r.Log[i+offset].String(), entry.String())
panic("log safety violation occurred somewhere")
}
}
}
response.Success = true
lastIndex := r.getLastLogIndex()
// Now we need to decide how to set our local commit index
if ae.LeaderCommit > r.commitIndex {
r.commitIndex = min(lastIndex, ae.LeaderCommit)
}
r.executeLog()
r.persistState()
return nil
}
// CandidateLooksEligible allows a raft node to decide whether another host's log is sufficiently up-to-date to become leader
// Returns true if the incoming RequestVote shows that the peer is at least as up-to-date as we are
// See paper section 5.4
func (r *RaftNode) CandidateLooksEligible(candLastLogIdx LogIndex, candLastLogTerm Term) bool {
ourLastLogTerm := r.getLastLogTerm()
ourLastLogIdx := r.getLastLogIndex()
if r.verbose {
log.Printf("We have: lastLogTerm=%d, lastLogIdx=%d. They have: lastLogTerm=%d, lastLogIdx=%d", ourLastLogTerm, ourLastLogIdx, candLastLogTerm, candLastLogIdx)
}
if ourLastLogTerm == candLastLogTerm {
return candLastLogIdx >= ourLastLogIdx
}
return candLastLogTerm >= ourLastLogTerm
}
// Vote is called by RPC from a candidate. We can observe the following from the raft.github.io simulation:
// 1) If we get a requestVoteRPC from a future term, we immediately jump to that term and send our vote
// 2) If we are already collecting votes for the next election, and simultaneously get a request from another node to vote for them, we do NOT give them our vote
// (we've already voted for ourselves!)
// 3) if we've been offline, and wakeup and try to get votes: we get rejections, that also tell us the new term, and we immediately jump to that term as a follower
func (r *RaftNode) Vote(rv RequestVoteStruct, response *RPCResponse) error {
r.Lock()
defer r.Unlock()
if r.verbose {
log.Println("Vote()")
}
defer r.persistState()
response.Term = r.CurrentTerm
myLastLogTerm := r.getLastLogTerm()
myLastLogIdx := r.getLastLogIndex()
if r.verbose {
log.Printf("RequestVoteStruct: %s. \nMy node: term: %d, votedFor %d, lastLogTerm: %d, lastLogIdx: %d",
rv.String(), r.CurrentTerm, r.VotedFor, myLastLogTerm, myLastLogIdx)
}
looksEligible := r.CandidateLooksEligible(rv.LastLogIdx, rv.LastLogTerm)
if rv.Term > r.CurrentTerm {
r.shiftToFollower(rv.Term, HostID(-1)) // We do not yet know who is leader for this term
}
if rv.Term < r.CurrentTerm {
if r.verbose {
log.Println("RV from prior term - do not grant vote")
}
response.Success = false
} else if (r.VotedFor == -1 || r.VotedFor == rv.CandidateID) && looksEligible {
if r.verbose {
log.Println("Grant vote")
}
r.resetTickers()
response.Success = true
r.VotedFor = rv.CandidateID
} else {
if r.verbose {
log.Println("Do not grant vote")
}
response.Success = false
}
return nil
}
func (r *RaftNode) getLastLogIndex() LogIndex {
if len(r.Log) > 0 {
return LogIndex(len(r.Log) - 1)
}
return LogIndex(0)
}
func (r *RaftNode) getLastLogTerm() Term {
return Term(r.Log[int(r.getLastLogIndex())].Term)
}
func max(x LogIndex, y LogIndex) LogIndex {
if x > y {
return x
}
return y
}
// min returns the smaller of two log indices.
func min(x LogIndex, y LogIndex) LogIndex {
	if y < x {
		return y
	}
	return x
}
func (r *RaftNode) QuitChan() chan bool {
return r.quitChan
}
// Start is the entrypoint for a raft node (constructed here or in a test) to begin the protocol
func (r *RaftNode) Start() {
go r.recvDaemon()
go r.protocol()
go r.quitter(duration)
<-r.quitChan
log.Println("FINISH EXPERIMENT...")
r.printResults()
}
// Main Raft protocol
// protocol is the node's event loop: it multiplexes RPC replies, the
// heartbeat ticker (leader duties), the election ticker (follower/candidate
// timeouts), and the quit signal.
func (r *RaftNode) protocol() {
	r.resetTickers()
	log.Printf("Begin Protocol. verbose: %t", r.verbose)
	for {
		select {
		case m := <-r.incomingChan:
			// NOTE(review): r.CurrentTerm is read and shiftToFollower is called
			// here without holding r's lock (the lock is only taken inside the
			// switch below) — confirm this cannot race with the RPC handlers.
			if m.response.Term > r.CurrentTerm {
				if r.verbose {
					log.Printf("Received reply from hostID %d with higher term: %d and leaderid: %d", m.hostID, m.response.Term, m.response.LeaderID)
				}
				r.shiftToFollower(m.response.Term, m.response.LeaderID)
			}
			switch m.msgType {
			case vote:
				if r.verbose {
					log.Printf("processing vote reply, hostID=%d: response=%s", m.hostID, m.response.String())
				}
				r.Lock()
				if r.state == candidate { // replies for an already-decided election are ignored
					r.votes[m.hostID] = m.response.Success
					if r.wonElection() {
						r.shiftToLeader()
					}
				}
				r.Unlock()
			case appendEntries:
				r.Lock()
				if r.state == leader { // We might have been deposed
					// Inspect response and update our tracking variables appropriately
					if !m.response.Success {
						// TODO - When responding to AppendEntries, the follower should return success if they do a new append, OR if they already have appended that entry
						prev := r.nextIndex[m.hostID]
						next := max(0, r.nextIndex[m.hostID]-1)
						if r.verbose {
							log.Printf("Decrement nextIndex for hostID %d from %d to %d", m.hostID, prev, next)
						}
						r.nextIndex[m.hostID] = next
					} else {
						prev := r.nextIndex[m.hostID]
						next := prev + LogIndex(m.aeLength)
						if r.verbose {
							log.Printf("Increment nextIndex for hostID %d from %d to %d", m.hostID, prev, next)
						}
						// NOTE(review): matchIndex is set to the pre-update nextIndex;
						// conventional Raft sets it to the index of the last replicated
						// entry (next-1 here) — confirm this is intentional.
						r.matchIndex[m.hostID] = prev
						r.nextIndex[m.hostID] = next
					}
					r.executeLog()
				}
				r.Unlock()
			default:
				panic(fmt.Sprintf("invalid msg type. %s", m.String()))
			}
		case <-r.heartbeatTicker.C: // Send append entries, either empty or full depending on the peer's log index
			if r.state == leader {
				r.Lock()
				r.heartbeatAppendEntriesRPC()
				r.updateCommitIndex()
				r.executeLog()
				r.Unlock()
			}
		case <-r.electionTicker.C: // Periodically time out and start a new election
			if r.state == follower || r.state == candidate {
				if r.verbose {
					log.Println("TIMED OUT - STARTING ELECTION")
				}
				r.election()
			}
		case <-r.quitChan:
			r.shutdown()
		}
	}
}
// Quit the protocol on a timer (to be run in separate goroutine)
// quitter waits until either the node decides to quit on its own, or
// quitTime seconds elapse, in which case it signals the node to quit.
// The original wrapped this select in a `for` loop, but both arms return,
// so a second iteration could never run; the loop was dead code.
func (r *RaftNode) quitter(quitTime int) {
	select {
	case <-r.quitChan: // the node decided it should quit
		return
	case <-time.After(time.Duration(quitTime) * time.Second): // we decide the node should quit
		r.quitChan <- true
	}
}
// resetElectionTicker restarts the election timeout with a fresh,
// per-node randomized duration and returns that duration.
func (r *RaftNode) resetElectionTicker() time.Duration {
	var newTimeout time.Duration = selectElectionTimeout(r.id) * r.timeoutUnits
	if r.verbose {
		log.Printf("new election timeout: %s", newTimeout.String())
	}
	// NOTE(review): the previous ticker is never Stop()ed, and time.Ticker
	// values are not meant to be copied — each reset leaks the old ticker's
	// timer. Consider storing *time.Ticker and calling Stop before replacing.
	r.electionTicker = *time.NewTicker(newTimeout)
	return newTimeout
}
// resetHeartbeatTicker restarts the leader heartbeat interval and
// returns the duration used.
func (r *RaftNode) resetHeartbeatTicker() time.Duration {
	var newTimeout time.Duration = heartbeatTimeout * r.timeoutUnits
	if r.verbose {
		log.Printf("new heartbeat timeout: %s", newTimeout.String())
	}
	// NOTE(review): as with resetElectionTicker, the old ticker is neither
	// stopped nor safely replaceable by value copy — confirm and consider *time.Ticker.
	r.heartbeatTicker = *time.NewTicker(newTimeout)
	return newTimeout
}
// resetTickers restarts both timers and reports the new timeouts
// (election timeout first, heartbeat timeout second). Go guarantees
// left-to-right evaluation of the calls in the return statement.
func (r *RaftNode) resetTickers() (time.Duration, time.Duration) {
	return r.resetElectionTicker(), r.resetHeartbeatTicker()
}
// shutdown announces termination and re-signals quitChan so that every
// goroutine blocked on it (e.g. Start) can observe the shutdown.
func (r *RaftNode) shutdown() {
	log.Println("RAFT NODE SHUTTING DOWN")
	r.quitChan <- true
}
// init registers the command-line flags and seeds the global PRNG so the
// per-node randomized election timeouts differ between runs.
func init() {
	flag.StringVar(&hostfile, "h", "hostfile.json", "name of hostfile")
	flag.IntVar(&duration, "d", 30, "time until node shutdown")
	flag.BoolVar(&verbose, "v", false, "verbose output")
	// NOTE(review): rand.Seed is deprecated since Go 1.20 (the global source
	// is auto-seeded); harmless here, but worth removing when toolchain allows.
	rand.Seed(time.Now().UTC().UnixNano())
}
// Raft is the entrypoint function for the raft replicated state machine protocol
func Raft() {
flag.Parse()
hosts := make(HostMap)
clients := make(ClientMap)
quitChan := make(chan bool)
intID, recvPort := ResolveAllPeers(hosts, clients, hostfile, true)
id := HostID(intID)
r := NewRaftNode(id, recvPort, hosts, clients, quitChan)
log.Printf("RaftNode: %s", r.String())
r.Start()
} | // Then:
// set commitIndex = N
for n := r.commitIndex + 1; n <= r.getLastLogIndex(); n++ {
if r.Log[n].Term != r.CurrentTerm {
if r.verbose { | random_line_split |
raft.go | package raft
import (
"flag"
"fmt"
"log"
"math"
"math/rand"
"strings"
"time"
)
var hostfile string
var duration int
var verbose bool
// wonElection reports whether this node has collected yes-votes from a
// majority of the cluster for the in-progress election.
func (r *RaftNode) wonElection() bool {
	won := haveMajority(r.votes, "ELECTION", r.verbose)
	return won
}
// haveMajority reports whether strictly more than half of the voters in
// votes voted yes. label only tags the verbose log line; it has no
// effect on the result.
func haveMajority(votes map[HostID]bool, label string, verbose bool) bool {
	total := len(votes)
	required := int(math.Floor(float64(total)/2)) + 1
	yesCount := 0
	var description strings.Builder
	description.WriteString("[")
	for id, saidYes := range votes {
		if saidYes {
			yesCount++
		}
		description.WriteString(fmt.Sprintf("|host %d, votedYes %t|", id, saidYes))
	}
	description.WriteString("]")
	if verbose {
		log.Printf("Checking %s majority. nVoters: %d, nReq: %d, nFound: %d, Votes: %s", label, total, required, yesCount, description.String())
	}
	return yesCount >= required
}
// NOTE - important that for all the shiftTo...() functions, we must first set our state variable
// shiftToFollower moves this node into follower state for term t under
// leaderID, discarding leader-only volatile state and clearing our vote.
func (r *RaftNode) shiftToFollower(t Term, leaderID HostID) {
	if r.verbose {
		log.Printf("############ SHIFT TO FOLLOWER, Term: %d, LeaderID: %d", t, leaderID)
	}
	r.state = follower
	r.CurrentTerm = t
	r.currentLeader = leaderID
	// nextIndex/matchIndex are leader-only bookkeeping; drop them entirely.
	r.nextIndex = nil
	r.matchIndex = nil
	// -1 means "not yet voted in this term", enabling a future vote grant.
	r.VotedFor = -1
}
// NOTE - We only become leader by doing shiftToCandidate() and voting for ourself
// Therefore, we know who we voted for.
// We have already adjusted our currentTerm (during shiftToCandidare())
// shiftToLeader moves this node into leader state and reinitializes the
// per-peer replication indices (nextIndex/matchIndex).
func (r *RaftNode) shiftToLeader() {
	// Deferred so the heartbeat goes out only after all leader state below
	// has been installed.
	defer r.heartbeatAppendEntriesRPC() // We need to confirm leadership with all nodes
	if r.verbose {
		log.Printf("############ SHIFT TO LEADER. Term: %d", r.CurrentTerm)
	}
	r.state = leader
	r.currentLeader = r.id
	// Reset leader volatile state
	r.nextIndex = make(map[HostID]LogIndex)
	r.matchIndex = make(map[HostID]LogIndex)
	for peerID := range r.hosts {
		// Optimistically assume each peer is one past our last entry;
		// AppendEntries rejections walk nextIndex backwards as needed.
		r.nextIndex[peerID] = r.getLastLogIndex() + 1
		r.matchIndex[peerID] = LogIndex(0)
	}
}
// election transitions this node to candidate state and solicits votes
// from all peers.
func (r *RaftNode) election() {
	r.shiftToCandidate()
	r.multiRequestVoteRPC()
}
// shiftToCandidate moves this node into candidate state: it restarts the
// tickers, starts a fresh vote tally with a yes-vote for itself, bumps
// the term, and records the self-vote.
func (r *RaftNode) shiftToCandidate() {
	r.resetTickers()
	if r.verbose {
		log.Println("############ SHIFT TO CANDIDATE")
	}
	tally := make(electionResults)
	for hostID := range r.hosts {
		tally[hostID] = hostID == r.id
	}
	tally[r.id] = true // ensure our own yes-vote even if we are absent from r.hosts
	r.votes = tally
	r.state = candidate
	r.CurrentTerm++
	r.VotedFor = r.id
}
// StoreClientData allows a client to send data to the raft cluster via RPC for storage
// We fill the reply struct with "success = true" if we are leader and store the data successfully.
// If we are not leader, we will reply with the id of another node, and the client
// must detect this and retry at that node.
// If we do not know or do not yet have a leader, we will reply with leader = -1 and
// client may choose to retry at us or another random node.
// TODO - need a version of StoreClientData that ensures some form of commitment after leader responds to a message?
func (r *RaftNode) StoreClientData(cd ClientDataStruct, response *ClientResponse) error {
	r.Lock()
	defer r.Unlock()
	if r.verbose {
		log.Println("############ StoreClientData()")
	}
	// NOTE - if we do not yet know leader, client will see response.leader = -1.
	// They should wait before recontact, and may recontact us or another random node
	// Deferred calls run LIFO: executeLog, then persistState, then Unlock —
	// i.e. the log is applied and persisted while still holding the lock.
	defer r.persistState()
	defer r.executeLog()
	response.Leader = r.currentLeader
	response.Success = false // by default, assume we will fail
	if r.state != leader {
		return nil
	}
	// Try to short-circuit based on the client serial num
	if haveNewer, prevReply := r.haveNewerSerialNum(cd.ClientID, cd.ClientSerialNum); haveNewer {
		// Duplicate request: replay the prior outcome rather than re-appending.
		response.Success = prevReply.Success
		// response.leader = prevReply.leader
		// NOTE - we do not want to notify about the previous leader, because if it is not us, the client will
		// just get confused and contact the wrong node next time
		// this situation only arises if the client's previous attempt was partially successful, but leader crashed before replying
		return nil
	}
	// We are the leader and this is a new entry. Attempt to replicate this to all peer logs
	response.Success = true
	entry := LogEntry{
		Term:            r.CurrentTerm,
		ClientData:      cd.Data,
		ClientID:        cd.ClientID,
		ClientSerialNum: cd.ClientSerialNum,
		ClientResponse: ClientResponse{
			Success: response.Success,
			Leader:  r.id}}
	r.append(entry)
	// Replicate asynchronously; we reply to the client without waiting for commit.
	go r.heartbeatAppendEntriesRPC()
	return nil
}
// updateCommitIndex advances commitIndex to the highest index N such that
// a majority of the cluster has replicated entry N and log[N] belongs to
// the current term (a leader may only commit entries from its own term;
// see Raft paper section 5.4.2). Caller is expected to run executeLog after.
func (r *RaftNode) updateCommitIndex() {
	for n := r.commitIndex + 1; n <= r.getLastLogIndex(); n++ {
		if r.Log[n].Term != r.CurrentTerm {
			if r.verbose {
				log.Printf("commitIndex %d ineligible because of log entry %s", n, r.Log[n].String())
			}
			continue
		}
		// Tally which hosts are known to have replicated entry n.
		replicated := make(map[HostID]bool)
		for hostID := range r.hosts {
			if hostID == r.id {
				replicated[hostID] = true // our own log trivially contains n
				continue
			}
			replicated[hostID] = r.matchIndex[hostID] >= n
		}
		if haveMajority(replicated, "COMMIT IDX", r.verbose) {
			r.commitIndex = n
		}
	}
}
// Based on our commit index, apply any log entries that are ready for commit
// This function should be idempotent and safe to apply often.
// Entries are applied strictly in log order, one at a time, advancing
// lastApplied until it catches up with commitIndex.
func (r *RaftNode) executeLog() {
	for r.commitIndex > r.lastApplied {
		r.lastApplied++
		r.StateMachine.apply(r.Log[r.lastApplied])
	}
}
// AppendEntries is called by RPC from the leader to modify the log of a follower.
// TODO - some amount of duplicated logic in AppendEntries() and Vote()
// Returns false if entries were rejected, or true if accepted
// Implements the receiver rules of Figure 2 in the Raft paper: term check,
// prev-entry consistency check, conflict truncation, append, and commit-index
// advance.
func (r *RaftNode) AppendEntries(ae AppendEntriesStruct, response *RPCResponse) error {
	r.Lock()
	defer r.Unlock()
	if r.verbose {
		log.Printf("AppendEntries(). ae: %s", ae.String())
		log.Printf("My log: %s", r.Log.String())
	}
	response.Term = r.CurrentTerm
	// NOTE(review): tickers are reset before the stale-term rejection below,
	// so an AE from a deposed leader that still matches r.currentLeader can
	// postpone our election timeout — confirm this is intended.
	if ae.LeaderID == r.currentLeader {
		if r.verbose {
			log.Println("AppendEntries from leader - reset tickers")
		}
		r.resetTickers()
	}
	// Reply false if term < currentTerm
	if ae.Term < r.CurrentTerm {
		if r.verbose {
			log.Println("AE from stale term")
		}
		response.Term = r.CurrentTerm
		response.Success = false
		return nil
	}
	// NOTE - shifting to follower each time might sound misleading, but keeps things uniform
	r.shiftToFollower(ae.Term, ae.LeaderID)
	// Reply false if log doesn't contain an entry at prevLogIndex whose term matches prevLogTerm
	if int(ae.PrevLogIndex) >= len(r.Log) || // index out-of-bounds
		r.Log[ae.PrevLogIndex].Term != ae.PrevLogTerm {
		if r.verbose {
			log.Println("my PrevLogTerm does not match theirs")
		}
		response.Term = r.CurrentTerm
		response.Success = false
		return nil
	}
	// If an existing entry conflicts with a new one (same index, but different terms),
	// delete the existing entry and all that follow it
	if r.verbose {
		log.Println("Applying entries...")
	}
	// offset maps position i in ae.Entries to index i+offset in our log.
	offset := int(ae.PrevLogIndex) + 1
	for i, entry := range ae.Entries {
		if i+offset >= len(r.Log) { // We certainly have no conflict
			if r.verbose {
				log.Printf("Apply without conflict: index=%d", i+offset)
			}
			r.append(entry)
		} else {
			if r.Log[i+offset].Term != ae.Entries[i].Term { // We have conflicting entry
				if r.verbose {
					log.Printf("Conflict - delete suffix! (we have term=%d, they have term=%d). Delete our log from index=%d onwards.", r.Log[i+offset].Term, ae.Entries[i].Term, i+offset)
				}
				r.Log = r.Log[:i+offset] // delete the existing entry and all that follow it
				r.append(entry)          // append the current entry
				log.Printf("\n\nLog: %s\n\n", stringOneLog(r.Log))
			} else if r.Log[i+offset] != entry {
				// Same index and term but different payload would violate the
				// Log Matching property — treat as a fatal invariant breach.
				log.Printf("\nOURS: %s\n\nTHEIRS: %s", r.Log[i+offset].String(), entry.String())
				panic("log safety violation occurred somewhere")
			}
		}
	}
	response.Success = true
	lastIndex := r.getLastLogIndex()
	// Now we need to decide how to set our local commit index
	if ae.LeaderCommit > r.commitIndex {
		// commitIndex = min(leaderCommit, index of last new entry)
		r.commitIndex = min(lastIndex, ae.LeaderCommit)
	}
	r.executeLog()
	r.persistState()
	return nil
}
// CandidateLooksEligible reports whether a candidate's log is at least as
// up-to-date as ours, per the election restriction in section 5.4 of the
// Raft paper: compare last log terms first, and break ties on last index.
func (r *RaftNode) CandidateLooksEligible(candLastLogIdx LogIndex, candLastLogTerm Term) bool {
	lastTerm := r.getLastLogTerm()
	lastIdx := r.getLastLogIndex()
	if r.verbose {
		log.Printf("We have: lastLogTerm=%d, lastLogIdx=%d. They have: lastLogTerm=%d, lastLogIdx=%d", lastTerm, lastIdx, candLastLogTerm, candLastLogIdx)
	}
	if lastTerm != candLastLogTerm {
		return candLastLogTerm >= lastTerm
	}
	return candLastLogIdx >= lastIdx
}
// Vote is called by RPC from a candidate. We can observe the following from the raft.github.io simulation:
// 1) If we get a requestVoteRPC from a future term, we immediately jump to that term and send our vote
// 2) If we are already collecting votes for the next election, and simultaneously get a request from another node to vote for them, we do NOT give them our vote
// (we've already voted for ourselves!)
// 3) if we've been offline, and wakeup and try to get votes: we get rejections, that also tell us the new term, and we immediately jump to that term as a follower
func (r *RaftNode) Vote(rv RequestVoteStruct, response *RPCResponse) error {
	r.Lock()
	defer r.Unlock()
	if r.verbose {
		log.Println("Vote()")
	}
	// Persist term/vote on every exit path (deferred LIFO: runs before Unlock).
	defer r.persistState()
	// NOTE(review): response.Term is captured before a possible term jump in
	// shiftToFollower below, so a granted vote may carry our old term —
	// confirm callers only inspect Term on rejection.
	response.Term = r.CurrentTerm
	myLastLogTerm := r.getLastLogTerm()
	myLastLogIdx := r.getLastLogIndex()
	if r.verbose {
		log.Printf("RequestVoteStruct: %s. \nMy node: term: %d, votedFor %d, lastLogTerm: %d, lastLogIdx: %d",
			rv.String(), r.CurrentTerm, r.VotedFor, myLastLogTerm, myLastLogIdx)
	}
	looksEligible := r.CandidateLooksEligible(rv.LastLogIdx, rv.LastLogTerm)
	if rv.Term > r.CurrentTerm {
		// Jumping terms also clears VotedFor, which is what permits the
		// grant in the branch below for this new term.
		r.shiftToFollower(rv.Term, HostID(-1)) // We do not yet know who is leader for this term
	}
	if rv.Term < r.CurrentTerm {
		if r.verbose {
			log.Println("RV from prior term - do not grant vote")
		}
		response.Success = false
	} else if (r.VotedFor == -1 || r.VotedFor == rv.CandidateID) && looksEligible {
		if r.verbose {
			log.Println("Grant vote")
		}
		r.resetTickers()
		response.Success = true
		r.VotedFor = rv.CandidateID
	} else {
		if r.verbose {
			log.Println("Do not grant vote")
		}
		response.Success = false
	}
	return nil
}
func (r *RaftNode) getLastLogIndex() LogIndex {
if len(r.Log) > 0 {
return LogIndex(len(r.Log) - 1)
}
return LogIndex(0)
}
func (r *RaftNode) getLastLogTerm() Term {
return Term(r.Log[int(r.getLastLogIndex())].Term)
}
func max(x LogIndex, y LogIndex) LogIndex {
if x > y {
return x
}
return y
}
func min(x LogIndex, y LogIndex) LogIndex |
func (r *RaftNode) QuitChan() chan bool {
return r.quitChan
}
// Start is the entrypoint for a raft node (constructed here or in a test) to begin the protocol
func (r *RaftNode) Start() {
go r.recvDaemon()
go r.protocol()
go r.quitter(duration)
<-r.quitChan
log.Println("FINISH EXPERIMENT...")
r.printResults()
}
// Main Raft protocol
func (r *RaftNode) protocol() {
r.resetTickers()
log.Printf("Begin Protocol. verbose: %t", r.verbose)
for {
select {
case m := <-r.incomingChan:
if m.response.Term > r.CurrentTerm {
if r.verbose {
log.Printf("Received reply from hostID %d with higher term: %d and leaderid: %d", m.hostID, m.response.Term, m.response.LeaderID)
}
r.shiftToFollower(m.response.Term, m.response.LeaderID)
}
switch m.msgType {
case vote:
if r.verbose {
log.Printf("processing vote reply, hostID=%d: response=%s", m.hostID, m.response.String())
}
r.Lock()
if r.state == candidate {
r.votes[m.hostID] = m.response.Success
if r.wonElection() {
r.shiftToLeader()
}
}
r.Unlock()
case appendEntries:
r.Lock()
if r.state == leader { // We might have been deposed
// Inspect response and update our tracking variables appropriately
if !m.response.Success {
// TODO - When responding to AppendEntries, the follower should return success if they do a new append, OR if they already have appended that entry
prev := r.nextIndex[m.hostID]
next := max(0, r.nextIndex[m.hostID]-1)
if r.verbose {
log.Printf("Decrement nextIndex for hostID %d from %d to %d", m.hostID, prev, next)
}
r.nextIndex[m.hostID] = next
} else {
prev := r.nextIndex[m.hostID]
next := prev + LogIndex(m.aeLength)
if r.verbose {
log.Printf("Increment nextIndex for hostID %d from %d to %d", m.hostID, prev, next)
}
r.matchIndex[m.hostID] = prev
r.nextIndex[m.hostID] = next
}
r.executeLog()
}
r.Unlock()
default:
panic(fmt.Sprintf("invalid msg type. %s", m.String()))
}
case <-r.heartbeatTicker.C: // Send append entries, either empty or full depending on the peer's log index
if r.state == leader {
r.Lock()
r.heartbeatAppendEntriesRPC()
r.updateCommitIndex()
r.executeLog()
r.Unlock()
}
case <-r.electionTicker.C: // Periodically time out and start a new election
if r.state == follower || r.state == candidate {
if r.verbose {
log.Println("TIMED OUT - STARTING ELECTION")
}
r.election()
}
case <-r.quitChan:
r.shutdown()
}
}
}
// Quit the protocol on a timer (to be run in separate goroutine)
func (r *RaftNode) quitter(quitTime int) {
for {
select {
case <-r.quitChan: // the node decided it should quit
return
case <-time.After(time.Duration(quitTime) * time.Second): // we decide the node should quit
r.quitChan <- true
return
}
}
}
func (r *RaftNode) resetElectionTicker() time.Duration {
var newTimeout time.Duration = selectElectionTimeout(r.id) * r.timeoutUnits
if r.verbose {
log.Printf("new election timeout: %s", newTimeout.String())
}
r.electionTicker = *time.NewTicker(newTimeout)
return newTimeout
}
func (r *RaftNode) resetHeartbeatTicker() time.Duration {
var newTimeout time.Duration = heartbeatTimeout * r.timeoutUnits
if r.verbose {
log.Printf("new heartbeat timeout: %s", newTimeout.String())
}
r.heartbeatTicker = *time.NewTicker(newTimeout)
return newTimeout
}
func (r *RaftNode) resetTickers() (time.Duration, time.Duration) {
electionTimeout := r.resetElectionTicker()
heartbeatTimeout := r.resetHeartbeatTicker()
return electionTimeout, heartbeatTimeout
}
func (r *RaftNode) shutdown() {
log.Println("RAFT NODE SHUTTING DOWN")
r.quitChan <- true
}
func init() {
flag.StringVar(&hostfile, "h", "hostfile.json", "name of hostfile")
flag.IntVar(&duration, "d", 30, "time until node shutdown")
flag.BoolVar(&verbose, "v", false, "verbose output")
rand.Seed(time.Now().UTC().UnixNano())
}
// Raft is the entrypoint function for the raft replicated state machine protocol
func Raft() {
flag.Parse()
hosts := make(HostMap)
clients := make(ClientMap)
quitChan := make(chan bool)
intID, recvPort := ResolveAllPeers(hosts, clients, hostfile, true)
id := HostID(intID)
r := NewRaftNode(id, recvPort, hosts, clients, quitChan)
log.Printf("RaftNode: %s", r.String())
r.Start()
}
| {
if x < y {
return x
}
return y
} | identifier_body |
raft.go | package raft
import (
"flag"
"fmt"
"log"
"math"
"math/rand"
"strings"
"time"
)
var hostfile string
var duration int
var verbose bool
// returns true when an agent has a majority of votes for the proposed view
func (r *RaftNode) wonElection() bool {
return haveMajority(r.votes, "ELECTION", r.verbose)
}
func haveMajority(votes map[HostID]bool, label string, verbose bool) bool {
var sb strings.Builder
nVoters := len(votes)
nReq := int(math.Floor(float64(nVoters)/2)) + 1
nFound := 0
sb.WriteString("[")
for hostID, votedYes := range votes {
if votedYes {
nFound++
}
sb.WriteString(fmt.Sprintf("|host %d, votedYes %t|", hostID, votedYes))
}
sb.WriteString("]")
if verbose {
log.Printf("Checking %s majority. nVoters: %d, nReq: %d, nFound: %d, Votes: %s", label, nVoters, nReq, nFound, sb.String())
}
return nFound >= nReq
}
// NOTE - important that for all the shiftTo...() functions, we must first set our state variable
func (r *RaftNode) shiftToFollower(t Term, leaderID HostID) {
if r.verbose {
log.Printf("############ SHIFT TO FOLLOWER, Term: %d, LeaderID: %d", t, leaderID)
}
r.state = follower
r.CurrentTerm = t
r.currentLeader = leaderID
r.nextIndex = nil
r.matchIndex = nil
r.VotedFor = -1
}
// NOTE - We only become leader by doing shiftToCandidate() and voting for ourself
// Therefore, we know who we voted for.
// We have already adjusted our currentTerm (during shiftToCandidare())
func (r *RaftNode) shiftToLeader() {
defer r.heartbeatAppendEntriesRPC() // We need to confirm leadership with all nodes
if r.verbose {
log.Printf("############ SHIFT TO LEADER. Term: %d", r.CurrentTerm)
}
r.state = leader
r.currentLeader = r.id
// Reset leader volatile state
r.nextIndex = make(map[HostID]LogIndex)
r.matchIndex = make(map[HostID]LogIndex)
for peerID := range r.hosts {
r.nextIndex[peerID] = r.getLastLogIndex() + 1
r.matchIndex[peerID] = LogIndex(0)
}
}
func (r *RaftNode) election() {
r.shiftToCandidate()
r.multiRequestVoteRPC()
}
func (r *RaftNode) shiftToCandidate() {
r.resetTickers()
if r.verbose {
log.Println("############ SHIFT TO CANDIDATE")
}
r.votes = make(electionResults)
r.votes[r.id] = true
for hostID := range r.hosts {
if hostID != r.id {
r.votes[hostID] = false
}
}
r.state = candidate
r.CurrentTerm++
r.VotedFor = r.id
}
// StoreClientData allows a client to send data to the raft cluster via RPC for storage
// We fill the reply struct with "success = true" if we are leader and store the data successfully.
// If we are not leader, we will reply with the id of another node, and the client
// must detect this and retry at that node.
// If we do not know or do not yet have a leader, we will reply with leader = -1 and
// client may choose to retry at us or another random node.
// TODO - need a version of StoreClientData that ensures some form of commitment after leader responds to a message?
func (r *RaftNode) StoreClientData(cd ClientDataStruct, response *ClientResponse) error {
r.Lock()
defer r.Unlock()
if r.verbose {
log.Println("############ StoreClientData()")
}
// NOTE - if we do not yet know leader, client will see response.leader = -1.
// They should wait before recontact, and may recontact us or another random node
defer r.persistState()
defer r.executeLog()
response.Leader = r.currentLeader
response.Success = false // by default, assume we will fail
if r.state != leader {
return nil
}
// Try to short-circuit based on the client serial num
if haveNewer, prevReply := r.haveNewerSerialNum(cd.ClientID, cd.ClientSerialNum); haveNewer {
response.Success = prevReply.Success
// response.leader = prevReply.leader
// NOTE - we do not want to notify about the previous leader, because if it is not us, the client will
// just get confused and contact the wrong node next time
// this situation only arises if the client's previous attempt was partially successful, but leader crashed before replying
return nil
}
// We are the leader and this is a new entry. Attempt to replicate this to all peer logs
response.Success = true
entry := LogEntry{
Term: r.CurrentTerm,
ClientData: cd.Data,
ClientID: cd.ClientID,
ClientSerialNum: cd.ClientSerialNum,
ClientResponse: ClientResponse{
Success: response.Success,
Leader: r.id}}
r.append(entry)
go r.heartbeatAppendEntriesRPC()
return nil
}
// After sending updates to other nodes, we try to advance our commitIndex
// At the end, we try to execute log
func (r *RaftNode) updateCommitIndex() {
// If there exists an N such that:
// 1) N > commitIndex,
// 2) a majority of matchIndex[i] >= N, and
// 3) log[N].term == currentTerm
// Then:
// set commitIndex = N
for n := r.commitIndex + 1; n <= r.getLastLogIndex(); n++ {
if r.Log[n].Term != r.CurrentTerm {
if r.verbose {
log.Printf("commitIndex %d ineligible because of log entry %s", n, r.Log[n].String())
}
continue
}
peersAtThisLevel := make(map[HostID]bool)
for hostID := range r.hosts {
if hostID == r.id {
peersAtThisLevel[hostID] = true
} else {
peersAtThisLevel[hostID] = r.matchIndex[hostID] >= n
}
}
if haveMajority(peersAtThisLevel, "COMMIT IDX", r.verbose) {
r.commitIndex = n
}
}
}
// Based on our commit index, apply any log entries that are ready for commit
// This function should be idempotent and safe to apply often.
func (r *RaftNode) executeLog() {
for r.commitIndex > r.lastApplied {
r.lastApplied++
r.StateMachine.apply(r.Log[r.lastApplied])
}
}
// AppendEntries is called by RPC from the leader to modify the log of a follower.
// TODO - some amount of duplicated logic in AppendEntries() and Vote()
// Returns false if entries were rejected, or true if accepted
func (r *RaftNode) AppendEntries(ae AppendEntriesStruct, response *RPCResponse) error {
r.Lock()
defer r.Unlock()
if r.verbose {
log.Printf("AppendEntries(). ae: %s", ae.String())
log.Printf("My log: %s", r.Log.String())
}
response.Term = r.CurrentTerm
if ae.LeaderID == r.currentLeader {
if r.verbose {
log.Println("AppendEntries from leader - reset tickers")
}
r.resetTickers()
}
// Reply false if term < currentTerm
if ae.Term < r.CurrentTerm {
if r.verbose {
log.Println("AE from stale term")
}
response.Term = r.CurrentTerm
response.Success = false
return nil
}
// NOTE - shifting to follower each time might sound misleading, but keeps things uniform
r.shiftToFollower(ae.Term, ae.LeaderID)
// Reply false if log doesn't contain an entry at prevLogIndex whose term matches prevLogTerm
if int(ae.PrevLogIndex) >= len(r.Log) || // index out-of-bounds
r.Log[ae.PrevLogIndex].Term != ae.PrevLogTerm {
if r.verbose {
log.Println("my PrevLogTerm does not match theirs")
}
response.Term = r.CurrentTerm
response.Success = false
return nil
}
// If an existing entry conflicts with a new one (same index, but different terms),
// delete the existing entry and all that follow it
if r.verbose {
log.Println("Applying entries...")
}
offset := int(ae.PrevLogIndex) + 1
for i, entry := range ae.Entries {
if i+offset >= len(r.Log) { // We certainly have no conflict
if r.verbose {
log.Printf("Apply without conflict: index=%d", i+offset)
}
r.append(entry)
} else {
if r.Log[i+offset].Term != ae.Entries[i].Term { // We have conflicting entry
if r.verbose {
log.Printf("Conflict - delete suffix! (we have term=%d, they have term=%d). Delete our log from index=%d onwards.", r.Log[i+offset].Term, ae.Entries[i].Term, i+offset)
}
r.Log = r.Log[:i+offset] // delete the existing entry and all that follow it
r.append(entry) // append the current entry
log.Printf("\n\nLog: %s\n\n", stringOneLog(r.Log))
} else if r.Log[i+offset] != entry {
log.Printf("\nOURS: %s\n\nTHEIRS: %s", r.Log[i+offset].String(), entry.String())
panic("log safety violation occurred somewhere")
}
}
}
response.Success = true
lastIndex := r.getLastLogIndex()
// Now we need to decide how to set our local commit index
if ae.LeaderCommit > r.commitIndex {
r.commitIndex = min(lastIndex, ae.LeaderCommit)
}
r.executeLog()
r.persistState()
return nil
}
// CandidateLooksEligible allows a raft node to decide whether another host's log is sufficiently up-to-date to become leader
// Returns true if the incoming RequestVote shows that the peer is at least as up-to-date as we are
// See paper section 5.4
func (r *RaftNode) CandidateLooksEligible(candLastLogIdx LogIndex, candLastLogTerm Term) bool {
ourLastLogTerm := r.getLastLogTerm()
ourLastLogIdx := r.getLastLogIndex()
if r.verbose {
log.Printf("We have: lastLogTerm=%d, lastLogIdx=%d. They have: lastLogTerm=%d, lastLogIdx=%d", ourLastLogTerm, ourLastLogIdx, candLastLogTerm, candLastLogIdx)
}
if ourLastLogTerm == candLastLogTerm {
return candLastLogIdx >= ourLastLogIdx
}
return candLastLogTerm >= ourLastLogTerm
}
// Vote is called by RPC from a candidate. We can observe the following from the raft.github.io simulation:
// 1) If we get a requestVoteRPC from a future term, we immediately jump to that term and send our vote
// 2) If we are already collecting votes for the next election, and simultaneously get a request from another node to vote for them, we do NOT give them our vote
// (we've already voted for ourselves!)
// 3) if we've been offline, and wakeup and try to get votes: we get rejections, that also tell us the new term, and we immediately jump to that term as a follower
func (r *RaftNode) Vote(rv RequestVoteStruct, response *RPCResponse) error {
r.Lock()
defer r.Unlock()
if r.verbose {
log.Println("Vote()")
}
defer r.persistState()
response.Term = r.CurrentTerm
myLastLogTerm := r.getLastLogTerm()
myLastLogIdx := r.getLastLogIndex()
if r.verbose {
log.Printf("RequestVoteStruct: %s. \nMy node: term: %d, votedFor %d, lastLogTerm: %d, lastLogIdx: %d",
rv.String(), r.CurrentTerm, r.VotedFor, myLastLogTerm, myLastLogIdx)
}
looksEligible := r.CandidateLooksEligible(rv.LastLogIdx, rv.LastLogTerm)
if rv.Term > r.CurrentTerm {
r.shiftToFollower(rv.Term, HostID(-1)) // We do not yet know who is leader for this term
}
if rv.Term < r.CurrentTerm {
if r.verbose {
log.Println("RV from prior term - do not grant vote")
}
response.Success = false
} else if (r.VotedFor == -1 || r.VotedFor == rv.CandidateID) && looksEligible {
if r.verbose {
log.Println("Grant vote")
}
r.resetTickers()
response.Success = true
r.VotedFor = rv.CandidateID
} else {
if r.verbose {
log.Println("Do not grant vote")
}
response.Success = false
}
return nil
}
func (r *RaftNode) getLastLogIndex() LogIndex {
if len(r.Log) > 0 {
return LogIndex(len(r.Log) - 1)
}
return LogIndex(0)
}
func (r *RaftNode) getLastLogTerm() Term {
return Term(r.Log[int(r.getLastLogIndex())].Term)
}
func max(x LogIndex, y LogIndex) LogIndex {
if x > y |
return y
}
func min(x LogIndex, y LogIndex) LogIndex {
if x < y {
return x
}
return y
}
func (r *RaftNode) QuitChan() chan bool {
return r.quitChan
}
// Start is the entrypoint for a raft node (constructed here or in a test) to begin the protocol
func (r *RaftNode) Start() {
go r.recvDaemon()
go r.protocol()
go r.quitter(duration)
<-r.quitChan
log.Println("FINISH EXPERIMENT...")
r.printResults()
}
// Main Raft protocol
func (r *RaftNode) protocol() {
r.resetTickers()
log.Printf("Begin Protocol. verbose: %t", r.verbose)
for {
select {
case m := <-r.incomingChan:
if m.response.Term > r.CurrentTerm {
if r.verbose {
log.Printf("Received reply from hostID %d with higher term: %d and leaderid: %d", m.hostID, m.response.Term, m.response.LeaderID)
}
r.shiftToFollower(m.response.Term, m.response.LeaderID)
}
switch m.msgType {
case vote:
if r.verbose {
log.Printf("processing vote reply, hostID=%d: response=%s", m.hostID, m.response.String())
}
r.Lock()
if r.state == candidate {
r.votes[m.hostID] = m.response.Success
if r.wonElection() {
r.shiftToLeader()
}
}
r.Unlock()
case appendEntries:
r.Lock()
if r.state == leader { // We might have been deposed
// Inspect response and update our tracking variables appropriately
if !m.response.Success {
// TODO - When responding to AppendEntries, the follower should return success if they do a new append, OR if they already have appended that entry
prev := r.nextIndex[m.hostID]
next := max(0, r.nextIndex[m.hostID]-1)
if r.verbose {
log.Printf("Decrement nextIndex for hostID %d from %d to %d", m.hostID, prev, next)
}
r.nextIndex[m.hostID] = next
} else {
prev := r.nextIndex[m.hostID]
next := prev + LogIndex(m.aeLength)
if r.verbose {
log.Printf("Increment nextIndex for hostID %d from %d to %d", m.hostID, prev, next)
}
r.matchIndex[m.hostID] = prev
r.nextIndex[m.hostID] = next
}
r.executeLog()
}
r.Unlock()
default:
panic(fmt.Sprintf("invalid msg type. %s", m.String()))
}
case <-r.heartbeatTicker.C: // Send append entries, either empty or full depending on the peer's log index
if r.state == leader {
r.Lock()
r.heartbeatAppendEntriesRPC()
r.updateCommitIndex()
r.executeLog()
r.Unlock()
}
case <-r.electionTicker.C: // Periodically time out and start a new election
if r.state == follower || r.state == candidate {
if r.verbose {
log.Println("TIMED OUT - STARTING ELECTION")
}
r.election()
}
case <-r.quitChan:
r.shutdown()
}
}
}
// Quit the protocol on a timer (to be run in separate goroutine)
func (r *RaftNode) quitter(quitTime int) {
for {
select {
case <-r.quitChan: // the node decided it should quit
return
case <-time.After(time.Duration(quitTime) * time.Second): // we decide the node should quit
r.quitChan <- true
return
}
}
}
func (r *RaftNode) resetElectionTicker() time.Duration {
var newTimeout time.Duration = selectElectionTimeout(r.id) * r.timeoutUnits
if r.verbose {
log.Printf("new election timeout: %s", newTimeout.String())
}
r.electionTicker = *time.NewTicker(newTimeout)
return newTimeout
}
func (r *RaftNode) resetHeartbeatTicker() time.Duration {
var newTimeout time.Duration = heartbeatTimeout * r.timeoutUnits
if r.verbose {
log.Printf("new heartbeat timeout: %s", newTimeout.String())
}
r.heartbeatTicker = *time.NewTicker(newTimeout)
return newTimeout
}
func (r *RaftNode) resetTickers() (time.Duration, time.Duration) {
electionTimeout := r.resetElectionTicker()
heartbeatTimeout := r.resetHeartbeatTicker()
return electionTimeout, heartbeatTimeout
}
func (r *RaftNode) shutdown() {
log.Println("RAFT NODE SHUTTING DOWN")
r.quitChan <- true
}
func init() {
flag.StringVar(&hostfile, "h", "hostfile.json", "name of hostfile")
flag.IntVar(&duration, "d", 30, "time until node shutdown")
flag.BoolVar(&verbose, "v", false, "verbose output")
rand.Seed(time.Now().UTC().UnixNano())
}
// Raft is the entrypoint function for the raft replicated state machine protocol
func Raft() {
flag.Parse()
hosts := make(HostMap)
clients := make(ClientMap)
quitChan := make(chan bool)
intID, recvPort := ResolveAllPeers(hosts, clients, hostfile, true)
id := HostID(intID)
r := NewRaftNode(id, recvPort, hosts, clients, quitChan)
log.Printf("RaftNode: %s", r.String())
r.Start()
}
| {
return x
} | conditional_block |
svgfrags.py | #!/usr/bin/python
# -*- coding: iso-8859-2 -*-
# $Id: svgfrags.py,v 1.9 2007-03-13 20:55:37 wojtek Exp $
#
# SVGfrags - main program
#
# license: BSD
#
# author: Wojciech Muła
# e-mail: wojciech_mula@poczta.onet.pl
# WWW : http://0x80.pl/
"""
13.03.2007
- syntax chenges:
* keyword "this" as source
- use frags.get_text, frags.get_anchor
- + cleanup
- + traceback
12.03.2007
- use new parser (frags/parser.py & frags/parse_subst.py)
- syntax changes:
* removed 'settowidth' & 'settoheight' (now can be expressed with 'scale')
* removed 'fit' (now 'scale' option)
* added ('length' num) to scale
10.03.2007
- share same TeX expression
- id based on file timestamp & string hash (to reasume purposes)
- keep old DVI & TeX fles
- EquationsManager updated (SVGGfxDocument was changed)
- colors inherit from text nodes
- TeX-object space margin support
- options parse
9.03.2007
- parser
- clean up
8.03.2007
- early tests
"""
import sys, os, atexit
import logging
import xml.dom.minidom
import setup
import frags
import dvi2svg
from conv import utils
from conv import fontsel
from conv import dviparser
from conv.findfile import which
from conv.binfile import binfile
DEBUG = False
logging.basicConfig(level=logging.INFO)
log = logging.getLogger('SVGfrags')
class EquationsManager(dvi2svg.SVGGfxDocument):
def __init__(self, doc, mag, scale, unit_mm):
super(EquationsManager, self).__init__(mag, scale, unit_mm, (0,0))
self.document = doc
self.svg = self.document.documentElement
def new_page(self):
self.chars = []
self.rules = []
self.lastpage = None
self.lastbbox = None
pass
def eop(self):
scale2str = self.scale2str
coord2str = self.coord2str
g = self.document.createElement('g')
self.lastpage = g
self.lastbbox = self.get_page_bbox()
for element in self.flush_rules() + self.flush_chars():
g.appendChild(element)
# (DEBUG)
if DEBUG:
xmin, ymin, xmax, ymax = self.lastbbox
r = self.document.createElement('rect')
r.setAttribute('x', str(xmin))
r.setAttribute('y', str(ymin))
r.setAttribute('width', str(xmax - xmin))
r.setAttribute('height', str(ymax - ymin))
r.setAttribute('fill', 'none')
r.setAttribute('stroke', 'red')
r.setAttribute('stroke-width', '0.25')
g.appendChild(r)
#for
def save(self, filename):
defs = self.document.getElementsByTagName('defs')[0]
for element in self.flush_glyphs():
defs.appendChild(element)
# save file
f = open(filename, 'wb')
if setup.options.prettyXML:
f.write(self.document.toprettyxml())
else:
f.write(self.document.toxml())
f.close()
def main(args):
from frags.cmdopts import parse_args
(setup.options, args) = parse_args(args)
# fixed options
setup.options.use_bbox = True
setup.options.prettyXML = False
input_txt = setup.options.input_txt
input_svg = setup.options.input_svg
output_svg = setup.options.output_svg
if not input_txt:
log.error("Rules file not provided, use switch -r or --rules")
sys.exit(1)
elif not os.path.exists(input_txt):
log.error("Rules file '%s' don't exist", input_txt)
sys.exit(1)
if not input_svg:
log.error("Input SVG file not provided, use switch -i or --input")
sys.exit(1)
elif not os.path.exists(input_svg):
log.error("Input SVG file '%s' don't exist", input_svg)
sys.exit(1)
if not output_svg:
log.error("Output SVG file not provided, use switch -i or --output")
sys.exit(1)
elif os.path.exists(output_svg) and not setup.options.frags_overwrite_file:
log.error("File %s already exists, and cannot be overwritten. Use switch -f or --force-overwrite to change this behaviour.", output_svg)
sys.exit(1)
# 1. Load SVG file
XML = xml.dom.minidom.parse(input_svg)
# 1.1. Create 'defs' tag (if doesn't exists), and add xlink namespace
if not XML.getElementsByTagName('defs'):
XML.documentElement.insertBefore(
XML.createElement('defs'),
XML.documentElement.firstChild
)
if not XML.documentElement.getAttribute('xmlns:xlink'):
XML.documentElement.setAttribute('xmlns:xlink', "http://www.w3.org/1999/xlink")
if True:
# XXX: hack; for unknown reason expat do not read id attribute
# and getElementById always fails
ID = {}
frags.collect_Id(XML, ID)
def my_getElementById(id):
try:
return ID[id]
except KeyError:
return None
XML.getElementById = my_getElementById
# 1.2. find all text objects
text_objects = {} # text -> node
for node in XML.getElementsByTagName('text'):
try:
text = frags.get_text(node, setup.options.frags_strip)
# add to list
if text in text_objects:
text_objects[text].append(node)
else:
text_objects[text] = [node]
except ValueError:
pass
#for
# 2. Load & parse replace pairs
input = open(input_txt, 'r').read()
from frags.parse_subst import parse
repl_defs = frags.Dict() # valid defs
text_nodes = set() # text nodes to remove/hide
try:
for item in parse(input):
((kind, value), tex, options) = item
if tex is None: # i.e. "this"
if kind == 'string':
if setup.options.frags_strip:
tex = value.strip()
else:
tex = value
elif kind == 'id':
node = XML.getElementById(value[1:])
if frags.istextnode(node):
tex = frags.get_text(node)
if tex is None:
log.error("Keyword 'this' is not allowed for rect/points object")
continue
if kind == 'string':
if setup.options.frags_strip:
value = value.strip()
try:
for node in text_objects[value]:
text_nodes.add(node)
repl_defs[tex] = ((kind, node), tex, options)
except KeyError:
log.warning("String '%s' doesn't found in SVG, skipping repl", value)
elif kind == 'id':
object = XML.getElementById(value[1:])
if object:
# "forget" id, save object
if object.nodeName in ['rect', 'ellipse', 'circle']:
repl_defs[tex] = ((kind, object), tex, options)
elif object.nodeName == 'text':
repl_defs[tex] = (('string', object), tex, options)
else:
log.warning("Object with id=%s is not text, rect, ellipse nor circle - skipping repl", value)
else:
log.warning("Object with id=%s doesn't found in SVG, skipping repl", value)
else: # point, rect -- no additional tests needed
repl_defs[tex] = ((kind, value), tex, options)
except frags.parse_subst.SyntaxError, e:
log.error("Syntax error: %s", str(e))
sys.exit(1)
if not repl_defs:
log.info("No rules - bye.")
sys.exit()
# make tmp name based on hash input & timestamp of input_txt file
tmp_filename = "svgfrags-%08x-%08x" % (
hash(input) & 0xffffffff,
os.path.getmtime(input_txt)
)
atexit.register(cleanup, tmp_filename)
if not os.path.exists(tmp_filename + ".dvi"):
# 3. prepare LaTeX source
tmp_lines = [
'\\batchmode',
'\\documentclass{article}',
'\\pagestyle{empty}'
'\\begin{document}',
]
for tex in repl_defs:
tmp_lines.append(tex) # each TeX expression at new page
tmp_lines.append("\\newpage")
# 4. write & compile TeX source
tmp_lines.append("\end{document}")
tmp = open(tmp_filename + '.tex', 'w')
for line in tmp_lines:
tmp.write(line + "\n")
tmp.close()
if which('latex'):
exitstatus = os.system("latex %s.tex > /dev/null" % tmp_filename)
if exitstatus:
log.error("LaTeX failed - error code %d; check log file '%s.log'", exitstatus, tmp_filename)
sys.exit(2)
else:
log.error("Program 'latex' isn't avaialable.")
sys.exit(3)
else:
log.info("File %s not changed, used existing DVI file (%s)", input_txt, tmp_filename)
# 5. Load DVI
dvi = binfile(tmp_filename + ".dvi", 'rb')
comment, (num, den, mag, u, l), page_offset, fonts = dviparser.dviinfo(dvi)
unit_mm = num/(den*10000.0)
scale = unit_mm * 72.27/25.4
mag = mag/1000.0
# 6. Preload fonts used in DVI & other stuff
fontsel.preload()
missing = []
for k in fonts:
_, s, d, fontname = fonts[k]
log.debug("Font %s=%s" % (k, fontname))
#print "Font %s=%s" % (k, fontname)
try:
fontsel.create_DVI_font(fontname, k, s, d, setup.options.enc_methods)
except fontsel.FontError, e:
log.error("Can't find font '%s': %s" % (fontname, str(e)))
missing.append((k, fontname))
if missing:
log.error("There were some unavailable fonts; list of missing fonts: %s" % (dvi.name, ", ".join("%d=%s" % kf for kf in missing)))
sys.exit(1)
# 7. Substitute
eq_id_n = 0
# helper functions
def get_width(obj_id, default=0.0):
ref = XML.getElementById(obj_id)
if ref:
return frags.get_width(ref)
else:
log.error("Object id=%s doesn't exists", obj_id)
return default
def get_height(obj_id, default=0.0):
ref = XML.getElementById(obj_id)
if ref:
return frags.get_height(ref)
else:
log.error("Object id=%s doesn't exists", obj_id)
return default
SVG = EquationsManager(XML, 1.25 * mag, scale, unit_mm)
for pageno, items in enumerate(repl_defs.values()):
dvi.seek(page_offset[pageno])
SVG.new_page()
dvi2svg.convert_page(dvi, SVG)
assert SVG.lastpage is not None, "Fatal error!"
assert SVG.lastbbox is not None, "Fatal error!"
if len(items) > 1:
# there are more then one referenco to this TeX object, so
# we have to **define** it, and then reference to, with <use>
eq_id = 'svgfrags-%x' % eq_id_n
eq_id_n += 1
SVG.lastpage.setAttribute('id', eq_id)
XML.getElementsByTagName('defs')[0].appendChild(SVG.lastpage)
else:
# just one reference, use node crated by SVGDocument
equation = SVG.lastpage
eq_id = None
# process
for ((kind, value), tex, options) in items:
px, py = options.position
if px == 'inherit':
if frags.istextnode(value):
px = frags.get_anchor(value)
else:
px = 0.0
# bounding box of equation
(xmin, ymin, xmax, ymax) = SVG.lastbbox
# enlarge with margin values
xmin -= options.margin[0]
xmax += options.margin[1]
ymin -= options.margin[2]
ymax += options.margin[3]
# and calculate bbox's dimensions
dx = xmax - xmin
dy = ymax - ymin
if eq_id is not None:
# more then one reference, create new node <use>
equation = XML.createElement('use')
equation.setAttributeNS('xlink', 'xlink:href', '#'+eq_id)
def put_equation(x, y, sx, sy):
# calculate desired point in equation BBox
xo = xmin + (xmax - xmin)*px
yo = ymin + (ymax - ymin)*py
# move (xo,yo) to (x,y)
if sx == sy:
equation.setAttribute(
'transform',
('translate(%s,%s)' % (SVG.c2s(x), SVG.c2s(y))) + \
('scale(%s)' % SVG.s2s(sx)) + \
('translate(%s,%s)' % (SVG.c2s(-xo), SVG.c2s(-yo)))
)
else:
equation.setAttribute(
'transform',
('translate(%s,%s)' % (SVG.c2s(x), SVG.c2s(y))) + \
('scale(%s,%s)' % (SVG.s2s(sx), SVG.s2s(sy))) + \
('translate(%s,%s)' % (SVG.c2s(-xo), SVG.c2s(-yo)))
)
return equation
# string or text object
if kind == 'string':
object = value
if options.scale == 'fit':
log.warning("%s is a text object, can't fit to rectangle", value)
sx = sy = 1.0
else:
sx, sy = options.scale
if type(sx) is tuple:
kind, val = sx
sx = 1.0
if kind == 'width':
if val == 'this': pass # no scale
else: # XML id
sx = get_width(val[1][1:], dx)/dx
elif kind == "height":
if val == 'this': pass # no scale
else: # XML id
sx = get_height(val[1][1:], dx)/dx
elif kind == "length":
sx = val/dx
if type(sy) is tuple:
kind, val = sy
sy = 1.0
if kind == 'width':
if val == 'this': pass # no scale
else: # XML id
sy = get_width(val[1][1:], dy)/dy
elif kind == "height":
if val == 'this': pass # no scale
else: # XML id
sy = get_height(val[1][1:], dy)/dy
elif kind == "length":
sy = val/dy
if sx == "uniform":
sx = sy
if sy == "uniform":
sy = sx
# get <text> object coords
x = frags.safe_float(object.getAttribute('x'))
y = frags.safe_float(object.getAttribute('y'))
# (DEBUG)
if DEBUG:
c = XML.createElement("circle")
c.setAttribute("cx", str(x))
c.setAttribute("cy", str(y))
c.setAttribute("r", "3")
c.setAttribute("fill", 'red')
object.parentNode.insertBefore(c, object)
put_equation(x, y, sx, sy)
# copy fill color from text node
fill = object.getAttribute('fill') or \
frags.CSS_value(object, 'fill')
if fill:
equation.setAttribute('fill', fill)
# insert equation into XML tree
object.parentNode.insertBefore(equation, object)
# explicity given point
elif kind == 'point':
if options.scale == 'fit':
log.warning("%s is a text object, can't fit to rectangle", value)
sx = sy = 1.0
else:
sx, sy = options.scale
if type(sx) is tuple:
kind, val = sx
sx = 1.0
if kind == 'width':
if val == 'this': pass # no scale
else: # XML id
sx = get_width(val[1][1:], dx)/dx
elif kind == "height":
if val == 'this': pass # no scale
else: # XML id
sx = get_height(val[1][1:], dx)/dx
elif kind == "length":
sx = val/dx
if type(sy) is tuple:
kind, val = sy
sy = 1.0
if kind == 'width':
if val == 'this': pass # no scale
else: # XML id
sy = get_width(val[1][1:], dy)/dy
elif kind == "height":
if val == 'this': pass # no scale
else: # XML id
sy = get_height(val[1][1:], dy)/dy
elif kind == "length":
sy = val/dy
if sx == "uniform":
sx = sy
if sy == "uniform":
sy = sx
# insert equation into XML tree
x, y = value
XML.documentElement.appendChild(
put_equation(x, y, sx, sy)
)
# rectangle or object with known bbox
elif kind == 'id' or kind == 'rect':
# get bounding box
if kind == 'rect':
Xmin, Ymin, Xmax, Ymax = value # rect
else:
Xmin, Ymin, Xmax, Ymax = frags.get_bbox(value) # object
DX = Xmax - Xmin
DY = Ymax - Ymin
# reference point
x = Xmin + (Xmax - Xmin)*px
y = Ymin + (Ymax - Ymin)*py
# and set default scale
sx = 1.0
sy = 1.0
# Fit in rectangle
if options.scale == 'fit':
tmp_x = DX/(xmax - xmin)
tmp_y = DY/(ymax - ymin)
if tmp_x < tmp_y:
sx = sy = tmp_x
else:
sx = sy = tmp_y
else:
sx, sy = options.scale
if type(sx) is tuple:
kind, val = sx
sx = 1.0
if kind == 'width':
if val == 'this':
sx = DX/dx
else: # XML id
sx = get_width(val[1][1:], dx)/dx
elif kind == "height":
if val == 'this':
sx = DX/dx
else: # XML id
sx = get_height(val[1][1:], dx)/dx
elif kind == "length":
sx = val/dx
if type(sy) is tuple:
kind, val = sy
sy = 1.0
if kind == 'width':
if val == 'this':
sy = DY/dy
else: # XML id
sy = get_width(val[1][1:], dy)/dy
elif kind == "height":
if val == 'this':
sy = DY/dy
else: # XML id
sy = get_height(val[1][1:], dy)/dy
elif kind == "length":
sy = val/dy
if sx == "uniform":
sx = sy
if sy == "uniform":
sy = sx
#endif
# move&scale equation
put_equation(x, y, sx, sy)
# and append to XML tree
if kind == 'rect':
XML.documentElement.appendChild(equation)
else: # kind == 'id'
# in case of existing object, place them
# just "above" them
pn = value.parentNode
if value == pn.lastChild:
pn.appendChild(equation)
else:
pn.insertBefore(equation, value.nextSibling)
#for
# 9. modify replaced <text> nodes according to options
if setup.options.frags_removetext: # remove nodes
for node in text_nodes:
node.parentNode.removeChild(node)
elif setup.options.frags_hidetext: # hide nodes
for node in text_nodes:
node.setAttribute('display', 'none')
SVG.save(output_svg)
def cleanup(tmp_filename):
" |
if __name__ == '__main__':
import traceback
try:
main(sys.argv)
except SystemExit, (code):
sys.exit(code)
except:
if setup.options.print_traceback:
traceback.print_exc(file=sys.stderr)
else:
exception, instance, _ = sys.exc_info()
print >> sys.stderr, "Unexpeced error - %s: %s" % (exception.__name__, str(instance))
# vim: ts=4 sw=4 nowrap
| remove temporary files"
extensions = ['.aux', '.log']
if not setup.options.frags_keeptex:
extensions.append('.tex')
if not setup.options.frags_keepdvi:
extensions.append('.dvi')
for ext in extensions:
frags.remove_file(tmp_filename + ext)
| identifier_body |
svgfrags.py | #!/usr/bin/python
# -*- coding: iso-8859-2 -*-
# $Id: svgfrags.py,v 1.9 2007-03-13 20:55:37 wojtek Exp $
#
# SVGfrags - main program
#
# license: BSD
#
# author: Wojciech Muła
# e-mail: wojciech_mula@poczta.onet.pl
# WWW : http://0x80.pl/
"""
13.03.2007
- syntax chenges:
* keyword "this" as source
- use frags.get_text, frags.get_anchor
- + cleanup
- + traceback
12.03.2007
- use new parser (frags/parser.py & frags/parse_subst.py)
- syntax changes:
* removed 'settowidth' & 'settoheight' (now can be expressed with 'scale')
* removed 'fit' (now 'scale' option)
* added ('length' num) to scale
10.03.2007
- share same TeX expression
- id based on file timestamp & string hash (to reasume purposes)
- keep old DVI & TeX fles
- EquationsManager updated (SVGGfxDocument was changed)
- colors inherit from text nodes
- TeX-object space margin support
- options parse
9.03.2007
- parser
- clean up
8.03.2007
- early tests
"""
import sys, os, atexit
import logging
import xml.dom.minidom
import setup
import frags
import dvi2svg
from conv import utils
from conv import fontsel
from conv import dviparser
from conv.findfile import which
from conv.binfile import binfile
DEBUG = False
logging.basicConfig(level=logging.INFO)
log = logging.getLogger('SVGfrags')
class EquationsManager(dvi2svg.SVGGfxDocument):
def __init__(self, doc, mag, scale, unit_mm):
super(EquationsManager, self).__init__(mag, scale, unit_mm, (0,0))
self.document = doc
self.svg = self.document.documentElement
def new_page(self):
self.chars = []
self.rules = []
self.lastpage = None
self.lastbbox = None
pass
def eop(self):
scale2str = self.scale2str
coord2str = self.coord2str
g = self.document.createElement('g')
self.lastpage = g
self.lastbbox = self.get_page_bbox()
for element in self.flush_rules() + self.flush_chars():
g.appendChild(element)
# (DEBUG)
if DEBUG:
xmin, ymin, xmax, ymax = self.lastbbox
r = self.document.createElement('rect')
r.setAttribute('x', str(xmin))
r.setAttribute('y', str(ymin))
r.setAttribute('width', str(xmax - xmin))
r.setAttribute('height', str(ymax - ymin))
r.setAttribute('fill', 'none')
r.setAttribute('stroke', 'red')
r.setAttribute('stroke-width', '0.25')
g.appendChild(r)
#for
def save(self, filename):
defs = self.document.getElementsByTagName('defs')[0]
for element in self.flush_glyphs():
defs.appendChild(element)
# save file
f = open(filename, 'wb')
if setup.options.prettyXML:
f.write(self.document.toprettyxml())
else:
f.write(self.document.toxml())
f.close()
def main(args):
from frags.cmdopts import parse_args
(setup.options, args) = parse_args(args)
# fixed options
setup.options.use_bbox = True
setup.options.prettyXML = False
input_txt = setup.options.input_txt
input_svg = setup.options.input_svg
output_svg = setup.options.output_svg
if not input_txt:
log.error("Rules file not provided, use switch -r or --rules")
sys.exit(1)
elif not os.path.exists(input_txt):
log.error("Rules file '%s' don't exist", input_txt)
sys.exit(1)
if not input_svg:
log.error("Input SVG file not provided, use switch -i or --input")
sys.exit(1)
elif not os.path.exists(input_svg):
log.error("Input SVG file '%s' don't exist", input_svg)
sys.exit(1)
if not output_svg:
log.error("Output SVG file not provided, use switch -i or --output")
sys.exit(1)
elif os.path.exists(output_svg) and not setup.options.frags_overwrite_file:
log.error("File %s already exists, and cannot be overwritten. Use switch -f or --force-overwrite to change this behaviour.", output_svg)
sys.exit(1)
# 1. Load SVG file
XML = xml.dom.minidom.parse(input_svg)
# 1.1. Create 'defs' tag (if doesn't exists), and add xlink namespace
if not XML.getElementsByTagName('defs'):
XML.documentElement.insertBefore(
XML.createElement('defs'),
XML.documentElement.firstChild
)
if not XML.documentElement.getAttribute('xmlns:xlink'):
XML.documentElement.setAttribute('xmlns:xlink', "http://www.w3.org/1999/xlink")
if True:
# XXX: hack; for unknown reason expat do not read id attribute
# and getElementById always fails
ID = {}
frags.collect_Id(XML, ID)
def my_getElementById(id):
try:
return ID[id]
except KeyError:
return None
XML.getElementById = my_getElementById
# 1.2. find all text objects
text_objects = {} # text -> node
for node in XML.getElementsByTagName('text'):
try:
text = frags.get_text(node, setup.options.frags_strip)
# add to list
if text in text_objects:
text_objects[text].append(node)
else:
text_objects[text] = [node]
except ValueError:
pass
#for
# 2. Load & parse replace pairs
input = open(input_txt, 'r').read()
from frags.parse_subst import parse
repl_defs = frags.Dict() # valid defs
text_nodes = set() # text nodes to remove/hide
try:
for item in parse(input):
((kind, value), tex, options) = item
if tex is None: # i.e. "this"
if kind == 'string':
if setup.options.frags_strip:
tex = value.strip()
else:
tex = value
elif kind == 'id':
node = XML.getElementById(value[1:])
if frags.istextnode(node):
tex = frags.get_text(node)
if tex is None:
log.error("Keyword 'this' is not allowed for rect/points object")
continue
if kind == 'string':
if setup.options.frags_strip:
value = value.strip()
try:
for node in text_objects[value]:
text_nodes.add(node)
repl_defs[tex] = ((kind, node), tex, options)
except KeyError:
log.warning("String '%s' doesn't found in SVG, skipping repl", value)
elif kind == 'id':
object = XML.getElementById(value[1:])
if object:
# "forget" id, save object
if object.nodeName in ['rect', 'ellipse', 'circle']:
repl_defs[tex] = ((kind, object), tex, options)
elif object.nodeName == 'text':
repl_defs[tex] = (('string', object), tex, options)
else:
log.warning("Object with id=%s is not text, rect, ellipse nor circle - skipping repl", value)
else:
log.warning("Object with id=%s doesn't found in SVG, skipping repl", value)
else: # point, rect -- no additional tests needed
repl_defs[tex] = ((kind, value), tex, options)
except frags.parse_subst.SyntaxError, e:
log.error("Syntax error: %s", str(e))
sys.exit(1)
if not repl_defs:
log.info("No rules - bye.")
sys.exit()
# make tmp name based on hash input & timestamp of input_txt file
tmp_filename = "svgfrags-%08x-%08x" % (
hash(input) & 0xffffffff,
os.path.getmtime(input_txt)
)
atexit.register(cleanup, tmp_filename)
if not os.path.exists(tmp_filename + ".dvi"):
# 3. prepare LaTeX source
tmp_lines = [
'\\batchmode',
'\\documentclass{article}',
'\\pagestyle{empty}'
'\\begin{document}',
]
for tex in repl_defs:
tmp_lines.append(tex) # each TeX expression at new page
tmp_lines.append("\\newpage")
# 4. write & compile TeX source
tmp_lines.append("\end{document}")
tmp = open(tmp_filename + '.tex', 'w')
for line in tmp_lines: | exitstatus = os.system("latex %s.tex > /dev/null" % tmp_filename)
if exitstatus:
log.error("LaTeX failed - error code %d; check log file '%s.log'", exitstatus, tmp_filename)
sys.exit(2)
else:
log.error("Program 'latex' isn't avaialable.")
sys.exit(3)
else:
log.info("File %s not changed, used existing DVI file (%s)", input_txt, tmp_filename)
# 5. Load DVI
dvi = binfile(tmp_filename + ".dvi", 'rb')
comment, (num, den, mag, u, l), page_offset, fonts = dviparser.dviinfo(dvi)
unit_mm = num/(den*10000.0)
scale = unit_mm * 72.27/25.4
mag = mag/1000.0
# 6. Preload fonts used in DVI & other stuff
fontsel.preload()
missing = []
for k in fonts:
_, s, d, fontname = fonts[k]
log.debug("Font %s=%s" % (k, fontname))
#print "Font %s=%s" % (k, fontname)
try:
fontsel.create_DVI_font(fontname, k, s, d, setup.options.enc_methods)
except fontsel.FontError, e:
log.error("Can't find font '%s': %s" % (fontname, str(e)))
missing.append((k, fontname))
if missing:
log.error("There were some unavailable fonts; list of missing fonts: %s" % (dvi.name, ", ".join("%d=%s" % kf for kf in missing)))
sys.exit(1)
# 7. Substitute
eq_id_n = 0
# helper functions
def get_width(obj_id, default=0.0):
ref = XML.getElementById(obj_id)
if ref:
return frags.get_width(ref)
else:
log.error("Object id=%s doesn't exists", obj_id)
return default
def get_height(obj_id, default=0.0):
ref = XML.getElementById(obj_id)
if ref:
return frags.get_height(ref)
else:
log.error("Object id=%s doesn't exists", obj_id)
return default
SVG = EquationsManager(XML, 1.25 * mag, scale, unit_mm)
for pageno, items in enumerate(repl_defs.values()):
dvi.seek(page_offset[pageno])
SVG.new_page()
dvi2svg.convert_page(dvi, SVG)
assert SVG.lastpage is not None, "Fatal error!"
assert SVG.lastbbox is not None, "Fatal error!"
if len(items) > 1:
# there are more then one referenco to this TeX object, so
# we have to **define** it, and then reference to, with <use>
eq_id = 'svgfrags-%x' % eq_id_n
eq_id_n += 1
SVG.lastpage.setAttribute('id', eq_id)
XML.getElementsByTagName('defs')[0].appendChild(SVG.lastpage)
else:
# just one reference, use node crated by SVGDocument
equation = SVG.lastpage
eq_id = None
# process
for ((kind, value), tex, options) in items:
px, py = options.position
if px == 'inherit':
if frags.istextnode(value):
px = frags.get_anchor(value)
else:
px = 0.0
# bounding box of equation
(xmin, ymin, xmax, ymax) = SVG.lastbbox
# enlarge with margin values
xmin -= options.margin[0]
xmax += options.margin[1]
ymin -= options.margin[2]
ymax += options.margin[3]
# and calculate bbox's dimensions
dx = xmax - xmin
dy = ymax - ymin
if eq_id is not None:
# more then one reference, create new node <use>
equation = XML.createElement('use')
equation.setAttributeNS('xlink', 'xlink:href', '#'+eq_id)
def put_equation(x, y, sx, sy):
# calculate desired point in equation BBox
xo = xmin + (xmax - xmin)*px
yo = ymin + (ymax - ymin)*py
# move (xo,yo) to (x,y)
if sx == sy:
equation.setAttribute(
'transform',
('translate(%s,%s)' % (SVG.c2s(x), SVG.c2s(y))) + \
('scale(%s)' % SVG.s2s(sx)) + \
('translate(%s,%s)' % (SVG.c2s(-xo), SVG.c2s(-yo)))
)
else:
equation.setAttribute(
'transform',
('translate(%s,%s)' % (SVG.c2s(x), SVG.c2s(y))) + \
('scale(%s,%s)' % (SVG.s2s(sx), SVG.s2s(sy))) + \
('translate(%s,%s)' % (SVG.c2s(-xo), SVG.c2s(-yo)))
)
return equation
# string or text object
if kind == 'string':
object = value
if options.scale == 'fit':
log.warning("%s is a text object, can't fit to rectangle", value)
sx = sy = 1.0
else:
sx, sy = options.scale
if type(sx) is tuple:
kind, val = sx
sx = 1.0
if kind == 'width':
if val == 'this': pass # no scale
else: # XML id
sx = get_width(val[1][1:], dx)/dx
elif kind == "height":
if val == 'this': pass # no scale
else: # XML id
sx = get_height(val[1][1:], dx)/dx
elif kind == "length":
sx = val/dx
if type(sy) is tuple:
kind, val = sy
sy = 1.0
if kind == 'width':
if val == 'this': pass # no scale
else: # XML id
sy = get_width(val[1][1:], dy)/dy
elif kind == "height":
if val == 'this': pass # no scale
else: # XML id
sy = get_height(val[1][1:], dy)/dy
elif kind == "length":
sy = val/dy
if sx == "uniform":
sx = sy
if sy == "uniform":
sy = sx
# get <text> object coords
x = frags.safe_float(object.getAttribute('x'))
y = frags.safe_float(object.getAttribute('y'))
# (DEBUG)
if DEBUG:
c = XML.createElement("circle")
c.setAttribute("cx", str(x))
c.setAttribute("cy", str(y))
c.setAttribute("r", "3")
c.setAttribute("fill", 'red')
object.parentNode.insertBefore(c, object)
put_equation(x, y, sx, sy)
# copy fill color from text node
fill = object.getAttribute('fill') or \
frags.CSS_value(object, 'fill')
if fill:
equation.setAttribute('fill', fill)
# insert equation into XML tree
object.parentNode.insertBefore(equation, object)
# explicity given point
elif kind == 'point':
if options.scale == 'fit':
log.warning("%s is a text object, can't fit to rectangle", value)
sx = sy = 1.0
else:
sx, sy = options.scale
if type(sx) is tuple:
kind, val = sx
sx = 1.0
if kind == 'width':
if val == 'this': pass # no scale
else: # XML id
sx = get_width(val[1][1:], dx)/dx
elif kind == "height":
if val == 'this': pass # no scale
else: # XML id
sx = get_height(val[1][1:], dx)/dx
elif kind == "length":
sx = val/dx
if type(sy) is tuple:
kind, val = sy
sy = 1.0
if kind == 'width':
if val == 'this': pass # no scale
else: # XML id
sy = get_width(val[1][1:], dy)/dy
elif kind == "height":
if val == 'this': pass # no scale
else: # XML id
sy = get_height(val[1][1:], dy)/dy
elif kind == "length":
sy = val/dy
if sx == "uniform":
sx = sy
if sy == "uniform":
sy = sx
# insert equation into XML tree
x, y = value
XML.documentElement.appendChild(
put_equation(x, y, sx, sy)
)
# rectangle or object with known bbox
elif kind == 'id' or kind == 'rect':
# get bounding box
if kind == 'rect':
Xmin, Ymin, Xmax, Ymax = value # rect
else:
Xmin, Ymin, Xmax, Ymax = frags.get_bbox(value) # object
DX = Xmax - Xmin
DY = Ymax - Ymin
# reference point
x = Xmin + (Xmax - Xmin)*px
y = Ymin + (Ymax - Ymin)*py
# and set default scale
sx = 1.0
sy = 1.0
# Fit in rectangle
if options.scale == 'fit':
tmp_x = DX/(xmax - xmin)
tmp_y = DY/(ymax - ymin)
if tmp_x < tmp_y:
sx = sy = tmp_x
else:
sx = sy = tmp_y
else:
sx, sy = options.scale
if type(sx) is tuple:
kind, val = sx
sx = 1.0
if kind == 'width':
if val == 'this':
sx = DX/dx
else: # XML id
sx = get_width(val[1][1:], dx)/dx
elif kind == "height":
if val == 'this':
sx = DX/dx
else: # XML id
sx = get_height(val[1][1:], dx)/dx
elif kind == "length":
sx = val/dx
if type(sy) is tuple:
kind, val = sy
sy = 1.0
if kind == 'width':
if val == 'this':
sy = DY/dy
else: # XML id
sy = get_width(val[1][1:], dy)/dy
elif kind == "height":
if val == 'this':
sy = DY/dy
else: # XML id
sy = get_height(val[1][1:], dy)/dy
elif kind == "length":
sy = val/dy
if sx == "uniform":
sx = sy
if sy == "uniform":
sy = sx
#endif
# move&scale equation
put_equation(x, y, sx, sy)
# and append to XML tree
if kind == 'rect':
XML.documentElement.appendChild(equation)
else: # kind == 'id'
# in case of existing object, place them
# just "above" them
pn = value.parentNode
if value == pn.lastChild:
pn.appendChild(equation)
else:
pn.insertBefore(equation, value.nextSibling)
#for
# 9. modify replaced <text> nodes according to options
if setup.options.frags_removetext: # remove nodes
for node in text_nodes:
node.parentNode.removeChild(node)
elif setup.options.frags_hidetext: # hide nodes
for node in text_nodes:
node.setAttribute('display', 'none')
SVG.save(output_svg)
def cleanup(tmp_filename):
"remove temporary files"
extensions = ['.aux', '.log']
if not setup.options.frags_keeptex:
extensions.append('.tex')
if not setup.options.frags_keepdvi:
extensions.append('.dvi')
for ext in extensions:
frags.remove_file(tmp_filename + ext)
if __name__ == '__main__':
import traceback
try:
main(sys.argv)
except SystemExit, (code):
sys.exit(code)
except:
if setup.options.print_traceback:
traceback.print_exc(file=sys.stderr)
else:
exception, instance, _ = sys.exc_info()
print >> sys.stderr, "Unexpeced error - %s: %s" % (exception.__name__, str(instance))
# vim: ts=4 sw=4 nowrap | tmp.write(line + "\n")
tmp.close()
if which('latex'): | random_line_split |
svgfrags.py | #!/usr/bin/python
# -*- coding: iso-8859-2 -*-
# $Id: svgfrags.py,v 1.9 2007-03-13 20:55:37 wojtek Exp $
#
# SVGfrags - main program
#
# license: BSD
#
# author: Wojciech Muła
# e-mail: wojciech_mula@poczta.onet.pl
# WWW : http://0x80.pl/
"""
13.03.2007
- syntax chenges:
* keyword "this" as source
- use frags.get_text, frags.get_anchor
- + cleanup
- + traceback
12.03.2007
- use new parser (frags/parser.py & frags/parse_subst.py)
- syntax changes:
* removed 'settowidth' & 'settoheight' (now can be expressed with 'scale')
* removed 'fit' (now 'scale' option)
* added ('length' num) to scale
10.03.2007
- share same TeX expression
- id based on file timestamp & string hash (to reasume purposes)
- keep old DVI & TeX fles
- EquationsManager updated (SVGGfxDocument was changed)
- colors inherit from text nodes
- TeX-object space margin support
- options parse
9.03.2007
- parser
- clean up
8.03.2007
- early tests
"""
import sys, os, atexit
import logging
import xml.dom.minidom
import setup
import frags
import dvi2svg
from conv import utils
from conv import fontsel
from conv import dviparser
from conv.findfile import which
from conv.binfile import binfile
DEBUG = False
logging.basicConfig(level=logging.INFO)
log = logging.getLogger('SVGfrags')
class EquationsManager(dvi2svg.SVGGfxDocument):
def __init__(self, doc, mag, scale, unit_mm):
super(EquationsManager, self).__init__(mag, scale, unit_mm, (0,0))
self.document = doc
self.svg = self.document.documentElement
def new_page(self):
self.chars = []
self.rules = []
self.lastpage = None
self.lastbbox = None
pass
def eop(self):
scale2str = self.scale2str
coord2str = self.coord2str
g = self.document.createElement('g')
self.lastpage = g
self.lastbbox = self.get_page_bbox()
for element in self.flush_rules() + self.flush_chars():
g.appendChild(element)
# (DEBUG)
if DEBUG:
xmin, ymin, xmax, ymax = self.lastbbox
r = self.document.createElement('rect')
r.setAttribute('x', str(xmin))
r.setAttribute('y', str(ymin))
r.setAttribute('width', str(xmax - xmin))
r.setAttribute('height', str(ymax - ymin))
r.setAttribute('fill', 'none')
r.setAttribute('stroke', 'red')
r.setAttribute('stroke-width', '0.25')
g.appendChild(r)
#for
def s | self, filename):
defs = self.document.getElementsByTagName('defs')[0]
for element in self.flush_glyphs():
defs.appendChild(element)
# save file
f = open(filename, 'wb')
if setup.options.prettyXML:
f.write(self.document.toprettyxml())
else:
f.write(self.document.toxml())
f.close()
def main(args):
from frags.cmdopts import parse_args
(setup.options, args) = parse_args(args)
# fixed options
setup.options.use_bbox = True
setup.options.prettyXML = False
input_txt = setup.options.input_txt
input_svg = setup.options.input_svg
output_svg = setup.options.output_svg
if not input_txt:
log.error("Rules file not provided, use switch -r or --rules")
sys.exit(1)
elif not os.path.exists(input_txt):
log.error("Rules file '%s' don't exist", input_txt)
sys.exit(1)
if not input_svg:
log.error("Input SVG file not provided, use switch -i or --input")
sys.exit(1)
elif not os.path.exists(input_svg):
log.error("Input SVG file '%s' don't exist", input_svg)
sys.exit(1)
if not output_svg:
log.error("Output SVG file not provided, use switch -i or --output")
sys.exit(1)
elif os.path.exists(output_svg) and not setup.options.frags_overwrite_file:
log.error("File %s already exists, and cannot be overwritten. Use switch -f or --force-overwrite to change this behaviour.", output_svg)
sys.exit(1)
# 1. Load SVG file
XML = xml.dom.minidom.parse(input_svg)
# 1.1. Create 'defs' tag (if doesn't exists), and add xlink namespace
if not XML.getElementsByTagName('defs'):
XML.documentElement.insertBefore(
XML.createElement('defs'),
XML.documentElement.firstChild
)
if not XML.documentElement.getAttribute('xmlns:xlink'):
XML.documentElement.setAttribute('xmlns:xlink', "http://www.w3.org/1999/xlink")
if True:
# XXX: hack; for unknown reason expat do not read id attribute
# and getElementById always fails
ID = {}
frags.collect_Id(XML, ID)
def my_getElementById(id):
try:
return ID[id]
except KeyError:
return None
XML.getElementById = my_getElementById
# 1.2. find all text objects
text_objects = {} # text -> node
for node in XML.getElementsByTagName('text'):
try:
text = frags.get_text(node, setup.options.frags_strip)
# add to list
if text in text_objects:
text_objects[text].append(node)
else:
text_objects[text] = [node]
except ValueError:
pass
#for
# 2. Load & parse replace pairs
input = open(input_txt, 'r').read()
from frags.parse_subst import parse
repl_defs = frags.Dict() # valid defs
text_nodes = set() # text nodes to remove/hide
try:
for item in parse(input):
((kind, value), tex, options) = item
if tex is None: # i.e. "this"
if kind == 'string':
if setup.options.frags_strip:
tex = value.strip()
else:
tex = value
elif kind == 'id':
node = XML.getElementById(value[1:])
if frags.istextnode(node):
tex = frags.get_text(node)
if tex is None:
log.error("Keyword 'this' is not allowed for rect/points object")
continue
if kind == 'string':
if setup.options.frags_strip:
value = value.strip()
try:
for node in text_objects[value]:
text_nodes.add(node)
repl_defs[tex] = ((kind, node), tex, options)
except KeyError:
log.warning("String '%s' doesn't found in SVG, skipping repl", value)
elif kind == 'id':
object = XML.getElementById(value[1:])
if object:
# "forget" id, save object
if object.nodeName in ['rect', 'ellipse', 'circle']:
repl_defs[tex] = ((kind, object), tex, options)
elif object.nodeName == 'text':
repl_defs[tex] = (('string', object), tex, options)
else:
log.warning("Object with id=%s is not text, rect, ellipse nor circle - skipping repl", value)
else:
log.warning("Object with id=%s doesn't found in SVG, skipping repl", value)
else: # point, rect -- no additional tests needed
repl_defs[tex] = ((kind, value), tex, options)
except frags.parse_subst.SyntaxError, e:
log.error("Syntax error: %s", str(e))
sys.exit(1)
if not repl_defs:
log.info("No rules - bye.")
sys.exit()
# make tmp name based on hash input & timestamp of input_txt file
tmp_filename = "svgfrags-%08x-%08x" % (
hash(input) & 0xffffffff,
os.path.getmtime(input_txt)
)
atexit.register(cleanup, tmp_filename)
if not os.path.exists(tmp_filename + ".dvi"):
# 3. prepare LaTeX source
tmp_lines = [
'\\batchmode',
'\\documentclass{article}',
'\\pagestyle{empty}'
'\\begin{document}',
]
for tex in repl_defs:
tmp_lines.append(tex) # each TeX expression at new page
tmp_lines.append("\\newpage")
# 4. write & compile TeX source
tmp_lines.append("\end{document}")
tmp = open(tmp_filename + '.tex', 'w')
for line in tmp_lines:
tmp.write(line + "\n")
tmp.close()
if which('latex'):
exitstatus = os.system("latex %s.tex > /dev/null" % tmp_filename)
if exitstatus:
log.error("LaTeX failed - error code %d; check log file '%s.log'", exitstatus, tmp_filename)
sys.exit(2)
else:
log.error("Program 'latex' isn't avaialable.")
sys.exit(3)
else:
log.info("File %s not changed, used existing DVI file (%s)", input_txt, tmp_filename)
# 5. Load DVI
dvi = binfile(tmp_filename + ".dvi", 'rb')
comment, (num, den, mag, u, l), page_offset, fonts = dviparser.dviinfo(dvi)
unit_mm = num/(den*10000.0)
scale = unit_mm * 72.27/25.4
mag = mag/1000.0
# 6. Preload fonts used in DVI & other stuff
fontsel.preload()
missing = []
for k in fonts:
_, s, d, fontname = fonts[k]
log.debug("Font %s=%s" % (k, fontname))
#print "Font %s=%s" % (k, fontname)
try:
fontsel.create_DVI_font(fontname, k, s, d, setup.options.enc_methods)
except fontsel.FontError, e:
log.error("Can't find font '%s': %s" % (fontname, str(e)))
missing.append((k, fontname))
if missing:
log.error("There were some unavailable fonts; list of missing fonts: %s" % (dvi.name, ", ".join("%d=%s" % kf for kf in missing)))
sys.exit(1)
# 7. Substitute
eq_id_n = 0
# helper functions
def get_width(obj_id, default=0.0):
ref = XML.getElementById(obj_id)
if ref:
return frags.get_width(ref)
else:
log.error("Object id=%s doesn't exists", obj_id)
return default
def get_height(obj_id, default=0.0):
ref = XML.getElementById(obj_id)
if ref:
return frags.get_height(ref)
else:
log.error("Object id=%s doesn't exists", obj_id)
return default
SVG = EquationsManager(XML, 1.25 * mag, scale, unit_mm)
for pageno, items in enumerate(repl_defs.values()):
dvi.seek(page_offset[pageno])
SVG.new_page()
dvi2svg.convert_page(dvi, SVG)
assert SVG.lastpage is not None, "Fatal error!"
assert SVG.lastbbox is not None, "Fatal error!"
if len(items) > 1:
# there are more then one referenco to this TeX object, so
# we have to **define** it, and then reference to, with <use>
eq_id = 'svgfrags-%x' % eq_id_n
eq_id_n += 1
SVG.lastpage.setAttribute('id', eq_id)
XML.getElementsByTagName('defs')[0].appendChild(SVG.lastpage)
else:
# just one reference, use node crated by SVGDocument
equation = SVG.lastpage
eq_id = None
# process
for ((kind, value), tex, options) in items:
px, py = options.position
if px == 'inherit':
if frags.istextnode(value):
px = frags.get_anchor(value)
else:
px = 0.0
# bounding box of equation
(xmin, ymin, xmax, ymax) = SVG.lastbbox
# enlarge with margin values
xmin -= options.margin[0]
xmax += options.margin[1]
ymin -= options.margin[2]
ymax += options.margin[3]
# and calculate bbox's dimensions
dx = xmax - xmin
dy = ymax - ymin
if eq_id is not None:
# more then one reference, create new node <use>
equation = XML.createElement('use')
equation.setAttributeNS('xlink', 'xlink:href', '#'+eq_id)
def put_equation(x, y, sx, sy):
# calculate desired point in equation BBox
xo = xmin + (xmax - xmin)*px
yo = ymin + (ymax - ymin)*py
# move (xo,yo) to (x,y)
if sx == sy:
equation.setAttribute(
'transform',
('translate(%s,%s)' % (SVG.c2s(x), SVG.c2s(y))) + \
('scale(%s)' % SVG.s2s(sx)) + \
('translate(%s,%s)' % (SVG.c2s(-xo), SVG.c2s(-yo)))
)
else:
equation.setAttribute(
'transform',
('translate(%s,%s)' % (SVG.c2s(x), SVG.c2s(y))) + \
('scale(%s,%s)' % (SVG.s2s(sx), SVG.s2s(sy))) + \
('translate(%s,%s)' % (SVG.c2s(-xo), SVG.c2s(-yo)))
)
return equation
# string or text object
if kind == 'string':
object = value
if options.scale == 'fit':
log.warning("%s is a text object, can't fit to rectangle", value)
sx = sy = 1.0
else:
sx, sy = options.scale
if type(sx) is tuple:
kind, val = sx
sx = 1.0
if kind == 'width':
if val == 'this': pass # no scale
else: # XML id
sx = get_width(val[1][1:], dx)/dx
elif kind == "height":
if val == 'this': pass # no scale
else: # XML id
sx = get_height(val[1][1:], dx)/dx
elif kind == "length":
sx = val/dx
if type(sy) is tuple:
kind, val = sy
sy = 1.0
if kind == 'width':
if val == 'this': pass # no scale
else: # XML id
sy = get_width(val[1][1:], dy)/dy
elif kind == "height":
if val == 'this': pass # no scale
else: # XML id
sy = get_height(val[1][1:], dy)/dy
elif kind == "length":
sy = val/dy
if sx == "uniform":
sx = sy
if sy == "uniform":
sy = sx
# get <text> object coords
x = frags.safe_float(object.getAttribute('x'))
y = frags.safe_float(object.getAttribute('y'))
# (DEBUG)
if DEBUG:
c = XML.createElement("circle")
c.setAttribute("cx", str(x))
c.setAttribute("cy", str(y))
c.setAttribute("r", "3")
c.setAttribute("fill", 'red')
object.parentNode.insertBefore(c, object)
put_equation(x, y, sx, sy)
# copy fill color from text node
fill = object.getAttribute('fill') or \
frags.CSS_value(object, 'fill')
if fill:
equation.setAttribute('fill', fill)
# insert equation into XML tree
object.parentNode.insertBefore(equation, object)
# explicity given point
elif kind == 'point':
if options.scale == 'fit':
log.warning("%s is a text object, can't fit to rectangle", value)
sx = sy = 1.0
else:
sx, sy = options.scale
if type(sx) is tuple:
kind, val = sx
sx = 1.0
if kind == 'width':
if val == 'this': pass # no scale
else: # XML id
sx = get_width(val[1][1:], dx)/dx
elif kind == "height":
if val == 'this': pass # no scale
else: # XML id
sx = get_height(val[1][1:], dx)/dx
elif kind == "length":
sx = val/dx
if type(sy) is tuple:
kind, val = sy
sy = 1.0
if kind == 'width':
if val == 'this': pass # no scale
else: # XML id
sy = get_width(val[1][1:], dy)/dy
elif kind == "height":
if val == 'this': pass # no scale
else: # XML id
sy = get_height(val[1][1:], dy)/dy
elif kind == "length":
sy = val/dy
if sx == "uniform":
sx = sy
if sy == "uniform":
sy = sx
# insert equation into XML tree
x, y = value
XML.documentElement.appendChild(
put_equation(x, y, sx, sy)
)
# rectangle or object with known bbox
elif kind == 'id' or kind == 'rect':
# get bounding box
if kind == 'rect':
Xmin, Ymin, Xmax, Ymax = value # rect
else:
Xmin, Ymin, Xmax, Ymax = frags.get_bbox(value) # object
DX = Xmax - Xmin
DY = Ymax - Ymin
# reference point
x = Xmin + (Xmax - Xmin)*px
y = Ymin + (Ymax - Ymin)*py
# and set default scale
sx = 1.0
sy = 1.0
# Fit in rectangle
if options.scale == 'fit':
tmp_x = DX/(xmax - xmin)
tmp_y = DY/(ymax - ymin)
if tmp_x < tmp_y:
sx = sy = tmp_x
else:
sx = sy = tmp_y
else:
sx, sy = options.scale
if type(sx) is tuple:
kind, val = sx
sx = 1.0
if kind == 'width':
if val == 'this':
sx = DX/dx
else: # XML id
sx = get_width(val[1][1:], dx)/dx
elif kind == "height":
if val == 'this':
sx = DX/dx
else: # XML id
sx = get_height(val[1][1:], dx)/dx
elif kind == "length":
sx = val/dx
if type(sy) is tuple:
kind, val = sy
sy = 1.0
if kind == 'width':
if val == 'this':
sy = DY/dy
else: # XML id
sy = get_width(val[1][1:], dy)/dy
elif kind == "height":
if val == 'this':
sy = DY/dy
else: # XML id
sy = get_height(val[1][1:], dy)/dy
elif kind == "length":
sy = val/dy
if sx == "uniform":
sx = sy
if sy == "uniform":
sy = sx
#endif
# move&scale equation
put_equation(x, y, sx, sy)
# and append to XML tree
if kind == 'rect':
XML.documentElement.appendChild(equation)
else: # kind == 'id'
# in case of existing object, place them
# just "above" them
pn = value.parentNode
if value == pn.lastChild:
pn.appendChild(equation)
else:
pn.insertBefore(equation, value.nextSibling)
#for
# 9. modify replaced <text> nodes according to options
if setup.options.frags_removetext: # remove nodes
for node in text_nodes:
node.parentNode.removeChild(node)
elif setup.options.frags_hidetext: # hide nodes
for node in text_nodes:
node.setAttribute('display', 'none')
SVG.save(output_svg)
def cleanup(tmp_filename):
"remove temporary files"
extensions = ['.aux', '.log']
if not setup.options.frags_keeptex:
extensions.append('.tex')
if not setup.options.frags_keepdvi:
extensions.append('.dvi')
for ext in extensions:
frags.remove_file(tmp_filename + ext)
if __name__ == '__main__':
import traceback
try:
main(sys.argv)
except SystemExit, (code):
sys.exit(code)
except:
if setup.options.print_traceback:
traceback.print_exc(file=sys.stderr)
else:
exception, instance, _ = sys.exc_info()
print >> sys.stderr, "Unexpeced error - %s: %s" % (exception.__name__, str(instance))
# vim: ts=4 sw=4 nowrap
| ave( | identifier_name |
svgfrags.py | #!/usr/bin/python
# -*- coding: iso-8859-2 -*-
# $Id: svgfrags.py,v 1.9 2007-03-13 20:55:37 wojtek Exp $
#
# SVGfrags - main program
#
# license: BSD
#
# author: Wojciech Muła
# e-mail: wojciech_mula@poczta.onet.pl
# WWW : http://0x80.pl/
"""
13.03.2007
- syntax chenges:
* keyword "this" as source
- use frags.get_text, frags.get_anchor
- + cleanup
- + traceback
12.03.2007
- use new parser (frags/parser.py & frags/parse_subst.py)
- syntax changes:
* removed 'settowidth' & 'settoheight' (now can be expressed with 'scale')
* removed 'fit' (now 'scale' option)
* added ('length' num) to scale
10.03.2007
- share same TeX expression
- id based on file timestamp & string hash (to reasume purposes)
- keep old DVI & TeX fles
- EquationsManager updated (SVGGfxDocument was changed)
- colors inherit from text nodes
- TeX-object space margin support
- options parse
9.03.2007
- parser
- clean up
8.03.2007
- early tests
"""
import sys, os, atexit
import logging
import xml.dom.minidom
import setup
import frags
import dvi2svg
from conv import utils
from conv import fontsel
from conv import dviparser
from conv.findfile import which
from conv.binfile import binfile
DEBUG = False
logging.basicConfig(level=logging.INFO)
log = logging.getLogger('SVGfrags')
class EquationsManager(dvi2svg.SVGGfxDocument):
def __init__(self, doc, mag, scale, unit_mm):
super(EquationsManager, self).__init__(mag, scale, unit_mm, (0,0))
self.document = doc
self.svg = self.document.documentElement
def new_page(self):
self.chars = []
self.rules = []
self.lastpage = None
self.lastbbox = None
pass
def eop(self):
scale2str = self.scale2str
coord2str = self.coord2str
g = self.document.createElement('g')
self.lastpage = g
self.lastbbox = self.get_page_bbox()
for element in self.flush_rules() + self.flush_chars():
g.appendChild(element)
# (DEBUG)
if DEBUG:
xmin, ymin, xmax, ymax = self.lastbbox
r = self.document.createElement('rect')
r.setAttribute('x', str(xmin))
r.setAttribute('y', str(ymin))
r.setAttribute('width', str(xmax - xmin))
r.setAttribute('height', str(ymax - ymin))
r.setAttribute('fill', 'none')
r.setAttribute('stroke', 'red')
r.setAttribute('stroke-width', '0.25')
g.appendChild(r)
#for
def save(self, filename):
defs = self.document.getElementsByTagName('defs')[0]
for element in self.flush_glyphs():
defs.appendChild(element)
# save file
f = open(filename, 'wb')
if setup.options.prettyXML:
f.write(self.document.toprettyxml())
else:
f.write(self.document.toxml())
f.close()
def main(args):
from frags.cmdopts import parse_args
(setup.options, args) = parse_args(args)
# fixed options
setup.options.use_bbox = True
setup.options.prettyXML = False
input_txt = setup.options.input_txt
input_svg = setup.options.input_svg
output_svg = setup.options.output_svg
if not input_txt:
log.error("Rules file not provided, use switch -r or --rules")
sys.exit(1)
elif not os.path.exists(input_txt):
log.error("Rules file '%s' don't exist", input_txt)
sys.exit(1)
if not input_svg:
log.error("Input SVG file not provided, use switch -i or --input")
sys.exit(1)
elif not os.path.exists(input_svg):
log.error("Input SVG file '%s' don't exist", input_svg)
sys.exit(1)
if not output_svg:
log.error("Output SVG file not provided, use switch -i or --output")
sys.exit(1)
elif os.path.exists(output_svg) and not setup.options.frags_overwrite_file:
log.error("File %s already exists, and cannot be overwritten. Use switch -f or --force-overwrite to change this behaviour.", output_svg)
sys.exit(1)
# 1. Load SVG file
XML = xml.dom.minidom.parse(input_svg)
# 1.1. Create 'defs' tag (if doesn't exists), and add xlink namespace
if not XML.getElementsByTagName('defs'):
XML.documentElement.insertBefore(
XML.createElement('defs'),
XML.documentElement.firstChild
)
if not XML.documentElement.getAttribute('xmlns:xlink'):
XML.documentElement.setAttribute('xmlns:xlink', "http://www.w3.org/1999/xlink")
if True:
# XXX: hack; for unknown reason expat do not read id attribute
# and getElementById always fails
ID = {}
frags.collect_Id(XML, ID)
def my_getElementById(id):
try:
return ID[id]
except KeyError:
return None
XML.getElementById = my_getElementById
# 1.2. find all text objects
text_objects = {} # text -> node
for node in XML.getElementsByTagName('text'):
try:
text = frags.get_text(node, setup.options.frags_strip)
# add to list
if text in text_objects:
text_objects[text].append(node)
else:
text_objects[text] = [node]
except ValueError:
pass
#for
# 2. Load & parse replace pairs
input = open(input_txt, 'r').read()
from frags.parse_subst import parse
repl_defs = frags.Dict() # valid defs
text_nodes = set() # text nodes to remove/hide
try:
for item in parse(input):
((kind, value), tex, options) = item
if tex is None: # i.e. "this"
if kind == 'string':
if setup.options.frags_strip:
tex = value.strip()
else:
tex = value
elif kind == 'id':
node = XML.getElementById(value[1:])
if frags.istextnode(node):
tex = frags.get_text(node)
if tex is None:
log.error("Keyword 'this' is not allowed for rect/points object")
continue
if kind == 'string':
if setup.options.frags_strip:
value = value.strip()
try:
for node in text_objects[value]:
text_nodes.add(node)
repl_defs[tex] = ((kind, node), tex, options)
except KeyError:
log.warning("String '%s' doesn't found in SVG, skipping repl", value)
elif kind == 'id':
object = XML.getElementById(value[1:])
if object:
# "forget" id, save object
if object.nodeName in ['rect', 'ellipse', 'circle']:
repl_defs[tex] = ((kind, object), tex, options)
elif object.nodeName == 'text':
repl_defs[tex] = (('string', object), tex, options)
else:
log.warning("Object with id=%s is not text, rect, ellipse nor circle - skipping repl", value)
else:
log.warning("Object with id=%s doesn't found in SVG, skipping repl", value)
else: # point, rect -- no additional tests needed
repl_defs[tex] = ((kind, value), tex, options)
except frags.parse_subst.SyntaxError, e:
log.error("Syntax error: %s", str(e))
sys.exit(1)
if not repl_defs:
log.info("No rules - bye.")
sys.exit()
# make tmp name based on hash input & timestamp of input_txt file
tmp_filename = "svgfrags-%08x-%08x" % (
hash(input) & 0xffffffff,
os.path.getmtime(input_txt)
)
atexit.register(cleanup, tmp_filename)
if not os.path.exists(tmp_filename + ".dvi"):
# 3. prepare LaTeX source
tmp_lines = [
'\\batchmode',
'\\documentclass{article}',
'\\pagestyle{empty}'
'\\begin{document}',
]
for tex in repl_defs:
tmp_lines.append(tex) # each TeX expression at new page
tmp_lines.append("\\newpage")
# 4. write & compile TeX source
tmp_lines.append("\end{document}")
tmp = open(tmp_filename + '.tex', 'w')
for line in tmp_lines:
tmp.write(line + "\n")
tmp.close()
if which('latex'):
exitstatus = os.system("latex %s.tex > /dev/null" % tmp_filename)
if exitstatus:
log.error("LaTeX failed - error code %d; check log file '%s.log'", exitstatus, tmp_filename)
sys.exit(2)
else:
log.error("Program 'latex' isn't avaialable.")
sys.exit(3)
else:
log.info("File %s not changed, used existing DVI file (%s)", input_txt, tmp_filename)
# 5. Load DVI
dvi = binfile(tmp_filename + ".dvi", 'rb')
comment, (num, den, mag, u, l), page_offset, fonts = dviparser.dviinfo(dvi)
unit_mm = num/(den*10000.0)
scale = unit_mm * 72.27/25.4
mag = mag/1000.0
# 6. Preload fonts used in DVI & other stuff
fontsel.preload()
missing = []
for k in fonts:
_, s, d, fontname = fonts[k]
log.debug("Font %s=%s" % (k, fontname))
#print "Font %s=%s" % (k, fontname)
try:
fontsel.create_DVI_font(fontname, k, s, d, setup.options.enc_methods)
except fontsel.FontError, e:
log.error("Can't find font '%s': %s" % (fontname, str(e)))
missing.append((k, fontname))
if missing:
log.error("There were some unavailable fonts; list of missing fonts: %s" % (dvi.name, ", ".join("%d=%s" % kf for kf in missing)))
sys.exit(1)
# 7. Substitute
eq_id_n = 0
# helper functions
def get_width(obj_id, default=0.0):
ref = XML.getElementById(obj_id)
if ref:
return frags.get_width(ref)
else:
log.error("Object id=%s doesn't exists", obj_id)
return default
def get_height(obj_id, default=0.0):
ref = XML.getElementById(obj_id)
if ref:
return frags.get_height(ref)
else:
log.error("Object id=%s doesn't exists", obj_id)
return default
SVG = EquationsManager(XML, 1.25 * mag, scale, unit_mm)
for pageno, items in enumerate(repl_defs.values()):
dvi.seek(page_offset[pageno])
SVG.new_page()
dvi2svg.convert_page(dvi, SVG)
assert SVG.lastpage is not None, "Fatal error!"
assert SVG.lastbbox is not None, "Fatal error!"
if len(items) > 1:
# there are more then one referenco to this TeX object, so
# we have to **define** it, and then reference to, with <use>
eq_id = 'svgfrags-%x' % eq_id_n
eq_id_n += 1
SVG.lastpage.setAttribute('id', eq_id)
XML.getElementsByTagName('defs')[0].appendChild(SVG.lastpage)
else:
# just one reference, use node crated by SVGDocument
equation = SVG.lastpage
eq_id = None
# process
for ((kind, value), tex, options) in items:
px, py = options.position
if px == 'inherit':
if frags.istextnode(value):
px = frags.get_anchor(value)
else:
px = 0.0
# bounding box of equation
(xmin, ymin, xmax, ymax) = SVG.lastbbox
# enlarge with margin values
xmin -= options.margin[0]
xmax += options.margin[1]
ymin -= options.margin[2]
ymax += options.margin[3]
# and calculate bbox's dimensions
dx = xmax - xmin
dy = ymax - ymin
if eq_id is not None:
# more then one reference, create new node <use>
equation = XML.createElement('use')
equation.setAttributeNS('xlink', 'xlink:href', '#'+eq_id)
def put_equation(x, y, sx, sy):
# calculate desired point in equation BBox
xo = xmin + (xmax - xmin)*px
yo = ymin + (ymax - ymin)*py
# move (xo,yo) to (x,y)
if sx == sy:
equation.setAttribute(
'transform',
('translate(%s,%s)' % (SVG.c2s(x), SVG.c2s(y))) + \
('scale(%s)' % SVG.s2s(sx)) + \
('translate(%s,%s)' % (SVG.c2s(-xo), SVG.c2s(-yo)))
)
else:
equation.setAttribute(
'transform',
('translate(%s,%s)' % (SVG.c2s(x), SVG.c2s(y))) + \
('scale(%s,%s)' % (SVG.s2s(sx), SVG.s2s(sy))) + \
('translate(%s,%s)' % (SVG.c2s(-xo), SVG.c2s(-yo)))
)
return equation
# string or text object
if kind == 'string':
object = value
if options.scale == 'fit':
log.warning("%s is a text object, can't fit to rectangle", value)
sx = sy = 1.0
else:
sx, sy = options.scale
if type(sx) is tuple:
kind, val = sx
sx = 1.0
if kind == 'width':
if val == 'this': pass # no scale
else: # XML id
sx = get_width(val[1][1:], dx)/dx
elif kind == "height":
if val == 'this': pass # no scale
else: # XML id
sx = get_height(val[1][1:], dx)/dx
elif kind == "length":
sx = val/dx
if type(sy) is tuple:
kind, val = sy
sy = 1.0
if kind == 'width':
if val == 'this': pass # no scale
else: # XML id
sy = get_width(val[1][1:], dy)/dy
elif kind == "height":
if val == 'this': pass # no scale
else: # XML id
sy = get_height(val[1][1:], dy)/dy
elif kind == "length":
sy = val/dy
if sx == "uniform":
sx = sy
if sy == "uniform":
sy = sx
# get <text> object coords
x = frags.safe_float(object.getAttribute('x'))
y = frags.safe_float(object.getAttribute('y'))
# (DEBUG)
if DEBUG:
c = XML.createElement("circle")
c.setAttribute("cx", str(x))
c.setAttribute("cy", str(y))
c.setAttribute("r", "3")
c.setAttribute("fill", 'red')
object.parentNode.insertBefore(c, object)
put_equation(x, y, sx, sy)
# copy fill color from text node
fill = object.getAttribute('fill') or \
frags.CSS_value(object, 'fill')
if fill:
equation.setAttribute('fill', fill)
# insert equation into XML tree
object.parentNode.insertBefore(equation, object)
# explicity given point
elif kind == 'point':
if options.scale == 'fit':
log.warning("%s is a text object, can't fit to rectangle", value)
sx = sy = 1.0
else:
sx, sy = options.scale
if type(sx) is tuple:
kind, val = sx
sx = 1.0
if kind == 'width':
if val == 'this': pass # no scale
else: # XML id
sx = get_width(val[1][1:], dx)/dx
elif kind == "height":
if val == 'this': p | else: # XML id
sx = get_height(val[1][1:], dx)/dx
elif kind == "length":
sx = val/dx
if type(sy) is tuple:
kind, val = sy
sy = 1.0
if kind == 'width':
if val == 'this': pass # no scale
else: # XML id
sy = get_width(val[1][1:], dy)/dy
elif kind == "height":
if val == 'this': pass # no scale
else: # XML id
sy = get_height(val[1][1:], dy)/dy
elif kind == "length":
sy = val/dy
if sx == "uniform":
sx = sy
if sy == "uniform":
sy = sx
# insert equation into XML tree
x, y = value
XML.documentElement.appendChild(
put_equation(x, y, sx, sy)
)
# rectangle or object with known bbox
elif kind == 'id' or kind == 'rect':
# get bounding box
if kind == 'rect':
Xmin, Ymin, Xmax, Ymax = value # rect
else:
Xmin, Ymin, Xmax, Ymax = frags.get_bbox(value) # object
DX = Xmax - Xmin
DY = Ymax - Ymin
# reference point
x = Xmin + (Xmax - Xmin)*px
y = Ymin + (Ymax - Ymin)*py
# and set default scale
sx = 1.0
sy = 1.0
# Fit in rectangle
if options.scale == 'fit':
tmp_x = DX/(xmax - xmin)
tmp_y = DY/(ymax - ymin)
if tmp_x < tmp_y:
sx = sy = tmp_x
else:
sx = sy = tmp_y
else:
sx, sy = options.scale
if type(sx) is tuple:
kind, val = sx
sx = 1.0
if kind == 'width':
if val == 'this':
sx = DX/dx
else: # XML id
sx = get_width(val[1][1:], dx)/dx
elif kind == "height":
if val == 'this':
sx = DX/dx
else: # XML id
sx = get_height(val[1][1:], dx)/dx
elif kind == "length":
sx = val/dx
if type(sy) is tuple:
kind, val = sy
sy = 1.0
if kind == 'width':
if val == 'this':
sy = DY/dy
else: # XML id
sy = get_width(val[1][1:], dy)/dy
elif kind == "height":
if val == 'this':
sy = DY/dy
else: # XML id
sy = get_height(val[1][1:], dy)/dy
elif kind == "length":
sy = val/dy
if sx == "uniform":
sx = sy
if sy == "uniform":
sy = sx
#endif
# move&scale equation
put_equation(x, y, sx, sy)
# and append to XML tree
if kind == 'rect':
XML.documentElement.appendChild(equation)
else: # kind == 'id'
# in case of existing object, place them
# just "above" them
pn = value.parentNode
if value == pn.lastChild:
pn.appendChild(equation)
else:
pn.insertBefore(equation, value.nextSibling)
#for
# 9. modify replaced <text> nodes according to options
if setup.options.frags_removetext: # remove nodes
for node in text_nodes:
node.parentNode.removeChild(node)
elif setup.options.frags_hidetext: # hide nodes
for node in text_nodes:
node.setAttribute('display', 'none')
SVG.save(output_svg)
def cleanup(tmp_filename):
"remove temporary files"
extensions = ['.aux', '.log']
if not setup.options.frags_keeptex:
extensions.append('.tex')
if not setup.options.frags_keepdvi:
extensions.append('.dvi')
for ext in extensions:
frags.remove_file(tmp_filename + ext)
if __name__ == '__main__':
import traceback
try:
main(sys.argv)
except SystemExit, (code):
sys.exit(code)
except:
if setup.options.print_traceback:
traceback.print_exc(file=sys.stderr)
else:
exception, instance, _ = sys.exc_info()
print >> sys.stderr, "Unexpeced error - %s: %s" % (exception.__name__, str(instance))
# vim: ts=4 sw=4 nowrap
| ass # no scale
| conditional_block |
update_configuration_files.py | """A script for updating pre-existing V2 Pipette configurations."""
import os
import json
import argparse
from pathlib import Path
from typing import List, Dict, Tuple, Any, Iterator, Type
from pydantic import BaseModel
from pydantic.main import ModelMetaclass
from enum import Enum
from opentrons_shared_data import get_shared_data_root
from ..pipette_definition import (
PipetteConfigurations,
PipetteGeometryDefinition,
PipettePhysicalPropertiesDefinition,
PipetteLiquidPropertiesDefinition,
PipetteModelVersionType,
SupportedTipsDefinition,
)
from ..types import (
PipetteModelType,
PipetteChannelType,
PipetteVersionType,
PipetteTipType,
PipetteModelMajorVersion,
PipetteModelMinorVersion,
)
from ..load_data import _geometry, _physical, _liquid
from ..pipette_load_name_conversions import convert_pipette_model
from ..dev_types import PipetteModel
"""
Instructions:
To run this script, you must be in `shared-data/python`. To invoke, use the command:
`pipenv run python -m opentrons_shared_data.pipette.scripts.update_configuration_files`
If you want to update all files, you can simply use the argument `--update_all_models`.
Make sure to run `make format-js` afterwards to ensure formatting of the json files
is good.
*Note* If you are adding a brand-new key, you MUST update the pydantic models
found in `python/pipette/pipette_definition.py` before running this script.
*Note* When you are entering in your data, please utilize the exact type. I.e. if it's a
list, you must input the list like: [1, 2, 3] or if it's a dict, like: {"data": 1}..
For now, we do not support updating pipetting functions in this script.
"""
ROOT = get_shared_data_root() / "pipette" / "definitions" / "2"
NOZZLE_LOCATION_CONFIGS = ["nozzle_offset", "nozzle_map"]
def | (c: str) -> str:
# Tiny helper function to convert to camelCase.
config_name = c.split("_")
if len(config_name) == 1:
return config_name[0]
return f"{config_name[0]}" + "".join(s.capitalize() for s in config_name[1::])
def list_configuration_keys() -> Tuple[List[str], Dict[int, str]]:
"""List out the model keys available to modify at the top level."""
lookup = {i: v for (i, v) in enumerate(PipetteConfigurations.__fields__)}
return [
f"{i}: {v}" for (i, v) in enumerate(PipetteConfigurations.__fields__)
], lookup
def list_available_enum(enum_type: Type[Enum]) -> List[str]:
"""List available pipette models"""
return [f"{i}: {v}" for (i, v) in enumerate(enum_type)] # type: ignore[var-annotated]
def handle_subclass_model(
top_level_configuration: List[str], base_model: BaseModel, is_basemodel: bool
) -> List[str]:
"""Handle sub-classed basemodels and update the top level model as necessary."""
if is_basemodel:
if base_model.__fields__ == SupportedTipsDefinition.__fields__:
# pydantic does something weird with the types in ModelFields so
# we cannot use isinstance checks to confirm if the base model
# is a supported tips definition
print(f"choose {PipetteTipType.__name__}:")
for row in list_available_enum(PipetteTipType):
print(f"\t{row}")
tip_type = list(PipetteTipType)[
int(input("select the tip volume size to modify"))
]
top_level_configuration.append(tip_type.name)
lookup = {i: v for (i, v) in enumerate(base_model.__fields__)}
config_list = [f"{i}: {v}" for (i, v) in enumerate(base_model.__fields__)]
print(f"you selected the basemodel {base_model.__name__}:") # type: ignore[attr-defined]
for row in config_list:
print(f"\t{row}")
configuration_to_update = lookup[
int(input("select a specific configuration from above\n"))
]
field_type = base_model.__fields__[configuration_to_update].type_
is_basemodel = isinstance(field_type, ModelMetaclass)
top_level_configuration.append(configuration_to_update)
return handle_subclass_model(top_level_configuration, field_type, is_basemodel)
else:
return top_level_configuration
def check_from_version(version: str) -> str:
"""Check that the version requested is supported in the system."""
version_int = [int(v) for v in version.split(".")]
if version_int[0] not in PipetteModelMajorVersion:
raise ValueError(f"Major version {version_int[0]} is not supported.")
if version_int[1] not in PipetteModelMinorVersion:
raise ValueError(f"Minor version {version_int[1]} is not supported.")
return version
def save_data_to_file(
directorypath: Path,
file_name: str,
data: Dict[str, Any],
) -> None:
"""
Function used to save data to a file
"""
directorypath.mkdir(parents=True, exist_ok=True)
filepath = directorypath / f"{file_name}.json"
with open(filepath, "w") as f:
json.dump(data, f, indent=2)
def update(
dict_to_update: Dict[str, Any], iter_of_configs: Iterator[str], value_to_update: Any
) -> Dict[str, Any]:
"""
Recursively update the given dictionary to ensure no data is lost when updating.
"""
next_key = next(iter_of_configs, None)
if next_key and isinstance(dict_to_update[next_key], dict):
dict_to_update[next_key] = update(
dict_to_update.get(next_key, {}), iter_of_configs, value_to_update
)
elif next_key:
dict_to_update[next_key] = value_to_update
return dict_to_update
def build_nozzle_map(
nozzle_offset: List[float], channels: PipetteChannelType
) -> Dict[str, List[float]]:
Y_OFFSET = 9
X_OFFSET = -9
breakpoint()
if channels == PipetteChannelType.SINGLE_CHANNEL:
return {"A1": nozzle_offset}
elif channels == PipetteChannelType.EIGHT_CHANNEL:
return {
f"{chr(ord('A') + 1*row)}1": [
nozzle_offset[0],
nozzle_offset[1] + Y_OFFSET * row,
nozzle_offset[2],
]
for row in range(8)
}
elif channels == PipetteChannelType.NINETY_SIX_CHANNEL:
return {
f"{chr(ord('A') + 1*row)}{1 + 1*col}": [
nozzle_offset[0] + X_OFFSET * col,
nozzle_offset[1] + Y_OFFSET * row,
nozzle_offset[2],
]
for row in range(8)
for col in range(12)
}
raise ValueError(f"Unsupported channel type {channels}")
def load_and_update_file_from_config(
config_to_update: List[str],
value_to_update: Any,
model_to_update: PipetteModelVersionType,
) -> None:
"""Update the requested config and save to disk.
Load the requested config sub type (physical, geometry or liquid). Then
update the current file and save to disk.
"""
camel_list_to_update = iter([_change_to_camel_case(i) for i in config_to_update])
if config_to_update[0] in PipetteGeometryDefinition.__fields__:
geometry = _geometry(
model_to_update.pipette_channels,
model_to_update.pipette_type,
model_to_update.pipette_version,
)
if config_to_update[0] == "nozzle_map":
nozzle_to_use = (
value_to_update if value_to_update else geometry["nozzleOffset"]
)
geometry["nozzleMap"] = build_nozzle_map(
nozzle_to_use, model_to_update.pipette_channels
)
elif config_to_update[0] == "nozzle_offset":
geometry["nozzleMap"] = build_nozzle_map(
value_to_update, model_to_update.pipette_channels
)
geometry["nozzleOffset"] = value_to_update
else:
geometry = update(geometry, camel_list_to_update, value_to_update)
PipetteGeometryDefinition.parse_obj(geometry)
filepath = (
ROOT
/ "geometry"
/ model_to_update.pipette_channels.name.lower()
/ model_to_update.pipette_type.value
)
save_data_to_file(
filepath,
f"{model_to_update.pipette_version.major}_{model_to_update.pipette_version.minor}",
geometry,
)
elif config_to_update[0] in PipettePhysicalPropertiesDefinition.__fields__:
physical = _physical(
model_to_update.pipette_channels,
model_to_update.pipette_type,
model_to_update.pipette_version,
)
physical = update(physical, camel_list_to_update, value_to_update)
PipettePhysicalPropertiesDefinition.parse_obj(physical)
filepath = (
ROOT
/ "general"
/ model_to_update.pipette_channels.name.lower()
/ model_to_update.pipette_type.value
)
save_data_to_file(
filepath,
f"{model_to_update.pipette_version.major}_{model_to_update.pipette_version.minor}",
physical,
)
elif config_to_update[0] in PipetteLiquidPropertiesDefinition.__fields__:
liquid = _liquid(
model_to_update.pipette_channels,
model_to_update.pipette_type,
model_to_update.pipette_version,
)
liquid = update(physical, camel_list_to_update, value_to_update)
PipetteLiquidPropertiesDefinition.parse_obj(liquid)
filepath = (
ROOT
/ "liquid"
/ model_to_update.pipette_channels.name.lower()
/ model_to_update.pipette_type.value
)
save_data_to_file(
filepath,
f"{model_to_update.pipette_version.major}_{model_to_update.pipette_version.minor}",
liquid,
)
else:
raise KeyError(
f"{config_to_update} is not saved to a file. Check `pipette_definition.py` for more information."
)
def _update_single_model(configuration_to_update: List[str]) -> None:
"""Helper function to update single model."""
print(f"choose {PipetteModelType.__name__}:")
for row in list_available_enum(PipetteModelType):
print(f"\t{row}")
model = list(PipetteModelType)[int(input("Please select from above\n"))]
print(f"choose {PipetteChannelType.__name__}:")
for row in list_available_enum(PipetteChannelType):
print(f"\t{row}")
channels = list(PipetteChannelType)[int(input("Please select from above\n"))]
version = PipetteVersionType.convert_from_float(
float(check_from_version(input("Please input the version of the model\n")))
)
built_model: PipetteModel = PipetteModel(
f"{model.name}_{str(channels)}_v{version.major}.{version.minor}"
)
if configuration_to_update[0] == NOZZLE_LOCATION_CONFIGS[1]:
print(
"You selected nozzle_map to edit. If you wish to update the nozzle offset, enter it on the next line.\n"
)
print("Otherwise, please type 'null' on the next line.\n")
value_to_update = json.loads(
input(
f"Please select what you would like to update {configuration_to_update} to for {built_model}\n"
)
)
model_version = convert_pipette_model(built_model)
load_and_update_file_from_config(
configuration_to_update, value_to_update, model_version
)
def _update_all_models(configuration_to_update: List[str]) -> None:
paths_to_validate = ROOT / "liquid"
_channel_model_str = {
"single_channel": "single",
"ninety_six_channel": "96",
"eight_channel": "multi",
}
for channel_dir in os.listdir(paths_to_validate):
for model_dir in os.listdir(paths_to_validate / channel_dir):
for version_file in os.listdir(paths_to_validate / channel_dir / model_dir):
version_list = version_file.split(".json")[0].split("_")
built_model: PipetteModel = PipetteModel(
f"{model_dir}_{_channel_model_str[channel_dir]}_v{version_list[0]}.{version_list[1]}"
)
if configuration_to_update[0] == NOZZLE_LOCATION_CONFIGS[1]:
print(
"You selected nozzle_map to edit. If you wish to update the nozzle offset, enter it on the next line.\n"
)
print("Otherwise, please type 'null' on the next line.\n")
value_to_update = json.loads(
input(
f"Please select what you would like to update {configuration_to_update} to for {built_model}\n"
)
)
model_version = convert_pipette_model(built_model)
load_and_update_file_from_config(
configuration_to_update, value_to_update, model_version
)
def determine_models_to_update(update_all_models: bool) -> None:
try:
while True:
print(f"choose {PipetteConfigurations.__name__}:")
config_list, table_lookup = list_configuration_keys()
for row in config_list:
print(f"\t{row}")
configuration_to_update = [
table_lookup[int(input("select a configuration from above\n"))]
]
if configuration_to_update[0] == NOZZLE_LOCATION_CONFIGS[0]:
print(
f"NOTE: updating the {configuration_to_update[0]} will automatically update the {NOZZLE_LOCATION_CONFIGS[1]}\n"
)
field_type = PipetteConfigurations.__fields__[
configuration_to_update[0]
].type_
is_basemodel = isinstance(field_type, ModelMetaclass)
configuration_to_update = handle_subclass_model(
configuration_to_update, field_type, is_basemodel
)
if update_all_models:
_update_all_models(configuration_to_update)
else:
_update_single_model(configuration_to_update)
except KeyboardInterrupt:
print("Finished updating! Validate that your files updated successfully.")
def main() -> None:
"""Entry point."""
parser = argparse.ArgumentParser(
description="96 channel tip handling testing script."
)
parser.add_argument(
"--update_all_models",
type=bool,
help="update all",
default=False,
)
args = parser.parse_args()
determine_models_to_update(args.update_all_models)
if __name__ == "__main__":
"""
A script to automate building a pipette configuration definition.
This script can either perform migrations from a v1 -> v2 schema format
or build a brand new script from scratch.
When building a new pipette configuration model, you will either need
to provide CSVs or use command line inputs.
If you choose CSVs you will need one CSV for the general pipette configuration
data (such as pipette model or number of channels) and one for every tip
type that this pipette model can support.
"""
main()
| _change_to_camel_case | identifier_name |
update_configuration_files.py | """A script for updating pre-existing V2 Pipette configurations."""
import os
import json
import argparse
from pathlib import Path
from typing import List, Dict, Tuple, Any, Iterator, Type
from pydantic import BaseModel
from pydantic.main import ModelMetaclass
from enum import Enum
from opentrons_shared_data import get_shared_data_root
from ..pipette_definition import (
PipetteConfigurations,
PipetteGeometryDefinition,
PipettePhysicalPropertiesDefinition,
PipetteLiquidPropertiesDefinition,
PipetteModelVersionType,
SupportedTipsDefinition,
)
from ..types import (
PipetteModelType,
PipetteChannelType,
PipetteVersionType,
PipetteTipType,
PipetteModelMajorVersion,
PipetteModelMinorVersion,
)
from ..load_data import _geometry, _physical, _liquid
from ..pipette_load_name_conversions import convert_pipette_model
from ..dev_types import PipetteModel
"""
Instructions:
To run this script, you must be in `shared-data/python`. To invoke, use the command:
`pipenv run python -m opentrons_shared_data.pipette.scripts.update_configuration_files`
If you want to update all files, you can simply use the argument `--update_all_models`.
Make sure to run `make format-js` afterwards to ensure formatting of the json files
is good.
*Note* If you are adding a brand-new key, you MUST update the pydantic models
found in `python/pipette/pipette_definition.py` before running this script.
*Note* When you are entering in your data, please utilize the exact type. I.e. if it's a
list, you must input the list like: [1, 2, 3] or if it's a dict, like: {"data": 1}..
For now, we do not support updating pipetting functions in this script.
"""
ROOT = get_shared_data_root() / "pipette" / "definitions" / "2"
NOZZLE_LOCATION_CONFIGS = ["nozzle_offset", "nozzle_map"]
def _change_to_camel_case(c: str) -> str:
# Tiny helper function to convert to camelCase.
config_name = c.split("_")
if len(config_name) == 1:
return config_name[0]
return f"{config_name[0]}" + "".join(s.capitalize() for s in config_name[1::])
def list_configuration_keys() -> Tuple[List[str], Dict[int, str]]:
"""List out the model keys available to modify at the top level."""
lookup = {i: v for (i, v) in enumerate(PipetteConfigurations.__fields__)}
return [
f"{i}: {v}" for (i, v) in enumerate(PipetteConfigurations.__fields__)
], lookup
def list_available_enum(enum_type: Type[Enum]) -> List[str]:
"""List available pipette models"""
return [f"{i}: {v}" for (i, v) in enumerate(enum_type)] # type: ignore[var-annotated]
def handle_subclass_model(
top_level_configuration: List[str], base_model: BaseModel, is_basemodel: bool
) -> List[str]:
"""Handle sub-classed basemodels and update the top level model as necessary."""
if is_basemodel:
if base_model.__fields__ == SupportedTipsDefinition.__fields__:
# pydantic does something weird with the types in ModelFields so
# we cannot use isinstance checks to confirm if the base model
# is a supported tips definition
print(f"choose {PipetteTipType.__name__}:")
for row in list_available_enum(PipetteTipType):
print(f"\t{row}")
tip_type = list(PipetteTipType)[
int(input("select the tip volume size to modify"))
]
top_level_configuration.append(tip_type.name)
lookup = {i: v for (i, v) in enumerate(base_model.__fields__)}
config_list = [f"{i}: {v}" for (i, v) in enumerate(base_model.__fields__)]
print(f"you selected the basemodel {base_model.__name__}:") # type: ignore[attr-defined]
for row in config_list:
print(f"\t{row}")
configuration_to_update = lookup[
int(input("select a specific configuration from above\n"))
]
field_type = base_model.__fields__[configuration_to_update].type_
is_basemodel = isinstance(field_type, ModelMetaclass)
top_level_configuration.append(configuration_to_update)
return handle_subclass_model(top_level_configuration, field_type, is_basemodel)
else:
return top_level_configuration
def check_from_version(version: str) -> str:
"""Check that the version requested is supported in the system."""
version_int = [int(v) for v in version.split(".")]
if version_int[0] not in PipetteModelMajorVersion:
raise ValueError(f"Major version {version_int[0]} is not supported.")
if version_int[1] not in PipetteModelMinorVersion:
raise ValueError(f"Minor version {version_int[1]} is not supported.")
return version
def save_data_to_file(
directorypath: Path,
file_name: str,
data: Dict[str, Any],
) -> None:
"""
Function used to save data to a file
"""
directorypath.mkdir(parents=True, exist_ok=True)
filepath = directorypath / f"{file_name}.json"
with open(filepath, "w") as f:
json.dump(data, f, indent=2)
def update(
dict_to_update: Dict[str, Any], iter_of_configs: Iterator[str], value_to_update: Any
) -> Dict[str, Any]:
|
def build_nozzle_map(
nozzle_offset: List[float], channels: PipetteChannelType
) -> Dict[str, List[float]]:
Y_OFFSET = 9
X_OFFSET = -9
breakpoint()
if channels == PipetteChannelType.SINGLE_CHANNEL:
return {"A1": nozzle_offset}
elif channels == PipetteChannelType.EIGHT_CHANNEL:
return {
f"{chr(ord('A') + 1*row)}1": [
nozzle_offset[0],
nozzle_offset[1] + Y_OFFSET * row,
nozzle_offset[2],
]
for row in range(8)
}
elif channels == PipetteChannelType.NINETY_SIX_CHANNEL:
return {
f"{chr(ord('A') + 1*row)}{1 + 1*col}": [
nozzle_offset[0] + X_OFFSET * col,
nozzle_offset[1] + Y_OFFSET * row,
nozzle_offset[2],
]
for row in range(8)
for col in range(12)
}
raise ValueError(f"Unsupported channel type {channels}")
def load_and_update_file_from_config(
config_to_update: List[str],
value_to_update: Any,
model_to_update: PipetteModelVersionType,
) -> None:
"""Update the requested config and save to disk.
Load the requested config sub type (physical, geometry or liquid). Then
update the current file and save to disk.
"""
camel_list_to_update = iter([_change_to_camel_case(i) for i in config_to_update])
if config_to_update[0] in PipetteGeometryDefinition.__fields__:
geometry = _geometry(
model_to_update.pipette_channels,
model_to_update.pipette_type,
model_to_update.pipette_version,
)
if config_to_update[0] == "nozzle_map":
nozzle_to_use = (
value_to_update if value_to_update else geometry["nozzleOffset"]
)
geometry["nozzleMap"] = build_nozzle_map(
nozzle_to_use, model_to_update.pipette_channels
)
elif config_to_update[0] == "nozzle_offset":
geometry["nozzleMap"] = build_nozzle_map(
value_to_update, model_to_update.pipette_channels
)
geometry["nozzleOffset"] = value_to_update
else:
geometry = update(geometry, camel_list_to_update, value_to_update)
PipetteGeometryDefinition.parse_obj(geometry)
filepath = (
ROOT
/ "geometry"
/ model_to_update.pipette_channels.name.lower()
/ model_to_update.pipette_type.value
)
save_data_to_file(
filepath,
f"{model_to_update.pipette_version.major}_{model_to_update.pipette_version.minor}",
geometry,
)
elif config_to_update[0] in PipettePhysicalPropertiesDefinition.__fields__:
physical = _physical(
model_to_update.pipette_channels,
model_to_update.pipette_type,
model_to_update.pipette_version,
)
physical = update(physical, camel_list_to_update, value_to_update)
PipettePhysicalPropertiesDefinition.parse_obj(physical)
filepath = (
ROOT
/ "general"
/ model_to_update.pipette_channels.name.lower()
/ model_to_update.pipette_type.value
)
save_data_to_file(
filepath,
f"{model_to_update.pipette_version.major}_{model_to_update.pipette_version.minor}",
physical,
)
elif config_to_update[0] in PipetteLiquidPropertiesDefinition.__fields__:
liquid = _liquid(
model_to_update.pipette_channels,
model_to_update.pipette_type,
model_to_update.pipette_version,
)
liquid = update(physical, camel_list_to_update, value_to_update)
PipetteLiquidPropertiesDefinition.parse_obj(liquid)
filepath = (
ROOT
/ "liquid"
/ model_to_update.pipette_channels.name.lower()
/ model_to_update.pipette_type.value
)
save_data_to_file(
filepath,
f"{model_to_update.pipette_version.major}_{model_to_update.pipette_version.minor}",
liquid,
)
else:
raise KeyError(
f"{config_to_update} is not saved to a file. Check `pipette_definition.py` for more information."
)
def _update_single_model(configuration_to_update: List[str]) -> None:
"""Helper function to update single model."""
print(f"choose {PipetteModelType.__name__}:")
for row in list_available_enum(PipetteModelType):
print(f"\t{row}")
model = list(PipetteModelType)[int(input("Please select from above\n"))]
print(f"choose {PipetteChannelType.__name__}:")
for row in list_available_enum(PipetteChannelType):
print(f"\t{row}")
channels = list(PipetteChannelType)[int(input("Please select from above\n"))]
version = PipetteVersionType.convert_from_float(
float(check_from_version(input("Please input the version of the model\n")))
)
built_model: PipetteModel = PipetteModel(
f"{model.name}_{str(channels)}_v{version.major}.{version.minor}"
)
if configuration_to_update[0] == NOZZLE_LOCATION_CONFIGS[1]:
print(
"You selected nozzle_map to edit. If you wish to update the nozzle offset, enter it on the next line.\n"
)
print("Otherwise, please type 'null' on the next line.\n")
value_to_update = json.loads(
input(
f"Please select what you would like to update {configuration_to_update} to for {built_model}\n"
)
)
model_version = convert_pipette_model(built_model)
load_and_update_file_from_config(
configuration_to_update, value_to_update, model_version
)
def _update_all_models(configuration_to_update: List[str]) -> None:
paths_to_validate = ROOT / "liquid"
_channel_model_str = {
"single_channel": "single",
"ninety_six_channel": "96",
"eight_channel": "multi",
}
for channel_dir in os.listdir(paths_to_validate):
for model_dir in os.listdir(paths_to_validate / channel_dir):
for version_file in os.listdir(paths_to_validate / channel_dir / model_dir):
version_list = version_file.split(".json")[0].split("_")
built_model: PipetteModel = PipetteModel(
f"{model_dir}_{_channel_model_str[channel_dir]}_v{version_list[0]}.{version_list[1]}"
)
if configuration_to_update[0] == NOZZLE_LOCATION_CONFIGS[1]:
print(
"You selected nozzle_map to edit. If you wish to update the nozzle offset, enter it on the next line.\n"
)
print("Otherwise, please type 'null' on the next line.\n")
value_to_update = json.loads(
input(
f"Please select what you would like to update {configuration_to_update} to for {built_model}\n"
)
)
model_version = convert_pipette_model(built_model)
load_and_update_file_from_config(
configuration_to_update, value_to_update, model_version
)
def determine_models_to_update(update_all_models: bool) -> None:
try:
while True:
print(f"choose {PipetteConfigurations.__name__}:")
config_list, table_lookup = list_configuration_keys()
for row in config_list:
print(f"\t{row}")
configuration_to_update = [
table_lookup[int(input("select a configuration from above\n"))]
]
if configuration_to_update[0] == NOZZLE_LOCATION_CONFIGS[0]:
print(
f"NOTE: updating the {configuration_to_update[0]} will automatically update the {NOZZLE_LOCATION_CONFIGS[1]}\n"
)
field_type = PipetteConfigurations.__fields__[
configuration_to_update[0]
].type_
is_basemodel = isinstance(field_type, ModelMetaclass)
configuration_to_update = handle_subclass_model(
configuration_to_update, field_type, is_basemodel
)
if update_all_models:
_update_all_models(configuration_to_update)
else:
_update_single_model(configuration_to_update)
except KeyboardInterrupt:
print("Finished updating! Validate that your files updated successfully.")
def main() -> None:
"""Entry point."""
parser = argparse.ArgumentParser(
description="96 channel tip handling testing script."
)
parser.add_argument(
"--update_all_models",
type=bool,
help="update all",
default=False,
)
args = parser.parse_args()
determine_models_to_update(args.update_all_models)
if __name__ == "__main__":
"""
A script to automate building a pipette configuration definition.
This script can either perform migrations from a v1 -> v2 schema format
or build a brand new script from scratch.
When building a new pipette configuration model, you will either need
to provide CSVs or use command line inputs.
If you choose CSVs you will need one CSV for the general pipette configuration
data (such as pipette model or number of channels) and one for every tip
type that this pipette model can support.
"""
main()
| """
Recursively update the given dictionary to ensure no data is lost when updating.
"""
next_key = next(iter_of_configs, None)
if next_key and isinstance(dict_to_update[next_key], dict):
dict_to_update[next_key] = update(
dict_to_update.get(next_key, {}), iter_of_configs, value_to_update
)
elif next_key:
dict_to_update[next_key] = value_to_update
return dict_to_update | identifier_body |
update_configuration_files.py | """A script for updating pre-existing V2 Pipette configurations."""
import os
import json
import argparse
from pathlib import Path
from typing import List, Dict, Tuple, Any, Iterator, Type
from pydantic import BaseModel
from pydantic.main import ModelMetaclass
from enum import Enum
from opentrons_shared_data import get_shared_data_root
from ..pipette_definition import (
PipetteConfigurations,
PipetteGeometryDefinition,
PipettePhysicalPropertiesDefinition,
PipetteLiquidPropertiesDefinition,
PipetteModelVersionType,
SupportedTipsDefinition,
)
from ..types import (
PipetteModelType,
PipetteChannelType,
PipetteVersionType,
PipetteTipType,
PipetteModelMajorVersion,
PipetteModelMinorVersion,
)
from ..load_data import _geometry, _physical, _liquid
from ..pipette_load_name_conversions import convert_pipette_model
from ..dev_types import PipetteModel
"""
Instructions:
To run this script, you must be in `shared-data/python`. To invoke, use the command:
`pipenv run python -m opentrons_shared_data.pipette.scripts.update_configuration_files`
If you want to update all files, you can simply use the argument `--update_all_models`.
Make sure to run `make format-js` afterwards to ensure formatting of the json files
is good.
*Note* If you are adding a brand-new key, you MUST update the pydantic models
found in `python/pipette/pipette_definition.py` before running this script.
*Note* When you are entering in your data, please utilize the exact type. I.e. if it's a
list, you must input the list like: [1, 2, 3] or if it's a dict, like: {"data": 1}..
For now, we do not support updating pipetting functions in this script.
"""
ROOT = get_shared_data_root() / "pipette" / "definitions" / "2"
NOZZLE_LOCATION_CONFIGS = ["nozzle_offset", "nozzle_map"]
def _change_to_camel_case(c: str) -> str:
# Tiny helper function to convert to camelCase.
config_name = c.split("_")
if len(config_name) == 1:
return config_name[0]
return f"{config_name[0]}" + "".join(s.capitalize() for s in config_name[1::])
def list_configuration_keys() -> Tuple[List[str], Dict[int, str]]:
"""List out the model keys available to modify at the top level."""
lookup = {i: v for (i, v) in enumerate(PipetteConfigurations.__fields__)}
return [
f"{i}: {v}" for (i, v) in enumerate(PipetteConfigurations.__fields__)
], lookup
def list_available_enum(enum_type: Type[Enum]) -> List[str]:
"""List available pipette models"""
return [f"{i}: {v}" for (i, v) in enumerate(enum_type)] # type: ignore[var-annotated]
def handle_subclass_model(
top_level_configuration: List[str], base_model: BaseModel, is_basemodel: bool
) -> List[str]:
"""Handle sub-classed basemodels and update the top level model as necessary."""
if is_basemodel:
if base_model.__fields__ == SupportedTipsDefinition.__fields__:
# pydantic does something weird with the types in ModelFields so
# we cannot use isinstance checks to confirm if the base model
# is a supported tips definition
print(f"choose {PipetteTipType.__name__}:")
for row in list_available_enum(PipetteTipType):
print(f"\t{row}")
tip_type = list(PipetteTipType)[
int(input("select the tip volume size to modify"))
]
top_level_configuration.append(tip_type.name)
lookup = {i: v for (i, v) in enumerate(base_model.__fields__)}
config_list = [f"{i}: {v}" for (i, v) in enumerate(base_model.__fields__)]
print(f"you selected the basemodel {base_model.__name__}:") # type: ignore[attr-defined]
for row in config_list:
print(f"\t{row}")
configuration_to_update = lookup[
int(input("select a specific configuration from above\n"))
]
field_type = base_model.__fields__[configuration_to_update].type_
is_basemodel = isinstance(field_type, ModelMetaclass)
top_level_configuration.append(configuration_to_update)
return handle_subclass_model(top_level_configuration, field_type, is_basemodel)
else:
return top_level_configuration
def check_from_version(version: str) -> str:
"""Check that the version requested is supported in the system."""
version_int = [int(v) for v in version.split(".")]
if version_int[0] not in PipetteModelMajorVersion:
raise ValueError(f"Major version {version_int[0]} is not supported.")
if version_int[1] not in PipetteModelMinorVersion:
raise ValueError(f"Minor version {version_int[1]} is not supported.")
return version
def save_data_to_file(
directorypath: Path,
file_name: str,
data: Dict[str, Any],
) -> None:
"""
Function used to save data to a file
"""
directorypath.mkdir(parents=True, exist_ok=True)
filepath = directorypath / f"{file_name}.json"
with open(filepath, "w") as f:
json.dump(data, f, indent=2)
def update(
dict_to_update: Dict[str, Any], iter_of_configs: Iterator[str], value_to_update: Any
) -> Dict[str, Any]:
"""
Recursively update the given dictionary to ensure no data is lost when updating.
"""
next_key = next(iter_of_configs, None)
if next_key and isinstance(dict_to_update[next_key], dict):
dict_to_update[next_key] = update(
dict_to_update.get(next_key, {}), iter_of_configs, value_to_update
)
elif next_key:
dict_to_update[next_key] = value_to_update
return dict_to_update
def build_nozzle_map(
nozzle_offset: List[float], channels: PipetteChannelType
) -> Dict[str, List[float]]:
Y_OFFSET = 9
X_OFFSET = -9
breakpoint()
if channels == PipetteChannelType.SINGLE_CHANNEL:
return {"A1": nozzle_offset}
elif channels == PipetteChannelType.EIGHT_CHANNEL:
return {
f"{chr(ord('A') + 1*row)}1": [
nozzle_offset[0],
nozzle_offset[1] + Y_OFFSET * row,
nozzle_offset[2],
]
for row in range(8)
}
elif channels == PipetteChannelType.NINETY_SIX_CHANNEL:
return {
f"{chr(ord('A') + 1*row)}{1 + 1*col}": [
nozzle_offset[0] + X_OFFSET * col,
nozzle_offset[1] + Y_OFFSET * row,
nozzle_offset[2],
]
for row in range(8)
for col in range(12)
}
raise ValueError(f"Unsupported channel type {channels}")
def load_and_update_file_from_config(
config_to_update: List[str],
value_to_update: Any,
model_to_update: PipetteModelVersionType,
) -> None:
"""Update the requested config and save to disk.
Load the requested config sub type (physical, geometry or liquid). Then
update the current file and save to disk.
"""
camel_list_to_update = iter([_change_to_camel_case(i) for i in config_to_update])
if config_to_update[0] in PipetteGeometryDefinition.__fields__:
geometry = _geometry(
model_to_update.pipette_channels,
model_to_update.pipette_type,
model_to_update.pipette_version,
)
if config_to_update[0] == "nozzle_map":
nozzle_to_use = (
value_to_update if value_to_update else geometry["nozzleOffset"]
)
geometry["nozzleMap"] = build_nozzle_map(
nozzle_to_use, model_to_update.pipette_channels
)
elif config_to_update[0] == "nozzle_offset":
geometry["nozzleMap"] = build_nozzle_map(
value_to_update, model_to_update.pipette_channels
)
geometry["nozzleOffset"] = value_to_update
else:
geometry = update(geometry, camel_list_to_update, value_to_update)
PipetteGeometryDefinition.parse_obj(geometry)
filepath = (
ROOT
/ "geometry"
/ model_to_update.pipette_channels.name.lower()
/ model_to_update.pipette_type.value
)
save_data_to_file(
filepath,
f"{model_to_update.pipette_version.major}_{model_to_update.pipette_version.minor}",
geometry,
)
elif config_to_update[0] in PipettePhysicalPropertiesDefinition.__fields__:
physical = _physical(
model_to_update.pipette_channels,
model_to_update.pipette_type,
model_to_update.pipette_version,
)
physical = update(physical, camel_list_to_update, value_to_update)
PipettePhysicalPropertiesDefinition.parse_obj(physical)
filepath = (
ROOT
/ "general"
/ model_to_update.pipette_channels.name.lower()
/ model_to_update.pipette_type.value
)
save_data_to_file(
filepath,
f"{model_to_update.pipette_version.major}_{model_to_update.pipette_version.minor}",
physical,
)
elif config_to_update[0] in PipetteLiquidPropertiesDefinition.__fields__:
liquid = _liquid(
model_to_update.pipette_channels,
model_to_update.pipette_type,
model_to_update.pipette_version,
)
liquid = update(physical, camel_list_to_update, value_to_update)
PipetteLiquidPropertiesDefinition.parse_obj(liquid)
filepath = (
ROOT
/ "liquid"
/ model_to_update.pipette_channels.name.lower()
/ model_to_update.pipette_type.value
)
save_data_to_file(
filepath,
f"{model_to_update.pipette_version.major}_{model_to_update.pipette_version.minor}",
liquid,
)
else:
raise KeyError(
f"{config_to_update} is not saved to a file. Check `pipette_definition.py` for more information."
)
def _update_single_model(configuration_to_update: List[str]) -> None:
"""Helper function to update single model."""
print(f"choose {PipetteModelType.__name__}:")
for row in list_available_enum(PipetteModelType):
print(f"\t{row}")
model = list(PipetteModelType)[int(input("Please select from above\n"))]
print(f"choose {PipetteChannelType.__name__}:")
for row in list_available_enum(PipetteChannelType):
|
channels = list(PipetteChannelType)[int(input("Please select from above\n"))]
version = PipetteVersionType.convert_from_float(
float(check_from_version(input("Please input the version of the model\n")))
)
built_model: PipetteModel = PipetteModel(
f"{model.name}_{str(channels)}_v{version.major}.{version.minor}"
)
if configuration_to_update[0] == NOZZLE_LOCATION_CONFIGS[1]:
print(
"You selected nozzle_map to edit. If you wish to update the nozzle offset, enter it on the next line.\n"
)
print("Otherwise, please type 'null' on the next line.\n")
value_to_update = json.loads(
input(
f"Please select what you would like to update {configuration_to_update} to for {built_model}\n"
)
)
model_version = convert_pipette_model(built_model)
load_and_update_file_from_config(
configuration_to_update, value_to_update, model_version
)
def _update_all_models(configuration_to_update: List[str]) -> None:
paths_to_validate = ROOT / "liquid"
_channel_model_str = {
"single_channel": "single",
"ninety_six_channel": "96",
"eight_channel": "multi",
}
for channel_dir in os.listdir(paths_to_validate):
for model_dir in os.listdir(paths_to_validate / channel_dir):
for version_file in os.listdir(paths_to_validate / channel_dir / model_dir):
version_list = version_file.split(".json")[0].split("_")
built_model: PipetteModel = PipetteModel(
f"{model_dir}_{_channel_model_str[channel_dir]}_v{version_list[0]}.{version_list[1]}"
)
if configuration_to_update[0] == NOZZLE_LOCATION_CONFIGS[1]:
print(
"You selected nozzle_map to edit. If you wish to update the nozzle offset, enter it on the next line.\n"
)
print("Otherwise, please type 'null' on the next line.\n")
value_to_update = json.loads(
input(
f"Please select what you would like to update {configuration_to_update} to for {built_model}\n"
)
)
model_version = convert_pipette_model(built_model)
load_and_update_file_from_config(
configuration_to_update, value_to_update, model_version
)
def determine_models_to_update(update_all_models: bool) -> None:
try:
while True:
print(f"choose {PipetteConfigurations.__name__}:")
config_list, table_lookup = list_configuration_keys()
for row in config_list:
print(f"\t{row}")
configuration_to_update = [
table_lookup[int(input("select a configuration from above\n"))]
]
if configuration_to_update[0] == NOZZLE_LOCATION_CONFIGS[0]:
print(
f"NOTE: updating the {configuration_to_update[0]} will automatically update the {NOZZLE_LOCATION_CONFIGS[1]}\n"
)
field_type = PipetteConfigurations.__fields__[
configuration_to_update[0]
].type_
is_basemodel = isinstance(field_type, ModelMetaclass)
configuration_to_update = handle_subclass_model(
configuration_to_update, field_type, is_basemodel
)
if update_all_models:
_update_all_models(configuration_to_update)
else:
_update_single_model(configuration_to_update)
except KeyboardInterrupt:
print("Finished updating! Validate that your files updated successfully.")
def main() -> None:
"""Entry point."""
parser = argparse.ArgumentParser(
description="96 channel tip handling testing script."
)
parser.add_argument(
"--update_all_models",
type=bool,
help="update all",
default=False,
)
args = parser.parse_args()
determine_models_to_update(args.update_all_models)
if __name__ == "__main__":
"""
A script to automate building a pipette configuration definition.
This script can either perform migrations from a v1 -> v2 schema format
or build a brand new script from scratch.
When building a new pipette configuration model, you will either need
to provide CSVs or use command line inputs.
If you choose CSVs you will need one CSV for the general pipette configuration
data (such as pipette model or number of channels) and one for every tip
type that this pipette model can support.
"""
main()
| print(f"\t{row}") | conditional_block |
update_configuration_files.py | """A script for updating pre-existing V2 Pipette configurations."""
import os
import json
import argparse
from pathlib import Path
from typing import List, Dict, Tuple, Any, Iterator, Type
from pydantic import BaseModel
from pydantic.main import ModelMetaclass
from enum import Enum
from opentrons_shared_data import get_shared_data_root
from ..pipette_definition import (
PipetteConfigurations,
PipetteGeometryDefinition,
PipettePhysicalPropertiesDefinition,
PipetteLiquidPropertiesDefinition,
PipetteModelVersionType,
SupportedTipsDefinition,
)
from ..types import (
PipetteModelType,
PipetteChannelType,
PipetteVersionType,
PipetteTipType,
PipetteModelMajorVersion,
PipetteModelMinorVersion,
)
from ..load_data import _geometry, _physical, _liquid
from ..pipette_load_name_conversions import convert_pipette_model
from ..dev_types import PipetteModel
"""
Instructions:
To run this script, you must be in `shared-data/python`. To invoke, use the command:
`pipenv run python -m opentrons_shared_data.pipette.scripts.update_configuration_files`
If you want to update all files, you can simply use the argument `--update_all_models`.
Make sure to run `make format-js` afterwards to ensure formatting of the json files
is good.
*Note* If you are adding a brand-new key, you MUST update the pydantic models
found in `python/pipette/pipette_definition.py` before running this script.
*Note* When you are entering in your data, please utilize the exact type. I.e. if it's a
list, you must input the list like: [1, 2, 3] or if it's a dict, like: {"data": 1}..
For now, we do not support updating pipetting functions in this script.
"""
ROOT = get_shared_data_root() / "pipette" / "definitions" / "2"
NOZZLE_LOCATION_CONFIGS = ["nozzle_offset", "nozzle_map"]
def _change_to_camel_case(c: str) -> str:
# Tiny helper function to convert to camelCase.
config_name = c.split("_")
if len(config_name) == 1:
return config_name[0]
return f"{config_name[0]}" + "".join(s.capitalize() for s in config_name[1::])
def list_configuration_keys() -> Tuple[List[str], Dict[int, str]]:
"""List out the model keys available to modify at the top level."""
lookup = {i: v for (i, v) in enumerate(PipetteConfigurations.__fields__)}
return [
f"{i}: {v}" for (i, v) in enumerate(PipetteConfigurations.__fields__)
], lookup
def list_available_enum(enum_type: Type[Enum]) -> List[str]:
"""List available pipette models"""
return [f"{i}: {v}" for (i, v) in enumerate(enum_type)] # type: ignore[var-annotated]
def handle_subclass_model(
top_level_configuration: List[str], base_model: BaseModel, is_basemodel: bool
) -> List[str]:
"""Handle sub-classed basemodels and update the top level model as necessary."""
if is_basemodel:
if base_model.__fields__ == SupportedTipsDefinition.__fields__:
# pydantic does something weird with the types in ModelFields so
# we cannot use isinstance checks to confirm if the base model
# is a supported tips definition
print(f"choose {PipetteTipType.__name__}:")
for row in list_available_enum(PipetteTipType):
print(f"\t{row}")
tip_type = list(PipetteTipType)[
int(input("select the tip volume size to modify"))
]
top_level_configuration.append(tip_type.name)
lookup = {i: v for (i, v) in enumerate(base_model.__fields__)}
config_list = [f"{i}: {v}" for (i, v) in enumerate(base_model.__fields__)]
print(f"you selected the basemodel {base_model.__name__}:") # type: ignore[attr-defined]
for row in config_list:
print(f"\t{row}")
configuration_to_update = lookup[
int(input("select a specific configuration from above\n"))
]
field_type = base_model.__fields__[configuration_to_update].type_
is_basemodel = isinstance(field_type, ModelMetaclass)
top_level_configuration.append(configuration_to_update)
return handle_subclass_model(top_level_configuration, field_type, is_basemodel)
else:
return top_level_configuration
def check_from_version(version: str) -> str:
"""Check that the version requested is supported in the system."""
version_int = [int(v) for v in version.split(".")]
if version_int[0] not in PipetteModelMajorVersion:
raise ValueError(f"Major version {version_int[0]} is not supported.")
if version_int[1] not in PipetteModelMinorVersion:
raise ValueError(f"Minor version {version_int[1]} is not supported.")
return version
def save_data_to_file(
directorypath: Path,
file_name: str,
data: Dict[str, Any],
) -> None:
"""
Function used to save data to a file
"""
directorypath.mkdir(parents=True, exist_ok=True)
filepath = directorypath / f"{file_name}.json"
with open(filepath, "w") as f:
json.dump(data, f, indent=2)
def update(
dict_to_update: Dict[str, Any], iter_of_configs: Iterator[str], value_to_update: Any
) -> Dict[str, Any]:
"""
Recursively update the given dictionary to ensure no data is lost when updating.
"""
next_key = next(iter_of_configs, None)
if next_key and isinstance(dict_to_update[next_key], dict):
dict_to_update[next_key] = update(
dict_to_update.get(next_key, {}), iter_of_configs, value_to_update
)
elif next_key:
dict_to_update[next_key] = value_to_update
return dict_to_update
def build_nozzle_map(
nozzle_offset: List[float], channels: PipetteChannelType
) -> Dict[str, List[float]]:
Y_OFFSET = 9
X_OFFSET = -9
breakpoint()
if channels == PipetteChannelType.SINGLE_CHANNEL:
return {"A1": nozzle_offset}
elif channels == PipetteChannelType.EIGHT_CHANNEL:
return {
f"{chr(ord('A') + 1*row)}1": [
nozzle_offset[0],
nozzle_offset[1] + Y_OFFSET * row,
nozzle_offset[2],
]
for row in range(8)
}
elif channels == PipetteChannelType.NINETY_SIX_CHANNEL:
return {
f"{chr(ord('A') + 1*row)}{1 + 1*col}": [
nozzle_offset[0] + X_OFFSET * col,
nozzle_offset[1] + Y_OFFSET * row,
nozzle_offset[2],
]
for row in range(8)
for col in range(12)
}
raise ValueError(f"Unsupported channel type {channels}")
def load_and_update_file_from_config(
config_to_update: List[str],
value_to_update: Any,
model_to_update: PipetteModelVersionType,
) -> None:
"""Update the requested config and save to disk.
Load the requested config sub type (physical, geometry or liquid). Then
update the current file and save to disk.
"""
camel_list_to_update = iter([_change_to_camel_case(i) for i in config_to_update])
if config_to_update[0] in PipetteGeometryDefinition.__fields__:
geometry = _geometry(
model_to_update.pipette_channels,
model_to_update.pipette_type,
model_to_update.pipette_version,
)
if config_to_update[0] == "nozzle_map":
nozzle_to_use = (
value_to_update if value_to_update else geometry["nozzleOffset"]
)
geometry["nozzleMap"] = build_nozzle_map(
nozzle_to_use, model_to_update.pipette_channels
)
elif config_to_update[0] == "nozzle_offset":
geometry["nozzleMap"] = build_nozzle_map(
value_to_update, model_to_update.pipette_channels
)
geometry["nozzleOffset"] = value_to_update
else:
geometry = update(geometry, camel_list_to_update, value_to_update)
PipetteGeometryDefinition.parse_obj(geometry)
filepath = (
ROOT
/ "geometry"
/ model_to_update.pipette_channels.name.lower()
/ model_to_update.pipette_type.value
)
save_data_to_file(
filepath,
f"{model_to_update.pipette_version.major}_{model_to_update.pipette_version.minor}",
geometry,
)
elif config_to_update[0] in PipettePhysicalPropertiesDefinition.__fields__:
physical = _physical(
model_to_update.pipette_channels,
model_to_update.pipette_type,
model_to_update.pipette_version,
)
physical = update(physical, camel_list_to_update, value_to_update)
PipettePhysicalPropertiesDefinition.parse_obj(physical)
filepath = (
ROOT
/ "general"
/ model_to_update.pipette_channels.name.lower()
/ model_to_update.pipette_type.value
)
save_data_to_file(
filepath,
f"{model_to_update.pipette_version.major}_{model_to_update.pipette_version.minor}",
physical,
)
elif config_to_update[0] in PipetteLiquidPropertiesDefinition.__fields__:
liquid = _liquid(
model_to_update.pipette_channels,
model_to_update.pipette_type,
model_to_update.pipette_version,
)
liquid = update(physical, camel_list_to_update, value_to_update)
PipetteLiquidPropertiesDefinition.parse_obj(liquid)
filepath = (
ROOT
/ "liquid"
/ model_to_update.pipette_channels.name.lower()
/ model_to_update.pipette_type.value
)
save_data_to_file(
filepath,
f"{model_to_update.pipette_version.major}_{model_to_update.pipette_version.minor}",
liquid,
)
else:
raise KeyError(
f"{config_to_update} is not saved to a file. Check `pipette_definition.py` for more information."
)
def _update_single_model(configuration_to_update: List[str]) -> None:
"""Helper function to update single model."""
print(f"choose {PipetteModelType.__name__}:")
for row in list_available_enum(PipetteModelType):
print(f"\t{row}")
model = list(PipetteModelType)[int(input("Please select from above\n"))]
print(f"choose {PipetteChannelType.__name__}:")
for row in list_available_enum(PipetteChannelType):
print(f"\t{row}")
channels = list(PipetteChannelType)[int(input("Please select from above\n"))]
version = PipetteVersionType.convert_from_float(
float(check_from_version(input("Please input the version of the model\n")))
)
built_model: PipetteModel = PipetteModel(
f"{model.name}_{str(channels)}_v{version.major}.{version.minor}"
)
if configuration_to_update[0] == NOZZLE_LOCATION_CONFIGS[1]:
print(
"You selected nozzle_map to edit. If you wish to update the nozzle offset, enter it on the next line.\n"
)
print("Otherwise, please type 'null' on the next line.\n")
value_to_update = json.loads(
input(
f"Please select what you would like to update {configuration_to_update} to for {built_model}\n"
)
)
model_version = convert_pipette_model(built_model)
load_and_update_file_from_config(
configuration_to_update, value_to_update, model_version
)
def _update_all_models(configuration_to_update: List[str]) -> None:
paths_to_validate = ROOT / "liquid"
_channel_model_str = {
"single_channel": "single",
"ninety_six_channel": "96",
"eight_channel": "multi",
}
for channel_dir in os.listdir(paths_to_validate):
for model_dir in os.listdir(paths_to_validate / channel_dir):
for version_file in os.listdir(paths_to_validate / channel_dir / model_dir):
version_list = version_file.split(".json")[0].split("_")
built_model: PipetteModel = PipetteModel(
f"{model_dir}_{_channel_model_str[channel_dir]}_v{version_list[0]}.{version_list[1]}"
)
if configuration_to_update[0] == NOZZLE_LOCATION_CONFIGS[1]:
print( | print("Otherwise, please type 'null' on the next line.\n")
value_to_update = json.loads(
input(
f"Please select what you would like to update {configuration_to_update} to for {built_model}\n"
)
)
model_version = convert_pipette_model(built_model)
load_and_update_file_from_config(
configuration_to_update, value_to_update, model_version
)
def determine_models_to_update(update_all_models: bool) -> None:
try:
while True:
print(f"choose {PipetteConfigurations.__name__}:")
config_list, table_lookup = list_configuration_keys()
for row in config_list:
print(f"\t{row}")
configuration_to_update = [
table_lookup[int(input("select a configuration from above\n"))]
]
if configuration_to_update[0] == NOZZLE_LOCATION_CONFIGS[0]:
print(
f"NOTE: updating the {configuration_to_update[0]} will automatically update the {NOZZLE_LOCATION_CONFIGS[1]}\n"
)
field_type = PipetteConfigurations.__fields__[
configuration_to_update[0]
].type_
is_basemodel = isinstance(field_type, ModelMetaclass)
configuration_to_update = handle_subclass_model(
configuration_to_update, field_type, is_basemodel
)
if update_all_models:
_update_all_models(configuration_to_update)
else:
_update_single_model(configuration_to_update)
except KeyboardInterrupt:
print("Finished updating! Validate that your files updated successfully.")
def main() -> None:
"""Entry point."""
parser = argparse.ArgumentParser(
description="96 channel tip handling testing script."
)
parser.add_argument(
"--update_all_models",
type=bool,
help="update all",
default=False,
)
args = parser.parse_args()
determine_models_to_update(args.update_all_models)
if __name__ == "__main__":
"""
A script to automate building a pipette configuration definition.
This script can either perform migrations from a v1 -> v2 schema format
or build a brand new script from scratch.
When building a new pipette configuration model, you will either need
to provide CSVs or use command line inputs.
If you choose CSVs you will need one CSV for the general pipette configuration
data (such as pipette model or number of channels) and one for every tip
type that this pipette model can support.
"""
main() | "You selected nozzle_map to edit. If you wish to update the nozzle offset, enter it on the next line.\n"
) | random_line_split |
adversarial_semantic_dis_trainer.py | import argparse
import os
import glob
import pprint
import pickle
import time
import torch
from torch.nn import functional as F
import torch.optim as optim
import pytorch3d.structures
from pytorch3d.io import load_objs_as_meshes
from pytorch3d.io import load_obj
from pytorch3d.structures import Meshes
from pytorch3d.renderer import Textures
from pytorch3d.renderer import (
look_at_view_transform,
OpenGLPerspectiveCameras,
PointLights,
DirectionalLights,
Materials,
RasterizationSettings,
MeshRenderer,
MeshRasterizer,
SoftPhongShader
)
from pytorch3d.loss import mesh_laplacian_smoothing, mesh_normal_consistency
from tqdm.autonotebook import tqdm
import pandas as pd
from utils import utils, network_utils
from deformation.deformation_net import DeformationNetwork
import deformation.losses as def_losses
from deformation.semantic_discriminator_loss import SemanticDiscriminatorLoss, compute_sem_dis_loss
from adversarial.datasets import GenerationDataset, ShapenetRendersDataset
class AdversarialDiscriminatorTrainer():
def __init__(self, cfg_path, gpu_num, exp_name):
|
def train(self):
# setting up dataloaders
# https://stackoverflow.com/questions/51444059/how-to-iterate-over-two-dataloaders-simultaneously-using-pytorch
generation_dataset = GenerationDataset(cfg, self.device)
generation_loader = torch.utils.data.DataLoader(generation_dataset, batch_size=self.batch_size, num_workers=4, shuffle=True)
shapenet_renders_dataset = ShapenetRendersDataset(cfg)
shapenet_renders_loader = torch.utils.data.DataLoader(shapenet_renders_dataset, batch_size=self.batch_size, num_workers=4, shuffle=True)
# setting up networks and optimizers
deform_net = DeformationNetwork(self.cfg, self.mesh_num_vertices, self.device)
deform_net.to(self.device)
deform_optimizer = optim.Adam(deform_net.parameters(), lr=self.cfg["training"]["learning_rate"])
semantic_dis_net = SemanticDiscriminatorNetwork(cfg)
semantic_dis_net.to(self.device)
dis_optimizer = optim.Adam(semantic_dis_net.parameters(), lr=0.00001, weight_decay=1e-2)
# for adding noise to training labels
# real images have label 1, fake images has label 0
real_labels_dist = torch.distributions.Uniform(torch.tensor([1.0-self.label_noise]), torch.tensor([1.0]))
fake_labels_dist = torch.distributions.Uniform(torch.tensor([0.0]), torch.tensor([0.0+self.label_noise]))
# training generative deformation network and discriminator in an alternating, GAN style
for iter_i in tqdm(range(self.total_training_iters), file=self.tqdm_out):
# training discriminator; generator weights are frozen
# =/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/
for param in semantic_dis_net.parameters(): param.requires_grad = True
for param in deform_net.parameters(): param.requires_grad = False
generation_iter = iter(generation_loader)
shapenet_renders_iter = iter(shapenet_renders_loader)
for batch_idx in tqdm(range(self.num_batches_dis_train), file=self.tqdm_out):
semantic_dis_net.train()
deform_net.eval() # not sure if supposed to set this
dis_optimizer.zero_grad()
real_render_batch = next(shapenet_renders_iter).to(self.device)
pred_logits_real = semantic_dis_net(real_render_batch)
gen_batch = next(generation_iter)
gen_batch_vertices = gen_batch["mesh_verts"].to(self.device)
gen_batch_images = gen_batch["image"].to(self.device)
gen_batch_poses = gen_batch["pose"].to(self.device)
deformed_meshes = self.refine_mesh_batched(deform_net, semantic_dis_net, gen_batch_vertices,
gen_batch_images, gen_batch_poses, compute_losses=False)
# TODO: fix this to turn into logits, not sigmoid
pred_logits_fake = compute_sem_dis_loss(deformed_meshes, self.semantic_dis_loss_num_render, semantic_dis_net, self.device)
batch_size = real_render_batch.shape[0]
real_labels = real_labels_dist.sample((batch_size,1)).squeeze(2).to(self.device)
fake_labels = fake_labels_dist.sample((batch_size,1)).squeeze(2).to(self.device)
dis_loss = F.binary_cross_entropy_with_logits(pred_logits_real, real_labels) + \
F.binary_cross_entropy_with_logits(pred_logits_fake, fake_labels)
dis_loss.backward()
dis_optimizer.step()
continue
# training generator; discriminator weights are frozen
# =/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/
for param in semantic_dis_net.parameters(): param.requires_grad = False
for param in deform_net.parameters(): param.requires_grad = True
for gen_batch in tqdm(generation_loader[:self.num_batches_gen_train], file=self.tqdm_out):
deform_net.train()
semantic_dis_net.eval()
deform_optimizer.zero_grad()
deform_loss_dict, _ = self.refine_mesh_batched(deform_net, semantic_dis_net, gen_batch)
# TODO: make sure loss is correct (follows minimax loss)
total_loss = sum([deform_loss_dict[loss_name] * cfg['training'][loss_name.replace("loss", "lam")] for loss_name in deform_loss_dict])
total_loss.backward()
deform_optimizer.step()
# given a batch of meshes, masks, and poses computes a forward pass through a given deformation network and semantic discriminator network
# returns the deformed mesh and a (optionally) dict of (unweighed, raw) computed losses
# TODO: fix mesh (currently, needs to already be in device)
def refine_mesh_batched(self, deform_net, semantic_dis_net, mesh_verts_batch, img_batch, pose_batch, compute_losses=True):
# computing mesh deformation
delta_v = deform_net(pose_batch, img_batch, mesh_verts_batch)
delta_v = delta_v.reshape((-1,3))
deformed_mesh = mesh.offset_verts(delta_v)
if not compute_losses:
return deformed_mesh
else:
# prep inputs used to compute losses
pred_dist = pose_batch[:,0]
pred_elev = pose_batch[:,1]
pred_azim = pose_batch[:,2]
R, T = look_at_view_transform(pred_dist, pred_elev, pred_azim)
mask = rgba_image[:,:,3] > 0
mask_gt = torch.tensor(mask, dtype=torch.float).to(self.device)
num_vertices = mesh.verts_packed().shape[0]
zero_deformation_tensor = torch.zeros((num_vertices, 3)).to(self.device)
sym_plane_normal = [0,0,1] # TODO: make this generalizable to other classes
loss_dict = {}
# computing losses
rendered_deformed_mesh = utils.render_mesh(deformed_mesh, R, T, self.device, img_size=224, silhouette=True)
loss_dict["sil_loss"] = F.binary_cross_entropy(rendered_deformed_mesh[0, :,:, 3], mask_gt)
loss_dict["l2_loss"] = F.mse_loss(delta_v, zero_deformation_tensor)
loss_dict["lap_smoothness_loss"] = mesh_laplacian_smoothing(deformed_mesh)
loss_dict["normal_consistency_loss"] = mesh_normal_consistency(deformed_mesh)
# TODO: remove weights?
if self.img_sym_lam > 0:
loss_dict["img_sym_loss"], _ = def_losses.image_symmetry_loss(deformed_mesh, sym_plane_normal, self.cfg["training"]["img_sym_num_azim"], self.device)
else:
loss_dict["img_sym_loss"] = torch.tensor(0).to(self.device)
if self.vertex_sym_lam > 0:
loss_dict["vertex_sym_loss"] = def_losses.vertex_symmetry_loss_fast(deformed_mesh, sym_plane_normal, self.device)
else:
loss_dict["vertex_sym_loss"] = torch.tensor(0).to(self.device)
if self.semantic_dis_lam > 0:
loss_dict["semantic_dis_loss"], _ = compute_sem_dis_loss(deformed_mesh, self.semantic_dis_loss_num_render, semantic_dis_net, self.device)
else:
loss_dict["semantic_dis_loss"] = torch.tensor(0).to(self.device)
return loss_dict, deformed_mesh
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Adversarially train a SemanticDiscriminatorNetwork.')
parser.add_argument('cfg_path', type=str, help='Path to yaml configuration file.')
parser.add_argument('--gpu', type=int, default=0, help='Gpu number to use.')
parser.add_argument('--exp_name', type=str, default="adv_semantic_discrim", help='name of experiment')
parser.add_argument('--light', action='store_true', help='run a lighter version of training w/ smaller batch size and num_workers')
parser.add_argument('--label_noise', type=float, default=0, help='amount of label noise to use during training')
args = parser.parse_args()
trainer = AdversarialDiscriminatorTrainer(args.cfg_path, args.gpu, args.exp_name)
training_df = trainer.train()
| self.cfg = utils.load_config(cfg_path, "configs/default.yaml")
self.device = torch.device("cuda:"+str(gpu_num))
self.batch_size = self.cfg["semantic_dis_training"]["batch_size"]
self.total_training_iters = 2
self.num_batches_dis_train = 5
self.num_batches_gen_train = 5
self.mesh_num_vertices = 1498
self.label_noise = 0
self.semantic_dis_loss_num_render = 8
self.training_output_dir = os.path.join(cfg['semantic_dis_training']['output_dir'], "{}_{}".format(time.strftime("%Y_%m_%d--%H_%M_%S"), exp_name))
if not os.path.exists(self.training_output_dir):
os.makedirs(self.training_output_dir)
self.tqdm_out = utils.TqdmPrintEvery() | identifier_body |
adversarial_semantic_dis_trainer.py | import argparse
import os
import glob
import pprint
import pickle
import time
import torch
from torch.nn import functional as F
import torch.optim as optim
import pytorch3d.structures
from pytorch3d.io import load_objs_as_meshes
from pytorch3d.io import load_obj
from pytorch3d.structures import Meshes
from pytorch3d.renderer import Textures
from pytorch3d.renderer import (
look_at_view_transform,
OpenGLPerspectiveCameras,
PointLights,
DirectionalLights,
Materials,
RasterizationSettings,
MeshRenderer,
MeshRasterizer,
SoftPhongShader
)
from pytorch3d.loss import mesh_laplacian_smoothing, mesh_normal_consistency
from tqdm.autonotebook import tqdm
import pandas as pd
from utils import utils, network_utils
from deformation.deformation_net import DeformationNetwork
import deformation.losses as def_losses
from deformation.semantic_discriminator_loss import SemanticDiscriminatorLoss, compute_sem_dis_loss
from adversarial.datasets import GenerationDataset, ShapenetRendersDataset
class AdversarialDiscriminatorTrainer():
def __init__(self, cfg_path, gpu_num, exp_name):
self.cfg = utils.load_config(cfg_path, "configs/default.yaml")
self.device = torch.device("cuda:"+str(gpu_num))
self.batch_size = self.cfg["semantic_dis_training"]["batch_size"]
self.total_training_iters = 2
self.num_batches_dis_train = 5
self.num_batches_gen_train = 5
self.mesh_num_vertices = 1498
self.label_noise = 0
self.semantic_dis_loss_num_render = 8
self.training_output_dir = os.path.join(cfg['semantic_dis_training']['output_dir'], "{}_{}".format(time.strftime("%Y_%m_%d--%H_%M_%S"), exp_name))
if not os.path.exists(self.training_output_dir):
os.makedirs(self.training_output_dir)
self.tqdm_out = utils.TqdmPrintEvery()
def train(self):
# setting up dataloaders
# https://stackoverflow.com/questions/51444059/how-to-iterate-over-two-dataloaders-simultaneously-using-pytorch
generation_dataset = GenerationDataset(cfg, self.device)
generation_loader = torch.utils.data.DataLoader(generation_dataset, batch_size=self.batch_size, num_workers=4, shuffle=True)
shapenet_renders_dataset = ShapenetRendersDataset(cfg)
shapenet_renders_loader = torch.utils.data.DataLoader(shapenet_renders_dataset, batch_size=self.batch_size, num_workers=4, shuffle=True)
# setting up networks and optimizers
deform_net = DeformationNetwork(self.cfg, self.mesh_num_vertices, self.device)
deform_net.to(self.device)
deform_optimizer = optim.Adam(deform_net.parameters(), lr=self.cfg["training"]["learning_rate"])
semantic_dis_net = SemanticDiscriminatorNetwork(cfg)
semantic_dis_net.to(self.device)
dis_optimizer = optim.Adam(semantic_dis_net.parameters(), lr=0.00001, weight_decay=1e-2)
# for adding noise to training labels
# real images have label 1, fake images has label 0
real_labels_dist = torch.distributions.Uniform(torch.tensor([1.0-self.label_noise]), torch.tensor([1.0]))
fake_labels_dist = torch.distributions.Uniform(torch.tensor([0.0]), torch.tensor([0.0+self.label_noise]))
# training generative deformation network and discriminator in an alternating, GAN style
for iter_i in tqdm(range(self.total_training_iters), file=self.tqdm_out):
# training discriminator; generator weights are frozen
# =/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/
for param in semantic_dis_net.parameters(): param.requires_grad = True
for param in deform_net.parameters(): param.requires_grad = False
generation_iter = iter(generation_loader)
shapenet_renders_iter = iter(shapenet_renders_loader)
for batch_idx in tqdm(range(self.num_batches_dis_train), file=self.tqdm_out):
semantic_dis_net.train()
deform_net.eval() # not sure if supposed to set this
dis_optimizer.zero_grad()
real_render_batch = next(shapenet_renders_iter).to(self.device)
pred_logits_real = semantic_dis_net(real_render_batch)
gen_batch = next(generation_iter)
gen_batch_vertices = gen_batch["mesh_verts"].to(self.device)
gen_batch_images = gen_batch["image"].to(self.device)
gen_batch_poses = gen_batch["pose"].to(self.device)
deformed_meshes = self.refine_mesh_batched(deform_net, semantic_dis_net, gen_batch_vertices,
gen_batch_images, gen_batch_poses, compute_losses=False)
# TODO: fix this to turn into logits, not sigmoid
pred_logits_fake = compute_sem_dis_loss(deformed_meshes, self.semantic_dis_loss_num_render, semantic_dis_net, self.device)
batch_size = real_render_batch.shape[0]
real_labels = real_labels_dist.sample((batch_size,1)).squeeze(2).to(self.device)
fake_labels = fake_labels_dist.sample((batch_size,1)).squeeze(2).to(self.device)
dis_loss = F.binary_cross_entropy_with_logits(pred_logits_real, real_labels) + \
F.binary_cross_entropy_with_logits(pred_logits_fake, fake_labels)
dis_loss.backward()
dis_optimizer.step()
continue
# training generator; discriminator weights are frozen
# =/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/
for param in semantic_dis_net.parameters(): param.requires_grad = False
for param in deform_net.parameters(): param.requires_grad = True
for gen_batch in tqdm(generation_loader[:self.num_batches_gen_train], file=self.tqdm_out):
deform_net.train()
semantic_dis_net.eval()
deform_optimizer.zero_grad()
deform_loss_dict, _ = self.refine_mesh_batched(deform_net, semantic_dis_net, gen_batch)
# TODO: make sure loss is correct (follows minimax loss)
total_loss = sum([deform_loss_dict[loss_name] * cfg['training'][loss_name.replace("loss", "lam")] for loss_name in deform_loss_dict])
total_loss.backward()
deform_optimizer.step()
# given a batch of meshes, masks, and poses computes a forward pass through a given deformation network and semantic discriminator network
# returns the deformed mesh and a (optionally) dict of (unweighed, raw) computed losses
# TODO: fix mesh (currently, needs to already be in device)
def refine_mesh_batched(self, deform_net, semantic_dis_net, mesh_verts_batch, img_batch, pose_batch, compute_losses=True):
# computing mesh deformation
delta_v = deform_net(pose_batch, img_batch, mesh_verts_batch)
delta_v = delta_v.reshape((-1,3))
deformed_mesh = mesh.offset_verts(delta_v)
if not compute_losses:
return deformed_mesh
else:
# prep inputs used to compute losses
pred_dist = pose_batch[:,0]
pred_elev = pose_batch[:,1]
pred_azim = pose_batch[:,2]
R, T = look_at_view_transform(pred_dist, pred_elev, pred_azim)
mask = rgba_image[:,:,3] > 0
mask_gt = torch.tensor(mask, dtype=torch.float).to(self.device)
num_vertices = mesh.verts_packed().shape[0]
zero_deformation_tensor = torch.zeros((num_vertices, 3)).to(self.device)
sym_plane_normal = [0,0,1] # TODO: make this generalizable to other classes
loss_dict = {}
# computing losses
rendered_deformed_mesh = utils.render_mesh(deformed_mesh, R, T, self.device, img_size=224, silhouette=True)
loss_dict["sil_loss"] = F.binary_cross_entropy(rendered_deformed_mesh[0, :,:, 3], mask_gt)
loss_dict["l2_loss"] = F.mse_loss(delta_v, zero_deformation_tensor)
loss_dict["lap_smoothness_loss"] = mesh_laplacian_smoothing(deformed_mesh)
loss_dict["normal_consistency_loss"] = mesh_normal_consistency(deformed_mesh)
# TODO: remove weights?
if self.img_sym_lam > 0:
loss_dict["img_sym_loss"], _ = def_losses.image_symmetry_loss(deformed_mesh, sym_plane_normal, self.cfg["training"]["img_sym_num_azim"], self.device)
else:
loss_dict["img_sym_loss"] = torch.tensor(0).to(self.device)
if self.vertex_sym_lam > 0:
loss_dict["vertex_sym_loss"] = def_losses.vertex_symmetry_loss_fast(deformed_mesh, sym_plane_normal, self.device)
else:
loss_dict["vertex_sym_loss"] = torch.tensor(0).to(self.device)
if self.semantic_dis_lam > 0:
loss_dict["semantic_dis_loss"], _ = compute_sem_dis_loss(deformed_mesh, self.semantic_dis_loss_num_render, semantic_dis_net, self.device)
else:
|
return loss_dict, deformed_mesh
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Adversarially train a SemanticDiscriminatorNetwork.')
parser.add_argument('cfg_path', type=str, help='Path to yaml configuration file.')
parser.add_argument('--gpu', type=int, default=0, help='Gpu number to use.')
parser.add_argument('--exp_name', type=str, default="adv_semantic_discrim", help='name of experiment')
parser.add_argument('--light', action='store_true', help='run a lighter version of training w/ smaller batch size and num_workers')
parser.add_argument('--label_noise', type=float, default=0, help='amount of label noise to use during training')
args = parser.parse_args()
trainer = AdversarialDiscriminatorTrainer(args.cfg_path, args.gpu, args.exp_name)
training_df = trainer.train()
| loss_dict["semantic_dis_loss"] = torch.tensor(0).to(self.device) | conditional_block |
adversarial_semantic_dis_trainer.py | import argparse
import os
import glob
import pprint
import pickle
import time
import torch
from torch.nn import functional as F
import torch.optim as optim
import pytorch3d.structures
from pytorch3d.io import load_objs_as_meshes
from pytorch3d.io import load_obj
from pytorch3d.structures import Meshes
from pytorch3d.renderer import Textures
from pytorch3d.renderer import (
look_at_view_transform,
OpenGLPerspectiveCameras,
PointLights,
DirectionalLights,
Materials,
RasterizationSettings,
MeshRenderer,
MeshRasterizer,
SoftPhongShader
)
from pytorch3d.loss import mesh_laplacian_smoothing, mesh_normal_consistency
from tqdm.autonotebook import tqdm
import pandas as pd
from utils import utils, network_utils
from deformation.deformation_net import DeformationNetwork
import deformation.losses as def_losses
from deformation.semantic_discriminator_loss import SemanticDiscriminatorLoss, compute_sem_dis_loss
from adversarial.datasets import GenerationDataset, ShapenetRendersDataset
class AdversarialDiscriminatorTrainer():
def __init__(self, cfg_path, gpu_num, exp_name):
self.cfg = utils.load_config(cfg_path, "configs/default.yaml")
self.device = torch.device("cuda:"+str(gpu_num))
self.batch_size = self.cfg["semantic_dis_training"]["batch_size"]
self.total_training_iters = 2
self.num_batches_dis_train = 5
self.num_batches_gen_train = 5
self.mesh_num_vertices = 1498
self.label_noise = 0
self.semantic_dis_loss_num_render = 8
self.training_output_dir = os.path.join(cfg['semantic_dis_training']['output_dir'], "{}_{}".format(time.strftime("%Y_%m_%d--%H_%M_%S"), exp_name))
if not os.path.exists(self.training_output_dir):
os.makedirs(self.training_output_dir)
self.tqdm_out = utils.TqdmPrintEvery()
def train(self):
# setting up dataloaders
# https://stackoverflow.com/questions/51444059/how-to-iterate-over-two-dataloaders-simultaneously-using-pytorch
generation_dataset = GenerationDataset(cfg, self.device)
generation_loader = torch.utils.data.DataLoader(generation_dataset, batch_size=self.batch_size, num_workers=4, shuffle=True)
shapenet_renders_dataset = ShapenetRendersDataset(cfg)
shapenet_renders_loader = torch.utils.data.DataLoader(shapenet_renders_dataset, batch_size=self.batch_size, num_workers=4, shuffle=True)
# setting up networks and optimizers
deform_net = DeformationNetwork(self.cfg, self.mesh_num_vertices, self.device)
deform_net.to(self.device)
deform_optimizer = optim.Adam(deform_net.parameters(), lr=self.cfg["training"]["learning_rate"])
semantic_dis_net = SemanticDiscriminatorNetwork(cfg)
semantic_dis_net.to(self.device)
dis_optimizer = optim.Adam(semantic_dis_net.parameters(), lr=0.00001, weight_decay=1e-2)
# for adding noise to training labels
# real images have label 1, fake images has label 0
real_labels_dist = torch.distributions.Uniform(torch.tensor([1.0-self.label_noise]), torch.tensor([1.0]))
fake_labels_dist = torch.distributions.Uniform(torch.tensor([0.0]), torch.tensor([0.0+self.label_noise]))
# training generative deformation network and discriminator in an alternating, GAN style
for iter_i in tqdm(range(self.total_training_iters), file=self.tqdm_out):
# training discriminator; generator weights are frozen
# =/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/
for param in semantic_dis_net.parameters(): param.requires_grad = True
for param in deform_net.parameters(): param.requires_grad = False
generation_iter = iter(generation_loader)
shapenet_renders_iter = iter(shapenet_renders_loader)
for batch_idx in tqdm(range(self.num_batches_dis_train), file=self.tqdm_out):
semantic_dis_net.train()
deform_net.eval() # not sure if supposed to set this
dis_optimizer.zero_grad()
real_render_batch = next(shapenet_renders_iter).to(self.device)
pred_logits_real = semantic_dis_net(real_render_batch)
gen_batch = next(generation_iter)
gen_batch_vertices = gen_batch["mesh_verts"].to(self.device)
gen_batch_images = gen_batch["image"].to(self.device)
gen_batch_poses = gen_batch["pose"].to(self.device)
deformed_meshes = self.refine_mesh_batched(deform_net, semantic_dis_net, gen_batch_vertices,
gen_batch_images, gen_batch_poses, compute_losses=False)
# TODO: fix this to turn into logits, not sigmoid
pred_logits_fake = compute_sem_dis_loss(deformed_meshes, self.semantic_dis_loss_num_render, semantic_dis_net, self.device)
batch_size = real_render_batch.shape[0]
real_labels = real_labels_dist.sample((batch_size,1)).squeeze(2).to(self.device) |
dis_loss = F.binary_cross_entropy_with_logits(pred_logits_real, real_labels) + \
F.binary_cross_entropy_with_logits(pred_logits_fake, fake_labels)
dis_loss.backward()
dis_optimizer.step()
continue
# training generator; discriminator weights are frozen
# =/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/
for param in semantic_dis_net.parameters(): param.requires_grad = False
for param in deform_net.parameters(): param.requires_grad = True
for gen_batch in tqdm(generation_loader[:self.num_batches_gen_train], file=self.tqdm_out):
deform_net.train()
semantic_dis_net.eval()
deform_optimizer.zero_grad()
deform_loss_dict, _ = self.refine_mesh_batched(deform_net, semantic_dis_net, gen_batch)
# TODO: make sure loss is correct (follows minimax loss)
total_loss = sum([deform_loss_dict[loss_name] * cfg['training'][loss_name.replace("loss", "lam")] for loss_name in deform_loss_dict])
total_loss.backward()
deform_optimizer.step()
# given a batch of meshes, masks, and poses computes a forward pass through a given deformation network and semantic discriminator network
# returns the deformed mesh and a (optionally) dict of (unweighed, raw) computed losses
# TODO: fix mesh (currently, needs to already be in device)
def refine_mesh_batched(self, deform_net, semantic_dis_net, mesh_verts_batch, img_batch, pose_batch, compute_losses=True):
# computing mesh deformation
delta_v = deform_net(pose_batch, img_batch, mesh_verts_batch)
delta_v = delta_v.reshape((-1,3))
deformed_mesh = mesh.offset_verts(delta_v)
if not compute_losses:
return deformed_mesh
else:
# prep inputs used to compute losses
pred_dist = pose_batch[:,0]
pred_elev = pose_batch[:,1]
pred_azim = pose_batch[:,2]
R, T = look_at_view_transform(pred_dist, pred_elev, pred_azim)
mask = rgba_image[:,:,3] > 0
mask_gt = torch.tensor(mask, dtype=torch.float).to(self.device)
num_vertices = mesh.verts_packed().shape[0]
zero_deformation_tensor = torch.zeros((num_vertices, 3)).to(self.device)
sym_plane_normal = [0,0,1] # TODO: make this generalizable to other classes
loss_dict = {}
# computing losses
rendered_deformed_mesh = utils.render_mesh(deformed_mesh, R, T, self.device, img_size=224, silhouette=True)
loss_dict["sil_loss"] = F.binary_cross_entropy(rendered_deformed_mesh[0, :,:, 3], mask_gt)
loss_dict["l2_loss"] = F.mse_loss(delta_v, zero_deformation_tensor)
loss_dict["lap_smoothness_loss"] = mesh_laplacian_smoothing(deformed_mesh)
loss_dict["normal_consistency_loss"] = mesh_normal_consistency(deformed_mesh)
# TODO: remove weights?
if self.img_sym_lam > 0:
loss_dict["img_sym_loss"], _ = def_losses.image_symmetry_loss(deformed_mesh, sym_plane_normal, self.cfg["training"]["img_sym_num_azim"], self.device)
else:
loss_dict["img_sym_loss"] = torch.tensor(0).to(self.device)
if self.vertex_sym_lam > 0:
loss_dict["vertex_sym_loss"] = def_losses.vertex_symmetry_loss_fast(deformed_mesh, sym_plane_normal, self.device)
else:
loss_dict["vertex_sym_loss"] = torch.tensor(0).to(self.device)
if self.semantic_dis_lam > 0:
loss_dict["semantic_dis_loss"], _ = compute_sem_dis_loss(deformed_mesh, self.semantic_dis_loss_num_render, semantic_dis_net, self.device)
else:
loss_dict["semantic_dis_loss"] = torch.tensor(0).to(self.device)
return loss_dict, deformed_mesh
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Adversarially train a SemanticDiscriminatorNetwork.')
parser.add_argument('cfg_path', type=str, help='Path to yaml configuration file.')
parser.add_argument('--gpu', type=int, default=0, help='Gpu number to use.')
parser.add_argument('--exp_name', type=str, default="adv_semantic_discrim", help='name of experiment')
parser.add_argument('--light', action='store_true', help='run a lighter version of training w/ smaller batch size and num_workers')
parser.add_argument('--label_noise', type=float, default=0, help='amount of label noise to use during training')
args = parser.parse_args()
trainer = AdversarialDiscriminatorTrainer(args.cfg_path, args.gpu, args.exp_name)
training_df = trainer.train() | fake_labels = fake_labels_dist.sample((batch_size,1)).squeeze(2).to(self.device) | random_line_split |
adversarial_semantic_dis_trainer.py | import argparse
import os
import glob
import pprint
import pickle
import time
import torch
from torch.nn import functional as F
import torch.optim as optim
import pytorch3d.structures
from pytorch3d.io import load_objs_as_meshes
from pytorch3d.io import load_obj
from pytorch3d.structures import Meshes
from pytorch3d.renderer import Textures
from pytorch3d.renderer import (
look_at_view_transform,
OpenGLPerspectiveCameras,
PointLights,
DirectionalLights,
Materials,
RasterizationSettings,
MeshRenderer,
MeshRasterizer,
SoftPhongShader
)
from pytorch3d.loss import mesh_laplacian_smoothing, mesh_normal_consistency
from tqdm.autonotebook import tqdm
import pandas as pd
from utils import utils, network_utils
from deformation.deformation_net import DeformationNetwork
import deformation.losses as def_losses
from deformation.semantic_discriminator_loss import SemanticDiscriminatorLoss, compute_sem_dis_loss
from adversarial.datasets import GenerationDataset, ShapenetRendersDataset
class AdversarialDiscriminatorTrainer():
def __init__(self, cfg_path, gpu_num, exp_name):
self.cfg = utils.load_config(cfg_path, "configs/default.yaml")
self.device = torch.device("cuda:"+str(gpu_num))
self.batch_size = self.cfg["semantic_dis_training"]["batch_size"]
self.total_training_iters = 2
self.num_batches_dis_train = 5
self.num_batches_gen_train = 5
self.mesh_num_vertices = 1498
self.label_noise = 0
self.semantic_dis_loss_num_render = 8
self.training_output_dir = os.path.join(cfg['semantic_dis_training']['output_dir'], "{}_{}".format(time.strftime("%Y_%m_%d--%H_%M_%S"), exp_name))
if not os.path.exists(self.training_output_dir):
os.makedirs(self.training_output_dir)
self.tqdm_out = utils.TqdmPrintEvery()
def | (self):
# setting up dataloaders
# https://stackoverflow.com/questions/51444059/how-to-iterate-over-two-dataloaders-simultaneously-using-pytorch
generation_dataset = GenerationDataset(cfg, self.device)
generation_loader = torch.utils.data.DataLoader(generation_dataset, batch_size=self.batch_size, num_workers=4, shuffle=True)
shapenet_renders_dataset = ShapenetRendersDataset(cfg)
shapenet_renders_loader = torch.utils.data.DataLoader(shapenet_renders_dataset, batch_size=self.batch_size, num_workers=4, shuffle=True)
# setting up networks and optimizers
deform_net = DeformationNetwork(self.cfg, self.mesh_num_vertices, self.device)
deform_net.to(self.device)
deform_optimizer = optim.Adam(deform_net.parameters(), lr=self.cfg["training"]["learning_rate"])
semantic_dis_net = SemanticDiscriminatorNetwork(cfg)
semantic_dis_net.to(self.device)
dis_optimizer = optim.Adam(semantic_dis_net.parameters(), lr=0.00001, weight_decay=1e-2)
# for adding noise to training labels
# real images have label 1, fake images has label 0
real_labels_dist = torch.distributions.Uniform(torch.tensor([1.0-self.label_noise]), torch.tensor([1.0]))
fake_labels_dist = torch.distributions.Uniform(torch.tensor([0.0]), torch.tensor([0.0+self.label_noise]))
# training generative deformation network and discriminator in an alternating, GAN style
for iter_i in tqdm(range(self.total_training_iters), file=self.tqdm_out):
# training discriminator; generator weights are frozen
# =/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/
for param in semantic_dis_net.parameters(): param.requires_grad = True
for param in deform_net.parameters(): param.requires_grad = False
generation_iter = iter(generation_loader)
shapenet_renders_iter = iter(shapenet_renders_loader)
for batch_idx in tqdm(range(self.num_batches_dis_train), file=self.tqdm_out):
semantic_dis_net.train()
deform_net.eval() # not sure if supposed to set this
dis_optimizer.zero_grad()
real_render_batch = next(shapenet_renders_iter).to(self.device)
pred_logits_real = semantic_dis_net(real_render_batch)
gen_batch = next(generation_iter)
gen_batch_vertices = gen_batch["mesh_verts"].to(self.device)
gen_batch_images = gen_batch["image"].to(self.device)
gen_batch_poses = gen_batch["pose"].to(self.device)
deformed_meshes = self.refine_mesh_batched(deform_net, semantic_dis_net, gen_batch_vertices,
gen_batch_images, gen_batch_poses, compute_losses=False)
# TODO: fix this to turn into logits, not sigmoid
pred_logits_fake = compute_sem_dis_loss(deformed_meshes, self.semantic_dis_loss_num_render, semantic_dis_net, self.device)
batch_size = real_render_batch.shape[0]
real_labels = real_labels_dist.sample((batch_size,1)).squeeze(2).to(self.device)
fake_labels = fake_labels_dist.sample((batch_size,1)).squeeze(2).to(self.device)
dis_loss = F.binary_cross_entropy_with_logits(pred_logits_real, real_labels) + \
F.binary_cross_entropy_with_logits(pred_logits_fake, fake_labels)
dis_loss.backward()
dis_optimizer.step()
continue
# training generator; discriminator weights are frozen
# =/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/=/
for param in semantic_dis_net.parameters(): param.requires_grad = False
for param in deform_net.parameters(): param.requires_grad = True
for gen_batch in tqdm(generation_loader[:self.num_batches_gen_train], file=self.tqdm_out):
deform_net.train()
semantic_dis_net.eval()
deform_optimizer.zero_grad()
deform_loss_dict, _ = self.refine_mesh_batched(deform_net, semantic_dis_net, gen_batch)
# TODO: make sure loss is correct (follows minimax loss)
total_loss = sum([deform_loss_dict[loss_name] * cfg['training'][loss_name.replace("loss", "lam")] for loss_name in deform_loss_dict])
total_loss.backward()
deform_optimizer.step()
# given a batch of meshes, masks, and poses computes a forward pass through a given deformation network and semantic discriminator network
# returns the deformed mesh and a (optionally) dict of (unweighed, raw) computed losses
# TODO: fix mesh (currently, needs to already be in device)
# NOTE(review): indentation has been stripped from this dump; this is one method of
# the trainer class. Several names used below ('mesh', 'rgba_image', 'cfg') are not
# defined in this scope -- verify against the full class before relying on this code.
def refine_mesh_batched(self, deform_net, semantic_dis_net, mesh_verts_batch, img_batch, pose_batch, compute_losses=True):
# computing mesh deformation
delta_v = deform_net(pose_batch, img_batch, mesh_verts_batch)
# Flatten per-vertex offsets to (V, 3) as expected by offset_verts.
delta_v = delta_v.reshape((-1,3))
# NOTE(review): 'mesh' is undefined in this function -- presumably it should be
# built from mesh_verts_batch; TODO confirm.
deformed_mesh = mesh.offset_verts(delta_v)
if not compute_losses:
# NOTE(review): returns a bare mesh here, but a (loss_dict, mesh) tuple below --
# callers must handle both shapes.
return deformed_mesh
else:
# prep inputs used to compute losses
# Pose columns are used as (distance, elevation, azimuth) -- matches the
# look_at_view_transform call below.
pred_dist = pose_batch[:,0]
pred_elev = pose_batch[:,1]
pred_azim = pose_batch[:,2]
R, T = look_at_view_transform(pred_dist, pred_elev, pred_azim)
# NOTE(review): 'rgba_image' is undefined in this scope -- likely meant to come
# from img_batch (alpha channel as silhouette mask); verify.
mask = rgba_image[:,:,3] > 0
mask_gt = torch.tensor(mask, dtype=torch.float).to(self.device)
num_vertices = mesh.verts_packed().shape[0]
zero_deformation_tensor = torch.zeros((num_vertices, 3)).to(self.device)
sym_plane_normal = [0,0,1] # TODO: make this generalizable to other classes
loss_dict = {}
# computing losses
rendered_deformed_mesh = utils.render_mesh(deformed_mesh, R, T, self.device, img_size=224, silhouette=True)
# Silhouette loss compares the rendered alpha channel against the ground-truth mask.
loss_dict["sil_loss"] = F.binary_cross_entropy(rendered_deformed_mesh[0, :,:, 3], mask_gt)
# L2 regularizer pulls the deformation toward zero.
loss_dict["l2_loss"] = F.mse_loss(delta_v, zero_deformation_tensor)
loss_dict["lap_smoothness_loss"] = mesh_laplacian_smoothing(deformed_mesh)
loss_dict["normal_consistency_loss"] = mesh_normal_consistency(deformed_mesh)
# TODO: remove weights?
# Each optional loss is gated by its lambda; a zero tensor keeps the dict shape stable.
if self.img_sym_lam > 0:
loss_dict["img_sym_loss"], _ = def_losses.image_symmetry_loss(deformed_mesh, sym_plane_normal, self.cfg["training"]["img_sym_num_azim"], self.device)
else:
loss_dict["img_sym_loss"] = torch.tensor(0).to(self.device)
if self.vertex_sym_lam > 0:
loss_dict["vertex_sym_loss"] = def_losses.vertex_symmetry_loss_fast(deformed_mesh, sym_plane_normal, self.device)
else:
loss_dict["vertex_sym_loss"] = torch.tensor(0).to(self.device)
if self.semantic_dis_lam > 0:
loss_dict["semantic_dis_loss"], _ = compute_sem_dis_loss(deformed_mesh, self.semantic_dis_loss_num_render, semantic_dis_net, self.device)
else:
loss_dict["semantic_dis_loss"] = torch.tensor(0).to(self.device)
return loss_dict, deformed_mesh
# CLI entry point: builds the trainer from a YAML config and runs adversarial training.
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Adversarially train a SemanticDiscriminatorNetwork.')
parser.add_argument('cfg_path', type=str, help='Path to yaml configuration file.')
parser.add_argument('--gpu', type=int, default=0, help='Gpu number to use.')
parser.add_argument('--exp_name', type=str, default="adv_semantic_discrim", help='name of experiment')
parser.add_argument('--light', action='store_true', help='run a lighter version of training w/ smaller batch size and num_workers')
parser.add_argument('--label_noise', type=float, default=0, help='amount of label noise to use during training')
args = parser.parse_args()
# NOTE(review): args.light and args.label_noise are parsed but never forwarded to the
# trainer below -- confirm whether the constructor should receive them.
trainer = AdversarialDiscriminatorTrainer(args.cfg_path, args.gpu, args.exp_name)
training_df = trainer.train()
| train | identifier_name |
endpoint.rs | use std::fmt;
use std::fs;
use std::io::{
self,
Read,
Write,
};
use std::num::ParseIntError;
use std::str;
use super::Driver;
/// Combined PCI slot/function byte: bits 7..3 hold the slot (device) number,
/// bits 2..0 the function number.
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub struct SlotFunction(pub u8);

impl SlotFunction {
    /// The device (slot) number encoded in the upper five bits (0..=0x1f).
    pub fn slot(&self) -> u8 {
        let SlotFunction(raw) = *self;
        raw >> 3
    }

    /// The function number encoded in the lower three bits (0..=7).
    pub fn function(&self) -> u8 {
        let SlotFunction(raw) = *self;
        raw & 0b111
    }
}
/// Debug output shows the decoded slot and function fields rather than the raw byte.
impl fmt::Debug for SlotFunction {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        let (slot, function) = (self.0 >> 3, self.0 & 0x7);
        f.debug_struct("SlotFunction")
            .field("slot", &slot)
            .field("function", &function)
            .finish()
    }
}
/// Formats as `SS.F`: the slot as two lowercase hex digits, then the function
/// digit, e.g. `1f.7` -- the canonical sysfs spelling.
impl fmt::Display for SlotFunction {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        let slot = self.0 >> 3;
        let function = self.0 & 0x7;
        write!(f, "{:02x}.{}", slot, function)
    }
}
impl str::FromStr for SlotFunction {
    type Err = ::failure::Error;

    /// Parses `DEV.FUN` (device in hex, function in octal), e.g. `0.0` or `1f.7`.
    ///
    /// Accepts a one- or two-digit device and a single-digit function; anything
    /// else (missing dot, wrong digit counts, out-of-range values) is rejected.
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        let r = s.as_bytes();
        ensure!(r.len() <= 4, "String too long for PCI device.function: {:?}", s);
        // short: 0.0, long: 1f.7
        let (dev_s, fun_s) = if r.len() == 3 && r[1] == b'.' {
            (&s[0..1], &s[2..3])
        } else if r.len() == 4 && r[2] == b'.' {
            (&s[0..2], &s[3..4])
        } else {
            bail!("Couldn't find '.' in valid place for PCI device.function: {:?}", s);
        };
        let dev = with_context!(("invalid PCI device: {}", dev_s),
            u8::from_str_radix(dev_s, 16).map_err(|e| e.into())
        )?;
        let fun = with_context!(("invalid PCI function: {}", fun_s),
            Ok(u8::from_str_radix(fun_s, 8)?)
        )?;
        ensure!(dev < 0x20, "invalid PCI device: {} (too big)", dev);
        // A PCI function is 3 bits wide (0..=7); the previous `fun <= 0x08` bound
        // would have accepted 8. (Octal parsing of a single digit already caps the
        // value at 7, but the guard should state the true invariant.)
        ensure!(fun < 0x08, "invalid PCI function: {} (too big)", fun);
        Ok(SlotFunction(dev << 3 | fun))
    }
}
/// Reads the sysfs attribute `name` of device `ep` and returns its contents
/// with surrounding whitespace stripped.
fn read_trimmed_info_file(ep: PciEndpoint, name: &str) -> crate::AResult<String> {
    with_context!(("couldn't read info file {} for PCI device {}", name, ep), {
        let raw = fs::read_to_string(ep.device_file(name))?;
        Ok(raw.trim().to_owned())
    })
}
/// Reads the sysfs attribute `name` of device `ep` and parses it as a
/// hexadecimal value of type `T`.
///
/// The attribute must start with "0x" (as sysfs hex attributes do). The caller
/// supplies the matching `from_str_radix` (e.g. `u16::from_str_radix`) since
/// there is no shared trait for it here.
fn read_hex_info_file<T>(ep: PciEndpoint, name: &str, from_str_radix: fn(&str, u32) -> Result<T, ParseIntError>) -> crate::AResult<T> {
let value = read_trimmed_info_file(ep, name)?;
ensure!(value.starts_with("0x"), "info {} for PCI device {} doesn't start with '0x': {:?}", name, ep, value);
with_context!(("couldn't parse info {} for PCI device {}", name, ep), {
// Skip the "0x" prefix and parse the remainder as base 16.
Ok(from_str_radix(&value[2..], 16)?)
})
}
/// Reads the sysfs attribute `name` of device `ep` and parses it as a decimal
/// value of type `T` (some PCI attributes, e.g. `secondary_bus_number`, are
/// exposed in decimal rather than hex).
fn read_decimal_info_file<T>(ep: PciEndpoint, name: &str, from_str_radix: fn(&str, u32) -> Result<T, ParseIntError>) -> crate::AResult<T> {
let value = read_trimmed_info_file(ep, name)?;
with_context!(("couldn't parse info {} for PCI device {}", name, ep), {
Ok(from_str_radix(&value, 10)?)
})
}
/// A PCI bus location: a (domain, bus) pair such as `0000:01`.
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)]
pub struct PciBus {
// PCI domain (a.k.a. segment); usually 0 on desktop systems.
pub domain: u16,
pub bus: u8,
}
/// Formats as `DDDD:BB` (lowercase hex, zero-padded), matching sysfs naming.
impl fmt::Display for PciBus {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{:04x}:{:02x}", self.domain, self.bus)
}
}
/// A full PCI endpoint address: bus plus slot/function, e.g. `0000:01:1f.3`.
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)]
pub struct PciEndpoint {
pub bus: PciBus,
pub slot_function: SlotFunction,
}
impl PciEndpoint {
/// Path of the sysfs attribute `name` for this device.
fn device_file(&self, name: &str) -> String {
format!("/sys/bus/pci/devices/{}/{}", *self, name)
}
/// Reads the sysfs `enable` attribute; anything other than "0"/"1" is an error.
pub fn is_enabled(&self) -> crate::AResult<bool> {
match read_trimmed_info_file(*self, "enable")?.as_str() {
"0" => Ok(false),
"1" => Ok(true),
e => bail!("Invalid 'enable' value {:?} for PCI device {}", e, self),
}
}
/// Enables the device if it is currently disabled and returns a guard that
/// disables it again on close/drop. If the device was already enabled, the
/// returned guard is inert and will not disable it.
pub fn scoped_enable(&self) -> crate::AResult<ScopedEnable> {
if !self.is_enabled()? {
// The guard is built before enabling so the device is disabled again
// even if a later step in the caller fails.
let scoped_enable = ScopedEnable { ep: Some(*self) };
self.enable()?;
Ok(scoped_enable)
} else {
Ok(ScopedEnable { ep: None })
}
}
/// Enables the device by writing "1" to its sysfs `enable` attribute.
pub fn enable(&self) -> crate::AResult<()> {
with_context!(("PCI {}: enable device", self), {
fs::OpenOptions::new().write(true).open(self.device_file("enable"))?.write_all(b"1")?;
Ok(())
})
}
/// Disables the device by writing "0" to its sysfs `enable` attribute.
pub fn disable(&self) -> crate::AResult<()> {
with_context!(("PCI {}: disable device", self), {
fs::OpenOptions::new().write(true).open(self.device_file("enable"))?.write_all(b"0")?;
Ok(())
})
}
/// Vendor ID from the sysfs `vendor` attribute.
pub fn vendor(&self) -> crate::AResult<VendorId> {
read_hex_info_file::<u16>(*self, "vendor", u16::from_str_radix).map(VendorId)
}
/// Device ID from the sysfs `device` attribute.
pub fn device(&self) -> crate::AResult<DeviceID> {
read_hex_info_file::<u16>(*self, "device", u16::from_str_radix).map(DeviceID)
}
/// Subsystem (board) vendor ID from sysfs.
pub fn subsystem_vendor(&self) -> crate::AResult<VendorId> {
read_hex_info_file::<u16>(*self, "subsystem_vendor", u16::from_str_radix).map(VendorId)
}
/// Subsystem (board) device ID from sysfs.
pub fn subsystem_device(&self) -> crate::AResult<DeviceID> {
read_hex_info_file::<u16>(*self, "subsystem_device", u16::from_str_radix).map(DeviceID)
}
/// Decodes the 24-bit sysfs `class` value into (class, subclass, prog-if) bytes.
pub fn class(&self) -> crate::AResult<Class> {
let v = read_hex_info_file::<u32>(*self, "class", u32::from_str_radix)?;
let class_code = ClassCode((v >> 16) as u8);
let subclass_code = SubClassCode((v >> 8) as u8);
let programming_interface = ProgrammingInterface(v as u8);
Ok(Class{class_code, subclass_code, programming_interface})
}
/// Bridges have a secondary bus (the bus directly connected devices on the other side are on)
pub fn secondary_bus(&self) -> crate::AResult<PciBus> {
// Unlike most PCI attributes, sysfs exposes this one in decimal.
let bus = read_decimal_info_file::<u8>(*self, "secondary_bus_number", u8::from_str_radix)?;
Ok(PciBus {
domain: self.bus.domain,
bus,
})
}
/// Resolves the sysfs `driver` symlink. Returns `Ok(None)` when no driver is
/// bound; errors for other I/O failures or if `driver` is not a symlink.
pub fn driver(&self) -> crate::AResult<Option<Driver>> {
let link = self.device_file("driver");
match fs::symlink_metadata(&link) {
Err(ref e) if e.kind() == io::ErrorKind::NotFound => return Ok(None),
Err(e) => bail!("Couldn't locate driver for PCI device {}: {}", self, e),
Ok(attr) => if !attr.file_type().is_symlink() {
bail!("driver for PCI device {} not a symlink", self);
},
}
let path = with_context!(("Couldn't follow driver symlink for PCI device {}", self),
Ok(fs::canonicalize(link)?)
)?;
Ok(Some(Driver{path}))
}
}
impl fmt::Display for PciEndpoint {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{}:{}", self.bus, self.slot_function)
}
}
impl str::FromStr for PciEndpoint {
type Err = ::failure::Error;
/// Parses `[DOMAIN:]BUS:DEV.FUN`, e.g. `0000:00:1f.3` or the short form
/// `0:1f.3`; the domain defaults to 0 when only one ':' is present.
fn from_str(s: &str) -> Result<Self, Self::Err> {
// max len: 0000:00:00.0
// short: 0:0.0
ensure!(s.len() <= 12, "PCI endpoint too long: {:?}", s);
let (domain, bus_s, devfun_s) = {
let mut parts = s.split(':');
let p1 = parts.next().ok_or_else(|| format_err!("Need at least one ':' in PCI endpoint: {:?}", s))?;
let p2 = parts.next().ok_or_else(|| format_err!("Need at least one ':' in PCI endpoint: {:?}", s))?;
match parts.next() {
// Two fields: BUS:DEV.FUN with an implicit domain of 0.
None => (0, p1, p2),
Some(p3) => {
ensure!(parts.next().is_none(), "At most two ':' in PCI endpoint: {:?}", s);
let domain = with_context!(("invalid PCI domain: {}", p1),
Ok(u16::from_str_radix(p1, 16)?)
)?;
(domain, p2, p3)
}
}
};
let bus = with_context!(("invalid PCI bus: {}", bus_s),
Ok(u8::from_str_radix(bus_s, 16)?)
)?;
// DEV.FUN parsing (including range checks) is delegated to SlotFunction.
let slot_function = devfun_s.parse::<SlotFunction>()?;
let bus = PciBus {
domain,
bus,
};
Ok(PciEndpoint {
bus,
slot_function,
})
}
}
#[derive(Debug)]
pub struct ScopedEnable {
ep: Option<PciEndpoint>, // is none if already "closed" or was already enabled before
}
impl ScopedEnable {
pub fn | (mut self) -> crate::AResult<()> {
if let Some(ep) = self.ep.take() {
ep.disable()?;
}
Ok(())
}
}
/// Fallback cleanup: disables the device when the guard is dropped without an
/// explicit close. Errors cannot propagate out of `drop`, so they are logged.
impl Drop for ScopedEnable {
fn drop(&mut self) {
if let Some(ep) = self.ep.take() {
if let Err(e) = ep.disable() {
error!("PCI {}: Failed to disable temporarily enabled device: {}", ep, e);
}
}
}
}
/// 16-bit PCI vendor ID (config-space "vendor" register).
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)]
pub struct VendorId(pub u16);
/// Displays as `0xNNNN` (lowercase hex, zero-padded).
impl fmt::Display for VendorId {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "0x{:04x}", self.0)
}
}
/// 16-bit PCI device ID (config-space "device" register).
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)]
pub struct DeviceID(pub u16);
/// Displays as `0xNNNN` (lowercase hex, zero-padded).
impl fmt::Display for DeviceID {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "0x{:04x}", self.0)
}
}
/// PCI base class byte (top byte of the 24-bit class value).
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)]
pub struct ClassCode(pub u8);
/// PCI subclass byte (middle byte of the 24-bit class value).
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)]
pub struct SubClassCode(pub u8);
/// PCI programming-interface byte (low byte of the 24-bit class value).
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)]
pub struct ProgrammingInterface(pub u8);
/// Decoded 24-bit PCI class: (class, subclass, prog-if).
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)]
pub struct Class {
pub class_code: ClassCode,
pub subclass_code: SubClassCode,
pub programming_interface: ProgrammingInterface,
}
/// Displays the three bytes re-packed as `0xCCSSPP`.
impl fmt::Display for Class {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(
f,
"0x{:02x}{:02x}{:02x}",
self.class_code.0,
self.subclass_code.0,
self.programming_interface.0,
)
}
}
#[cfg(test)]
mod test {
    use super::SlotFunction;

    // Asserts that `repr` parses to the given (dev, fun) pair.
    fn check_dev_fun(dev: u8, fun: u8, repr: &str) {
        assert!(dev < 0x20);
        assert!(fun < 0x08);
        match repr.parse::<SlotFunction>() {
            Err(e) => panic!("{} failed to parse as SlotFunction: {}", repr, e),
            Ok(df) => assert_eq!(SlotFunction(dev << 3 | fun), df, "failed validing parsed {}", repr),
        }
    }

    // Additionally asserts the round trip: Display must reproduce `repr` exactly.
    fn check_dev_fun_canonical(dev: u8, fun: u8, repr: &str) {
        check_dev_fun(dev, fun, repr);
        assert_eq!(SlotFunction(dev << 3 | fun).to_string(), repr, "failed stringifying dev 0x{:02x} function {}", dev, fun);
    }

    // Asserts that `repr` is rejected by the parser.
    fn check_invalid_dev_fun(repr: &str) {
        // The format string previously had a `{:?}` placeholder with no argument,
        // which does not compile; `repr` is now passed to it.
        assert!(repr.parse::<SlotFunction>().is_err(), "{:?} must not be a valid DEV.FUN", repr);
    }

    #[test]
    fn parse_dev_function() {
        // One-digit device form parses but is not canonical (Display pads to two digits).
        check_dev_fun(0b0_0000, 0b000, "0.0");
        check_dev_fun_canonical(0b0_0000, 0b000, "00.0");
        check_dev_fun_canonical(0b0_0000, 0b001, "00.1");
        check_dev_fun_canonical(0b0_0000, 0b111, "00.7");
        check_dev_fun_canonical(0b0_0001, 0b000, "01.0");
        check_dev_fun_canonical(0b0_0001, 0b001, "01.1");
        check_dev_fun_canonical(0b0_0001, 0b111, "01.7");
        check_dev_fun_canonical(0b1_0000, 0b000, "10.0");
        check_dev_fun_canonical(0b1_0000, 0b111, "10.7");
        check_dev_fun_canonical(0b1_1111, 0b011, "1f.3");
        check_dev_fun_canonical(0b1_1111, 0b111, "1f.7");
        check_invalid_dev_fun("");
        check_invalid_dev_fun(".");
        check_invalid_dev_fun("0.");
        check_invalid_dev_fun("00.");
        check_invalid_dev_fun("000.");
        check_invalid_dev_fun(".0");
        check_invalid_dev_fun(".00");
        check_invalid_dev_fun(".000");
        check_invalid_dev_fun("0");
        check_invalid_dev_fun("00");
        check_invalid_dev_fun("000");
        check_invalid_dev_fun("0000");
    }
}
| close | identifier_name |
endpoint.rs | use std::fmt;
use std::fs;
use std::io::{
self,
Read,
Write,
};
use std::num::ParseIntError;
use std::str;
use super::Driver;
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub struct SlotFunction(pub u8);
impl SlotFunction {
pub fn slot(&self) -> u8 {
self.0 >> 3
}
pub fn function(&self) -> u8 {
self.0 & 0x7
}
}
impl fmt::Debug for SlotFunction {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.debug_struct("SlotFunction")
.field("slot", &(self.0 >> 3))
.field("function", &(self.0 & 0x7))
.finish()
}
}
impl fmt::Display for SlotFunction {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{:02x}.{}", self.0 >> 3, self.0 & 0x7)
}
}
impl str::FromStr for SlotFunction {
type Err = ::failure::Error;
|
// short: 0.0, long: 1f.7
let (dev_s, fun_s) = if r.len() == 3 && r[1] == b'.' {
(&s[0..1], &s[2..3])
} else if r.len() == 4 && r[2] == b'.' {
(&s[0..2], &s[3..4])
} else {
bail!("Couldn't find '.' in valid place for PCI device.function: {:?}", s);
};
let dev = with_context!(("invalid PCI device: {}", dev_s),
u8::from_str_radix(dev_s, 16).map_err(|e| e.into())
)?;
let fun = with_context!(("invalid PCI function: {}", fun_s),
Ok(u8::from_str_radix(fun_s, 8)?)
)?;
ensure!(dev < 0x20, "invalid PCI device: {} (too big)", dev);
ensure!(fun <= 0x08, "invalid PCI function: {} (too big)", fun);
Ok(SlotFunction(dev << 3 | fun))
}
}
fn read_trimmed_info_file(ep: PciEndpoint, name: &str) -> crate::AResult<String> {
with_context!(("couldn't read info file {} for PCI device {}", name, ep), {
let mut f = fs::File::open(ep.device_file(name))?;
let mut result = String::new();
f.read_to_string(&mut result)?;
Ok(result.trim().into())
})
}
fn read_hex_info_file<T>(ep: PciEndpoint, name: &str, from_str_radix: fn(&str, u32) -> Result<T, ParseIntError>) -> crate::AResult<T> {
let value = read_trimmed_info_file(ep, name)?;
ensure!(value.starts_with("0x"), "info {} for PCI device {} doesn't start with '0x': {:?}", name, ep, value);
with_context!(("couldn't parse info {} for PCI device {}", name, ep), {
Ok(from_str_radix(&value[2..], 16)?)
})
}
fn read_decimal_info_file<T>(ep: PciEndpoint, name: &str, from_str_radix: fn(&str, u32) -> Result<T, ParseIntError>) -> crate::AResult<T> {
let value = read_trimmed_info_file(ep, name)?;
with_context!(("couldn't parse info {} for PCI device {}", name, ep), {
Ok(from_str_radix(&value, 10)?)
})
}
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)]
pub struct PciBus {
pub domain: u16,
pub bus: u8,
}
impl fmt::Display for PciBus {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{:04x}:{:02x}", self.domain, self.bus)
}
}
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)]
pub struct PciEndpoint {
pub bus: PciBus,
pub slot_function: SlotFunction,
}
impl PciEndpoint {
fn device_file(&self, name: &str) -> String {
format!("/sys/bus/pci/devices/{}/{}", *self, name)
}
pub fn is_enabled(&self) -> crate::AResult<bool> {
match read_trimmed_info_file(*self, "enable")?.as_str() {
"0" => Ok(false),
"1" => Ok(true),
e => bail!("Invalid 'enable' value {:?} for PCI device {}", e, self),
}
}
pub fn scoped_enable(&self) -> crate::AResult<ScopedEnable> {
if !self.is_enabled()? {
let scoped_enable = ScopedEnable { ep: Some(*self) };
self.enable()?;
Ok(scoped_enable)
} else {
Ok(ScopedEnable { ep: None })
}
}
pub fn enable(&self) -> crate::AResult<()> {
with_context!(("PCI {}: enable device", self), {
fs::OpenOptions::new().write(true).open(self.device_file("enable"))?.write_all(b"1")?;
Ok(())
})
}
pub fn disable(&self) -> crate::AResult<()> {
with_context!(("PCI {}: disable device", self), {
fs::OpenOptions::new().write(true).open(self.device_file("enable"))?.write_all(b"0")?;
Ok(())
})
}
pub fn vendor(&self) -> crate::AResult<VendorId> {
read_hex_info_file::<u16>(*self, "vendor", u16::from_str_radix).map(VendorId)
}
pub fn device(&self) -> crate::AResult<DeviceID> {
read_hex_info_file::<u16>(*self, "device", u16::from_str_radix).map(DeviceID)
}
pub fn subsystem_vendor(&self) -> crate::AResult<VendorId> {
read_hex_info_file::<u16>(*self, "subsystem_vendor", u16::from_str_radix).map(VendorId)
}
pub fn subsystem_device(&self) -> crate::AResult<DeviceID> {
read_hex_info_file::<u16>(*self, "subsystem_device", u16::from_str_radix).map(DeviceID)
}
pub fn class(&self) -> crate::AResult<Class> {
let v = read_hex_info_file::<u32>(*self, "class", u32::from_str_radix)?;
let class_code = ClassCode((v >> 16) as u8);
let subclass_code = SubClassCode((v >> 8) as u8);
let programming_interface = ProgrammingInterface(v as u8);
Ok(Class{class_code, subclass_code, programming_interface})
}
/// Bridges have a secondary bus (the bus directly connected devices on the other side are on)
pub fn secondary_bus(&self) -> crate::AResult<PciBus> {
let bus = read_decimal_info_file::<u8>(*self, "secondary_bus_number", u8::from_str_radix)?;
Ok(PciBus {
domain: self.bus.domain,
bus,
})
}
pub fn driver(&self) -> crate::AResult<Option<Driver>> {
let link = self.device_file("driver");
match fs::symlink_metadata(&link) {
Err(ref e) if e.kind() == io::ErrorKind::NotFound => return Ok(None),
Err(e) => bail!("Couldn't locate driver for PCI device {}: {}", self, e),
Ok(attr) => if !attr.file_type().is_symlink() {
bail!("driver for PCI device {} not a symlink", self);
},
}
let path = with_context!(("Couldn't follow driver symlink for PCI device {}", self),
Ok(fs::canonicalize(link)?)
)?;
Ok(Some(Driver{path}))
}
}
impl fmt::Display for PciEndpoint {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{}:{}", self.bus, self.slot_function)
}
}
impl str::FromStr for PciEndpoint {
type Err = ::failure::Error;
fn from_str(s: &str) -> Result<Self, Self::Err> {
// max len: 0000:00:00.0
// short: 0:0.0
ensure!(s.len() <= 12, "PCI endpoint too long: {:?}", s);
let (domain, bus_s, devfun_s) = {
let mut parts = s.split(':');
let p1 = parts.next().ok_or_else(|| format_err!("Need at least one ':' in PCI endpoint: {:?}", s))?;
let p2 = parts.next().ok_or_else(|| format_err!("Need at least one ':' in PCI endpoint: {:?}", s))?;
match parts.next() {
None => (0, p1, p2),
Some(p3) => {
ensure!(parts.next().is_none(), "At most two ':' in PCI endpoint: {:?}", s);
let domain = with_context!(("invalid PCI domain: {}", p1),
Ok(u16::from_str_radix(p1, 16)?)
)?;
(domain, p2, p3)
}
}
};
let bus = with_context!(("invalid PCI bus: {}", bus_s),
Ok(u8::from_str_radix(bus_s, 16)?)
)?;
let slot_function = devfun_s.parse::<SlotFunction>()?;
let bus = PciBus {
domain,
bus,
};
Ok(PciEndpoint {
bus,
slot_function,
})
}
}
#[derive(Debug)]
pub struct ScopedEnable {
ep: Option<PciEndpoint>, // is none if already "closed" or was already enabled before
}
impl ScopedEnable {
pub fn close(mut self) -> crate::AResult<()> {
if let Some(ep) = self.ep.take() {
ep.disable()?;
}
Ok(())
}
}
impl Drop for ScopedEnable {
fn drop(&mut self) {
if let Some(ep) = self.ep.take() {
if let Err(e) = ep.disable() {
error!("PCI {}: Failed to disable temporarily enabled device: {}", ep, e);
}
}
}
}
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)]
pub struct VendorId(pub u16);
impl fmt::Display for VendorId {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "0x{:04x}", self.0)
}
}
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)]
pub struct DeviceID(pub u16);
impl fmt::Display for DeviceID {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "0x{:04x}", self.0)
}
}
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)]
pub struct ClassCode(pub u8);
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)]
pub struct SubClassCode(pub u8);
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)]
pub struct ProgrammingInterface(pub u8);
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)]
pub struct Class {
pub class_code: ClassCode,
pub subclass_code: SubClassCode,
pub programming_interface: ProgrammingInterface,
}
impl fmt::Display for Class {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(
f,
"0x{:02x}{:02x}{:02x}",
self.class_code.0,
self.subclass_code.0,
self.programming_interface.0,
)
}
}
#[cfg(test)]
mod test {
use super::SlotFunction;
fn check_dev_fun(dev: u8, fun: u8, repr: &str) {
assert!(dev < 0x20);
assert!(fun < 0x08);
match repr.parse::<SlotFunction>() {
Err(e) => panic!("{} failed to parse as SlotFunction: {}", repr, e),
Ok(df) => assert_eq!(SlotFunction(dev << 3 | fun), df, "failed validing parsed {}", repr),
}
}
fn check_dev_fun_canonical(dev: u8, fun: u8, repr: &str) {
check_dev_fun(dev, fun, repr);
assert_eq!(SlotFunction(dev << 3 | fun).to_string(), repr, "failed stringifying dev 0x{:02x} function {}", dev, fun);
}
fn check_invalid_dev_fun(repr: &str) {
assert!(repr.parse::<SlotFunction>().is_err(), "{:?} must not be a valid DEV.FUN");
}
#[test]
fn parse_dev_function() {
check_dev_fun(0b0_0000, 0b000, "0.0");
check_dev_fun_canonical(0b0_0000, 0b000, "00.0");
check_dev_fun_canonical(0b0_0000, 0b001, "00.1");
check_dev_fun_canonical(0b0_0000, 0b111, "00.7");
check_dev_fun_canonical(0b0_0001, 0b000, "01.0");
check_dev_fun_canonical(0b0_0001, 0b001, "01.1");
check_dev_fun_canonical(0b0_0001, 0b111, "01.7");
check_dev_fun_canonical(0b1_0000, 0b000, "10.0");
check_dev_fun_canonical(0b1_0000, 0b111, "10.7");
check_dev_fun_canonical(0b1_1111, 0b011, "1f.3");
check_dev_fun_canonical(0b1_1111, 0b111, "1f.7");
check_invalid_dev_fun("");
check_invalid_dev_fun(".");
check_invalid_dev_fun("0.");
check_invalid_dev_fun("00.");
check_invalid_dev_fun("000.");
check_invalid_dev_fun(".0");
check_invalid_dev_fun(".00");
check_invalid_dev_fun(".000");
check_invalid_dev_fun("0");
check_invalid_dev_fun("00");
check_invalid_dev_fun("000");
check_invalid_dev_fun("0000");
}
} | fn from_str(s: &str) -> Result<Self, Self::Err> {
let r = s.as_bytes();
ensure!(r.len() <= 4, "String too long for PCI device.function: {:?}", s); | random_line_split |
common.rs | // Copyright 2019. The Tari Project
//
// Redistribution and use in source and binary forms, with or without modification, are permitted provided that the
// following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following
// disclaimer.
//
// 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the
// following disclaimer in the documentation and/or other materials provided with the distribution.
//
// 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote
// products derived from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
// INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
// USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Portions of this file were originally copyrighted (c) 2018 The Grin Developers, issued under the Apache License,
// Version 2.0, available at http://www.apache.org/licenses/LICENSE-2.0.
use std::convert::TryInto;
use digest::Digest;
use tari_common::DomainDigest;
use crate::{error::MerkleMountainRangeError, Hash};
/// A word with every bit set; used to enumerate perfect-subtree sizes.
const ALL_ONES: usize = std::usize::MAX;

/// Zero-based index of a leaf within the MMR (as opposed to a node index).
#[derive(Copy, Clone)]
pub struct LeafIndex(pub usize);

/// Returns the MMR node index derived from the leaf index.
///
/// Every earlier leaf contributes itself plus one parent per carry bit,
/// giving the closed form `2n - popcount(n)`.
pub fn node_index(leaf_index: LeafIndex) -> usize {
    let n = leaf_index.0;
    match n {
        0 => 0,
        _ => 2 * n - n.count_ones() as usize,
    }
}
/// Returns the leaf index derived from the MMR node index.
///
/// `node_index` is treated as an MMR size; the result is the leaf count of an
/// MMR of that size (see `checked_n_leaves`).
pub fn leaf_index(node_index: u32) -> u32 {
// NOTE(review): on targets where usize is 32 bits, `node_index as usize` can
// equal `usize::MAX`, making this expect reachable -- confirm target assumptions.
let n = checked_n_leaves(node_index as usize)
.expect("checked_n_leaves can only overflow for `usize::MAX` and that is not possible");
// Conversion is safe because n < node_index
n.try_into().unwrap()
}
/// Is this position a leaf in the MMR?
/// We know the positions of all leaves based on the postorder height of an MMR of any size (somewhat unintuitively
/// but this is how the PMMR is "append only").
pub fn is_leaf(pos: usize) -> bool {
// Leaves are exactly the nodes of height 0.
bintree_height(pos) == 0
}
/// Gets the postorder traversal index of all peaks in a MMR given its size.
/// Starts with the top peak, which is always on the left side of the range, and navigates toward lower siblings
/// toward the right of the range.
///
/// Returns `None` when `size` is not a valid MMR size (nodes are left over after
/// accounting for every perfect binary subtree).
pub fn find_peaks(size: usize) -> Option<Vec<usize>> {
    if size == 0 {
        return Some(vec![]);
    }
    // Largest perfect subtree (all-ones node count) not exceeding `size`.
    let mut peak_size = std::usize::MAX >> size.leading_zeros();
    let mut num_left = size;
    let mut sum_prev_peaks = 0;
    let mut peaks = vec![];
    while peak_size != 0 {
        if num_left >= peak_size {
            // The peak of a perfect subtree is its last (postorder) node.
            peaks.push(sum_prev_peaks + peak_size - 1);
            sum_prev_peaks += peak_size;
            num_left -= peak_size;
        }
        peak_size >>= 1;
    }
    // This early-return was masked out of the dump (`if num_left > 0 |`); any
    // remainder means `size` is not a valid MMR size, matching the unit tests
    // (e.g. `find_peaks(2) == None`).
    if num_left > 0 {
        return None;
    }
    Some(peaks)
}
/// Calculates the positions of the (parent, sibling) of the node at the provided position.
/// Returns an error if the pos provided would result in an underflow or overflow.
pub fn family(pos: usize) -> Result<(usize, usize), MerkleMountainRangeError> {
let (peak_map, height) = peak_map_height(pos);
let peak = 1 << height;
// Convert to i128 so that we don't over/underflow, and then we will cast back to usize after
let pos = pos as i128;
let peak = i128::from(peak);
let peak_map = peak_map as i128;
// Clear peak bit => left child: parent is `pos + 2*peak`, sibling just before it.
// Set peak bit => right child: parent is the next node, sibling `2*peak` back.
let res = if (peak_map & peak) == 0 {
(pos + 2 * peak, pos + 2 * peak - 1)
} else {
(pos + 1, pos + 1 - 2 * peak)
};
Ok((
res.0.try_into().map_err(|_| MerkleMountainRangeError::OutOfRange)?,
res.1.try_into().map_err(|_| MerkleMountainRangeError::OutOfRange)?,
))
}
/// For a given starting position calculate the parent and sibling positions
/// for the branch/path from that position to the peak of the tree.
/// We will use the sibling positions to generate the "path" of a Merkle proof.
pub fn family_branch(pos: usize, last_pos: usize) -> Vec<(usize, usize)> {
// loop going up the tree, from node to parent, as long as we stay inside
// the tree (as defined by last_pos).
let (peak_map, height) = peak_map_height(pos);
let mut peak = 1 << height;
let mut branch = vec![];
let mut current = pos;
let mut sibling;
while current < last_pos {
// Same left/right-child arithmetic as `family`, iterated up the tree.
if (peak_map & peak) == 0 {
current += 2 * peak;
sibling = current - 1;
} else {
current += 1;
sibling = current - 2 * peak;
};
// Stop at the local peak: the parent would lie beyond `last_pos`.
if current > last_pos {
break;
}
branch.push((current, sibling));
// Each step up doubles the subtree size the peak bit refers to.
peak <<= 1;
}
branch
}
/// The height of a node in a full binary tree from its index.
pub fn bintree_height(num: usize) -> usize {
if num == 0 {
return 0;
}
// The height is the second component returned by peak_map_height.
peak_map_height(num).1
}
/// Returns `(peak_map, pos_height)` for the 0-based node position `pos`,
/// evaluated on the MMR state prior to that node's addition.
///
/// Example: input 4 yields (0b11, 0), since the MMR before adding node 4 was
///     2
///    / \
///   0   1   3
/// i.e. peaks of heights 1 and 0 are both present.
///
/// NOTE:
/// the peak map also encodes the path taken from the root to the added node since the path turns left (resp. right)
/// if-and-only-if a peak at that height is absent (resp. present)
pub fn peak_map_height(pos: usize) -> (usize, usize) {
    if pos == 0 {
        return (0, 0);
    }
    let mut remaining = pos;
    let mut bitmap = 0;
    // Start from the largest perfect subtree (all-ones node count) <= pos and
    // greedily subtract each subtree that fits, recording a bit per height.
    let mut subtree = std::usize::MAX >> pos.leading_zeros();
    while subtree != 0 {
        bitmap <<= 1;
        if remaining >= subtree {
            remaining -= subtree;
            bitmap |= 1;
        }
        subtree >>= 1;
    }
    // Whatever is left is the node's height within its (incomplete) subtree.
    (bitmap, remaining)
}
/// Is the node at this pos the "left" sibling of its parent?
pub fn is_left_sibling(pos: usize) -> bool {
let (peak_map, height) = peak_map_height(pos);
let peak = 1 << height;
// A clear bit at this node's height means the path turned left here.
(peak_map & peak) == 0
}
/// Hashes `left || right` with the domain-separated digest `D` and returns the
/// raw digest bytes (used to derive a parent node from its two children).
pub fn hash_together<D: Digest + DomainDigest>(left: &[u8], right: &[u8]) -> Hash {
D::new().chain_update(left).chain_update(right).finalize().to_vec()
}
/// The number of leaves in a MMR of the provided size.
///
/// Example: input 5 gives 4 leaves, since the MMR state before adding node 5 was
///     2
///    / \
///   0   1   3   4
///
/// `None` is returned if the number of leaves exceeds the maximum value of a
/// usize (only possible for `size == usize::MAX`).
pub fn checked_n_leaves(size: usize) -> Option<usize> {
    if size == 0 {
        return Some(0);
    }
    if size == std::usize::MAX {
        return None;
    }
    let mut remaining = size;
    let mut leaves = 0usize;
    // Walk the perfect-subtree sizes from largest (all-ones <= size) down.
    let mut subtree = std::usize::MAX >> size.leading_zeros();
    while subtree != 0 {
        if remaining >= subtree {
            // A perfect subtree of n nodes has (n + 1) / 2 leaves.
            leaves += (subtree + 1) >> 1;
            remaining -= subtree;
        }
        subtree >>= 1;
    }
    // A partial peak at the end contributes one more (pending) leaf.
    if remaining == 0 {
        Some(leaves)
    } else {
        Some(leaves + 1)
    }
}
#[cfg(test)]
mod test {
use super::*;
#[test]
fn leaf_to_node_indices() {
assert_eq!(node_index(LeafIndex(0)), 0);
assert_eq!(node_index(LeafIndex(1)), 1);
assert_eq!(node_index(LeafIndex(2)), 3);
assert_eq!(node_index(LeafIndex(3)), 4);
assert_eq!(node_index(LeafIndex(5)), 8);
assert_eq!(node_index(LeafIndex(6)), 10);
assert_eq!(node_index(LeafIndex(7)), 11);
assert_eq!(node_index(LeafIndex(8)), 15);
}
#[test]
fn n_leaf_nodes() {
assert_eq!(checked_n_leaves(0), Some(0));
assert_eq!(checked_n_leaves(1), Some(1));
assert_eq!(checked_n_leaves(3), Some(2));
assert_eq!(checked_n_leaves(4), Some(3));
assert_eq!(checked_n_leaves(5), Some(4));
assert_eq!(checked_n_leaves(8), Some(5));
assert_eq!(checked_n_leaves(10), Some(6));
assert_eq!(checked_n_leaves(11), Some(7));
assert_eq!(checked_n_leaves(15), Some(8));
assert_eq!(checked_n_leaves(usize::MAX - 1), Some(9223372036854775808));
// Overflowed
assert_eq!(checked_n_leaves(usize::MAX), None);
}
#[test]
fn peak_vectors() {
assert_eq!(find_peaks(0), Some(Vec::<usize>::new()));
assert_eq!(find_peaks(1), Some(vec![0]));
assert_eq!(find_peaks(2), None);
assert_eq!(find_peaks(3), Some(vec![2]));
assert_eq!(find_peaks(4), Some(vec![2, 3]));
assert_eq!(find_peaks(5), None);
assert_eq!(find_peaks(6), None);
assert_eq!(find_peaks(7), Some(vec![6]));
assert_eq!(find_peaks(8), Some(vec![6, 7]));
assert_eq!(find_peaks(9), None);
assert_eq!(find_peaks(10), Some(vec![6, 9]));
assert_eq!(find_peaks(11), Some(vec![6, 9, 10]));
assert_eq!(find_peaks(12), None);
assert_eq!(find_peaks(13), None);
assert_eq!(find_peaks(14), None);
assert_eq!(find_peaks(15), Some(vec![14]));
assert_eq!(find_peaks(16), Some(vec![14, 15]));
assert_eq!(find_peaks(17), None);
assert_eq!(find_peaks(18), Some(vec![14, 17]));
assert_eq!(find_peaks(19), Some(vec![14, 17, 18]));
assert_eq!(find_peaks(20), None);
assert_eq!(find_peaks(21), None);
assert_eq!(find_peaks(22), Some(vec![14, 21]));
assert_eq!(find_peaks(23), Some(vec![14, 21, 22]));
assert_eq!(find_peaks(24), None);
assert_eq!(find_peaks(25), Some(vec![14, 21, 24]));
assert_eq!(find_peaks(26), Some(vec![14, 21, 24, 25]));
assert_eq!(find_peaks(27), None);
assert_eq!(find_peaks(28), None);
assert_eq!(find_peaks(56), Some(vec![30, 45, 52, 55]));
assert_eq!(find_peaks(60), None);
assert_eq!(find_peaks(123), None);
assert_eq!(find_peaks(130), Some(vec![126, 129]));
}
#[test]
fn peak_map_heights() {
assert_eq!(peak_map_height(0), (0, 0));
assert_eq!(peak_map_height(4), (0b11, 0));
// 6
// 2 5
// 0 1 3 4 7 8
assert_eq!(peak_map_height(9), (0b101, 1));
// 6
// 2 5 9
// 0 1 3 4 7 8 *
assert_eq!(peak_map_height(10), (0b110, 0));
assert_eq!(peak_map_height(12), (0b111, 1));
assert_eq!(peak_map_height(33), (0b10001, 1));
assert_eq!(peak_map_height(34), (0b10010, 0));
}
#[test]
fn is_sibling_left() {
assert!(is_left_sibling(0));
assert!(!is_left_sibling(1));
assert!(is_left_sibling(2));
assert!(is_left_sibling(3));
assert!(!is_left_sibling(4));
assert!(!is_left_sibling(5));
assert!(is_left_sibling(6));
assert!(is_left_sibling(7));
assert!(!is_left_sibling(8));
assert!(is_left_sibling(9));
assert!(is_left_sibling(10));
assert!(!is_left_sibling(11));
assert!(!is_left_sibling(12));
assert!(!is_left_sibling(13));
assert!(is_left_sibling(14));
assert!(is_left_sibling(15));
}
#[test]
fn families() {
assert_eq!(family(1).unwrap(), (2, 0));
assert_eq!(family(0).unwrap(), (2, 1));
assert_eq!(family(3).unwrap(), (5, 4));
assert_eq!(family(9).unwrap(), (13, 12));
assert_eq!(family(15).unwrap(), (17, 16));
assert_eq!(family(6).unwrap(), (14, 13));
assert_eq!(family(13).unwrap(), (14, 6));
}
#[test]
fn family_branches() {
// A 3 node tree (height 1)
assert_eq!(family_branch(0, 2), [(2, 1)]);
assert_eq!(family_branch(1, 2), [(2, 0)]);
assert_eq!(family_branch(2, 2), []);
// leaf node in a larger tree of 7 nodes (height 2)
assert_eq!(family_branch(0, 6), [(2, 1), (6, 5)]);
// note these only go as far up as the local peak, not necessarily the single root
assert_eq!(family_branch(0, 3), [(2, 1)]);
// pos 4 in a tree of size 4 is a local peak
assert_eq!(family_branch(3, 3), []);
// pos 4 in a tree of size 5 is also still a local peak
assert_eq!(family_branch(3, 4), []);
// pos 4 in a tree of size 6 has a parent and a sibling
assert_eq!(family_branch(3, 5), [(5, 4)]);
// a tree of size 7 is all under a single root
assert_eq!(family_branch(3, 6), [(5, 4), (6, 2)]);
// A tree with over a million nodes in it find the "family path" back up the tree from a leaf node at 0.
// Note: the first two entries in the branch are consistent with a small 7 node tree.
// Note: each sibling is on the left branch, this is an example of the largest possible list of peaks
// before we start combining them into larger peaks.
assert_eq!(family_branch(0, 1_048_999), [
(2, 1),
(6, 5),
(14, 13),
(30, 29),
(62, 61),
(126, 125),
(254, 253),
(510, 509),
(1022, 1021),
(2046, 2045),
(4094, 4093),
(8190, 8189),
(16382, 16381),
(32766, 32765),
(65534, 65533),
(131_070, 131_069),
(262_142, 262_141),
(524_286, 524_285),
(1_048_574, 1_048_573),
]);
}
#[test]
fn find_peaks_when_num_left_gt_zero() {
assert!(find_peaks(0).unwrap().is_empty());
assert_eq!(find_peaks(1).unwrap(), vec![0]);
assert_eq!(find_peaks(2), None);
assert_eq!(find_peaks(3).unwrap(), vec![2]);
assert_eq!(find_peaks(usize::MAX).unwrap(), [18446744073709551614].to_vec());
assert_eq!(find_peaks(usize::MAX - 1), None);
}
}
| {
// This happens, whenever the MMR is not valid, that is, all nodes are not
// fully spawned. For example, in this case
// 2
// / \
// 0 1 3 4
// is invalid, as it can be completed to form
// 6
// / \
// 2 5
// / \ / \
// 0 1 3 4
// which is of size 7 (with single peak [6])
return None;
} | conditional_block |
common.rs | // Copyright 2019. The Tari Project
//
// Redistribution and use in source and binary forms, with or without modification, are permitted provided that the
// following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following
// disclaimer.
//
// 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the
// following disclaimer in the documentation and/or other materials provided with the distribution.
//
// 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote
// products derived from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
// INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
// USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Portions of this file were originally copyrighted (c) 2018 The Grin Developers, issued under the Apache License,
// Version 2.0, available at http://www.apache.org/licenses/LICENSE-2.0.
use std::convert::TryInto;
use digest::Digest;
use tari_common::DomainDigest;
use crate::{error::MerkleMountainRangeError, Hash};
const ALL_ONES: usize = std::usize::MAX;
#[derive(Copy, Clone)]
pub struct LeafIndex(pub usize);
/// Returns the MMR node index derived from the leaf index.
pub fn node_index(leaf_index: LeafIndex) -> usize {
if leaf_index.0 == 0 {
return 0;
}
2 * leaf_index.0 - leaf_index.0.count_ones() as usize
}
/// Returns the leaf index derived from the MMR node index.
pub fn leaf_index(node_index: u32) -> u32 {
let n = checked_n_leaves(node_index as usize)
.expect("checked_n_leaves can only overflow for `usize::MAX` and that is not possible");
// Conversion is safe because n < node_index
n.try_into().unwrap()
}
/// Is this position a leaf in the MMR?
/// We know the positions of all leaves based on the postorder height of an MMR of any size (somewhat unintuitively
/// but this is how the PMMR is "append only").
pub fn is_leaf(pos: usize) -> bool {
bintree_height(pos) == 0
}
/// Gets the postorder traversal index of all peaks in a MMR given its size.
/// Starts with the top peak, which is always on the left side of the range, and navigates toward lower siblings
/// toward the right of the range.
pub fn find_peaks(size: usize) -> Option<Vec<usize>> {
if size == 0 {
return Some(vec![]);
}
let mut peak_size = ALL_ONES >> size.leading_zeros();
let mut num_left = size;
let mut sum_prev_peaks = 0;
let mut peaks = vec![];
while peak_size != 0 {
if num_left >= peak_size {
peaks.push(sum_prev_peaks + peak_size - 1);
sum_prev_peaks += peak_size;
num_left -= peak_size;
}
peak_size >>= 1;
}
if num_left > 0 {
// This happens, whenever the MMR is not valid, that is, all nodes are not
// fully spawned. For example, in this case
// 2
// / \
// 0 1 3 4
// is invalid, as it can be completed to form
// 6
// / \
// 2 5
// / \ / \
// 0 1 3 4
// which is of size 7 (with single peak [6])
return None;
}
Some(peaks)
}
/// Calculates the positions of the (parent, sibling) of the node at the provided position.
/// Returns an error if the pos provided would result in an underflow or overflow.
pub fn family(pos: usize) -> Result<(usize, usize), MerkleMountainRangeError> {
let (peak_map, height) = peak_map_height(pos);
let peak = 1 << height;
// Convert to i128 so that we don't over/underflow, and then we will cast back to usize after
let pos = pos as i128;
let peak = i128::from(peak);
let peak_map = peak_map as i128;
let res = if (peak_map & peak) == 0 {
(pos + 2 * peak, pos + 2 * peak - 1)
} else {
(pos + 1, pos + 1 - 2 * peak)
};
Ok((
res.0.try_into().map_err(|_| MerkleMountainRangeError::OutOfRange)?,
res.1.try_into().map_err(|_| MerkleMountainRangeError::OutOfRange)?,
))
}
/// For a given starting position calculate the parent and sibling positions
/// for the branch/path from that position to the peak of the tree.
/// We will use the sibling positions to generate the "path" of a Merkle proof.
pub fn family_branch(pos: usize, last_pos: usize) -> Vec<(usize, usize)> {
// loop going up the tree, from node to parent, as long as we stay inside
// the tree (as defined by last_pos).
let (peak_map, height) = peak_map_height(pos);
let mut peak = 1 << height;
let mut branch = vec![];
let mut current = pos;
let mut sibling;
while current < last_pos {
if (peak_map & peak) == 0 {
current += 2 * peak;
sibling = current - 1;
} else {
current += 1;
sibling = current - 2 * peak;
};
if current > last_pos {
break;
}
branch.push((current, sibling));
peak <<= 1;
}
branch
}
/// The height of a node in a full binary tree from its index.
pub fn bintree_height(num: usize) -> usize {
if num == 0 {
return 0;
}
peak_map_height(num).1
}
/// return (peak_map, pos_height) of given 0-based node pos prior to its addition
/// Example: on input 4 returns (0b11, 0) as mmr state before adding 4 was
/// 2
/// / \
/// 0 1 3
/// with 0b11 indicating presence of peaks of height 0 and 1.
/// NOTE:
/// the peak map also encodes the path taken from the root to the added node since the path turns left (resp. right)
/// if-and-only-if a peak at that height is absent (resp. present)
pub fn peak_map_height(mut pos: usize) -> (usize, usize) {
if pos == 0 {
return (0, 0);
}
let mut peak_size = ALL_ONES >> pos.leading_zeros();
let mut bitmap = 0;
while peak_size != 0 {
bitmap <<= 1;
if pos >= peak_size {
pos -= peak_size;
bitmap |= 1;
}
peak_size >>= 1;
}
(bitmap, pos)
}
/// Is the node at this pos the "left" sibling of its parent?
pub fn is_left_sibling(pos: usize) -> bool {
let (peak_map, height) = peak_map_height(pos);
let peak = 1 << height;
(peak_map & peak) == 0
}
pub fn hash_together<D: Digest + DomainDigest>(left: &[u8], right: &[u8]) -> Hash {
D::new().chain_update(left).chain_update(right).finalize().to_vec()
}
/// The number of leaves in a MMR of the provided size.
/// Example: on input 5 returns (2 + 1 + 1) as mmr state before adding 5 was
/// 2
/// / \
/// 0 1 3 4
/// None is returned if the number of leaves exceeds the maximum value of a usize
pub fn checked_n_leaves(size: usize) -> Option<usize> {
if size == 0 {
return Some(0);
}
if size == usize::MAX {
return None;
}
let mut peak_size = ALL_ONES >> size.leading_zeros();
let mut nleaves = 0usize;
let mut size_left = size;
while peak_size != 0 {
if size_left >= peak_size {
nleaves += (peak_size + 1) >> 1;
size_left -= peak_size;
}
peak_size >>= 1;
}
if size_left == 0 {
Some(nleaves)
} else {
Some(nleaves + 1)
}
}
#[cfg(test)]
mod test {
use super::*;
#[test]
fn leaf_to_node_indices() {
assert_eq!(node_index(LeafIndex(0)), 0);
assert_eq!(node_index(LeafIndex(1)), 1);
assert_eq!(node_index(LeafIndex(2)), 3);
assert_eq!(node_index(LeafIndex(3)), 4);
assert_eq!(node_index(LeafIndex(5)), 8);
assert_eq!(node_index(LeafIndex(6)), 10);
assert_eq!(node_index(LeafIndex(7)), 11);
assert_eq!(node_index(LeafIndex(8)), 15);
}
#[test]
fn n_leaf_nodes() {
assert_eq!(checked_n_leaves(0), Some(0));
assert_eq!(checked_n_leaves(1), Some(1));
assert_eq!(checked_n_leaves(3), Some(2));
assert_eq!(checked_n_leaves(4), Some(3));
assert_eq!(checked_n_leaves(5), Some(4));
assert_eq!(checked_n_leaves(8), Some(5));
assert_eq!(checked_n_leaves(10), Some(6));
assert_eq!(checked_n_leaves(11), Some(7));
assert_eq!(checked_n_leaves(15), Some(8));
assert_eq!(checked_n_leaves(usize::MAX - 1), Some(9223372036854775808));
// Overflowed
assert_eq!(checked_n_leaves(usize::MAX), None);
}
#[test]
fn peak_vectors() {
assert_eq!(find_peaks(0), Some(Vec::<usize>::new()));
assert_eq!(find_peaks(1), Some(vec![0]));
assert_eq!(find_peaks(2), None);
assert_eq!(find_peaks(3), Some(vec![2]));
assert_eq!(find_peaks(4), Some(vec![2, 3]));
assert_eq!(find_peaks(5), None);
assert_eq!(find_peaks(6), None);
assert_eq!(find_peaks(7), Some(vec![6]));
assert_eq!(find_peaks(8), Some(vec![6, 7]));
assert_eq!(find_peaks(9), None);
assert_eq!(find_peaks(10), Some(vec![6, 9]));
assert_eq!(find_peaks(11), Some(vec![6, 9, 10]));
assert_eq!(find_peaks(12), None);
assert_eq!(find_peaks(13), None);
assert_eq!(find_peaks(14), None);
assert_eq!(find_peaks(15), Some(vec![14]));
assert_eq!(find_peaks(16), Some(vec![14, 15]));
assert_eq!(find_peaks(17), None);
assert_eq!(find_peaks(18), Some(vec![14, 17]));
assert_eq!(find_peaks(19), Some(vec![14, 17, 18]));
assert_eq!(find_peaks(20), None);
assert_eq!(find_peaks(21), None);
assert_eq!(find_peaks(22), Some(vec![14, 21]));
assert_eq!(find_peaks(23), Some(vec![14, 21, 22]));
assert_eq!(find_peaks(24), None);
assert_eq!(find_peaks(25), Some(vec![14, 21, 24]));
assert_eq!(find_peaks(26), Some(vec![14, 21, 24, 25]));
assert_eq!(find_peaks(27), None);
assert_eq!(find_peaks(28), None);
assert_eq!(find_peaks(56), Some(vec![30, 45, 52, 55]));
assert_eq!(find_peaks(60), None);
assert_eq!(find_peaks(123), None);
assert_eq!(find_peaks(130), Some(vec![126, 129]));
}
#[test]
fn | () {
assert_eq!(peak_map_height(0), (0, 0));
assert_eq!(peak_map_height(4), (0b11, 0));
// 6
// 2 5
// 0 1 3 4 7 8
assert_eq!(peak_map_height(9), (0b101, 1));
// 6
// 2 5 9
// 0 1 3 4 7 8 *
assert_eq!(peak_map_height(10), (0b110, 0));
assert_eq!(peak_map_height(12), (0b111, 1));
assert_eq!(peak_map_height(33), (0b10001, 1));
assert_eq!(peak_map_height(34), (0b10010, 0));
}
#[test]
fn is_sibling_left() {
assert!(is_left_sibling(0));
assert!(!is_left_sibling(1));
assert!(is_left_sibling(2));
assert!(is_left_sibling(3));
assert!(!is_left_sibling(4));
assert!(!is_left_sibling(5));
assert!(is_left_sibling(6));
assert!(is_left_sibling(7));
assert!(!is_left_sibling(8));
assert!(is_left_sibling(9));
assert!(is_left_sibling(10));
assert!(!is_left_sibling(11));
assert!(!is_left_sibling(12));
assert!(!is_left_sibling(13));
assert!(is_left_sibling(14));
assert!(is_left_sibling(15));
}
#[test]
fn families() {
assert_eq!(family(1).unwrap(), (2, 0));
assert_eq!(family(0).unwrap(), (2, 1));
assert_eq!(family(3).unwrap(), (5, 4));
assert_eq!(family(9).unwrap(), (13, 12));
assert_eq!(family(15).unwrap(), (17, 16));
assert_eq!(family(6).unwrap(), (14, 13));
assert_eq!(family(13).unwrap(), (14, 6));
}
#[test]
fn family_branches() {
// A 3 node tree (height 1)
assert_eq!(family_branch(0, 2), [(2, 1)]);
assert_eq!(family_branch(1, 2), [(2, 0)]);
assert_eq!(family_branch(2, 2), []);
// leaf node in a larger tree of 7 nodes (height 2)
assert_eq!(family_branch(0, 6), [(2, 1), (6, 5)]);
// note these only go as far up as the local peak, not necessarily the single root
assert_eq!(family_branch(0, 3), [(2, 1)]);
// pos 4 in a tree of size 4 is a local peak
assert_eq!(family_branch(3, 3), []);
// pos 4 in a tree of size 5 is also still a local peak
assert_eq!(family_branch(3, 4), []);
// pos 4 in a tree of size 6 has a parent and a sibling
assert_eq!(family_branch(3, 5), [(5, 4)]);
// a tree of size 7 is all under a single root
assert_eq!(family_branch(3, 6), [(5, 4), (6, 2)]);
// A tree with over a million nodes in it find the "family path" back up the tree from a leaf node at 0.
// Note: the first two entries in the branch are consistent with a small 7 node tree.
// Note: each sibling is on the left branch, this is an example of the largest possible list of peaks
// before we start combining them into larger peaks.
assert_eq!(family_branch(0, 1_048_999), [
(2, 1),
(6, 5),
(14, 13),
(30, 29),
(62, 61),
(126, 125),
(254, 253),
(510, 509),
(1022, 1021),
(2046, 2045),
(4094, 4093),
(8190, 8189),
(16382, 16381),
(32766, 32765),
(65534, 65533),
(131_070, 131_069),
(262_142, 262_141),
(524_286, 524_285),
(1_048_574, 1_048_573),
]);
}
#[test]
fn find_peaks_when_num_left_gt_zero() {
assert!(find_peaks(0).unwrap().is_empty());
assert_eq!(find_peaks(1).unwrap(), vec![0]);
assert_eq!(find_peaks(2), None);
assert_eq!(find_peaks(3).unwrap(), vec![2]);
assert_eq!(find_peaks(usize::MAX).unwrap(), [18446744073709551614].to_vec());
assert_eq!(find_peaks(usize::MAX - 1), None);
}
}
| peak_map_heights | identifier_name |
common.rs | // Copyright 2019. The Tari Project
//
// Redistribution and use in source and binary forms, with or without modification, are permitted provided that the
// following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following
// disclaimer.
//
// 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the
// following disclaimer in the documentation and/or other materials provided with the distribution.
//
// 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote
// products derived from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
// INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
// USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Portions of this file were originally copyrighted (c) 2018 The Grin Developers, issued under the Apache License,
// Version 2.0, available at http://www.apache.org/licenses/LICENSE-2.0.
use std::convert::TryInto;
use digest::Digest;
use tari_common::DomainDigest;
use crate::{error::MerkleMountainRangeError, Hash};
const ALL_ONES: usize = std::usize::MAX;
#[derive(Copy, Clone)]
pub struct LeafIndex(pub usize);
/// Returns the MMR node index derived from the leaf index.
pub fn node_index(leaf_index: LeafIndex) -> usize {
if leaf_index.0 == 0 {
return 0;
}
2 * leaf_index.0 - leaf_index.0.count_ones() as usize
}
/// Returns the leaf index derived from the MMR node index.
pub fn leaf_index(node_index: u32) -> u32 {
let n = checked_n_leaves(node_index as usize)
.expect("checked_n_leaves can only overflow for `usize::MAX` and that is not possible");
// Conversion is safe because n < node_index
n.try_into().unwrap()
}
/// Is this position a leaf in the MMR?
/// We know the positions of all leaves based on the postorder height of an MMR of any size (somewhat unintuitively
/// but this is how the PMMR is "append only").
pub fn is_leaf(pos: usize) -> bool {
bintree_height(pos) == 0
}
/// Gets the postorder traversal index of all peaks in a MMR given its size.
/// Starts with the top peak, which is always on the left side of the range, and navigates toward lower siblings
/// toward the right of the range.
pub fn find_peaks(size: usize) -> Option<Vec<usize>> {
if size == 0 {
return Some(vec![]);
}
let mut peak_size = ALL_ONES >> size.leading_zeros();
let mut num_left = size;
let mut sum_prev_peaks = 0;
let mut peaks = vec![];
while peak_size != 0 {
if num_left >= peak_size {
peaks.push(sum_prev_peaks + peak_size - 1);
sum_prev_peaks += peak_size;
num_left -= peak_size;
}
peak_size >>= 1;
}
if num_left > 0 {
// This happens, whenever the MMR is not valid, that is, all nodes are not
// fully spawned. For example, in this case
// 2
// / \
// 0 1 3 4
// is invalid, as it can be completed to form
// 6
// / \
// 2 5
// / \ / \
// 0 1 3 4
// which is of size 7 (with single peak [6])
return None;
}
Some(peaks)
}
/// Calculates the positions of the (parent, sibling) of the node at the provided position.
/// Returns an error if the pos provided would result in an underflow or overflow.
pub fn family(pos: usize) -> Result<(usize, usize), MerkleMountainRangeError> {
let (peak_map, height) = peak_map_height(pos);
let peak = 1 << height;
// Convert to i128 so that we don't over/underflow, and then we will cast back to usize after
let pos = pos as i128;
let peak = i128::from(peak);
let peak_map = peak_map as i128;
let res = if (peak_map & peak) == 0 {
(pos + 2 * peak, pos + 2 * peak - 1)
} else {
(pos + 1, pos + 1 - 2 * peak)
};
Ok((
res.0.try_into().map_err(|_| MerkleMountainRangeError::OutOfRange)?,
res.1.try_into().map_err(|_| MerkleMountainRangeError::OutOfRange)?,
))
}
/// For a given starting position calculate the parent and sibling positions
/// for the branch/path from that position to the peak of the tree.
/// We will use the sibling positions to generate the "path" of a Merkle proof.
pub fn family_branch(pos: usize, last_pos: usize) -> Vec<(usize, usize)> {
// loop going up the tree, from node to parent, as long as we stay inside
// the tree (as defined by last_pos).
let (peak_map, height) = peak_map_height(pos);
let mut peak = 1 << height;
let mut branch = vec![];
let mut current = pos;
let mut sibling;
while current < last_pos {
if (peak_map & peak) == 0 {
current += 2 * peak;
sibling = current - 1;
} else {
current += 1;
sibling = current - 2 * peak;
};
if current > last_pos {
break;
}
branch.push((current, sibling));
peak <<= 1;
}
branch
}
/// The height of a node in a full binary tree from its index.
pub fn bintree_height(num: usize) -> usize {
if num == 0 {
return 0;
}
peak_map_height(num).1
}
/// return (peak_map, pos_height) of given 0-based node pos prior to its addition
/// Example: on input 4 returns (0b11, 0) as mmr state before adding 4 was
/// 2
/// / \
/// 0 1 3
/// with 0b11 indicating presence of peaks of height 0 and 1.
/// NOTE:
/// the peak map also encodes the path taken from the root to the added node since the path turns left (resp. right)
/// if-and-only-if a peak at that height is absent (resp. present)
pub fn peak_map_height(mut pos: usize) -> (usize, usize) {
if pos == 0 {
return (0, 0);
}
let mut peak_size = ALL_ONES >> pos.leading_zeros();
let mut bitmap = 0;
while peak_size != 0 {
bitmap <<= 1;
if pos >= peak_size {
pos -= peak_size;
bitmap |= 1;
}
peak_size >>= 1;
}
(bitmap, pos)
}
/// Is the node at this pos the "left" sibling of its parent?
pub fn is_left_sibling(pos: usize) -> bool {
let (peak_map, height) = peak_map_height(pos);
let peak = 1 << height;
(peak_map & peak) == 0
}
pub fn hash_together<D: Digest + DomainDigest>(left: &[u8], right: &[u8]) -> Hash {
D::new().chain_update(left).chain_update(right).finalize().to_vec()
}
/// The number of leaves in a MMR of the provided size.
/// Example: on input 5 returns (2 + 1 + 1) as mmr state before adding 5 was
/// 2
/// / \
/// 0 1 3 4
/// None is returned if the number of leaves exceeds the maximum value of a usize
pub fn checked_n_leaves(size: usize) -> Option<usize> {
if size == 0 {
return Some(0);
}
if size == usize::MAX {
return None;
}
let mut peak_size = ALL_ONES >> size.leading_zeros();
let mut nleaves = 0usize;
let mut size_left = size;
while peak_size != 0 {
if size_left >= peak_size {
nleaves += (peak_size + 1) >> 1;
size_left -= peak_size;
}
peak_size >>= 1;
}
if size_left == 0 {
Some(nleaves)
} else {
Some(nleaves + 1)
}
}
#[cfg(test)]
mod test {
use super::*;
#[test]
fn leaf_to_node_indices() {
assert_eq!(node_index(LeafIndex(0)), 0);
assert_eq!(node_index(LeafIndex(1)), 1);
assert_eq!(node_index(LeafIndex(2)), 3);
assert_eq!(node_index(LeafIndex(3)), 4);
assert_eq!(node_index(LeafIndex(5)), 8);
assert_eq!(node_index(LeafIndex(6)), 10);
assert_eq!(node_index(LeafIndex(7)), 11);
assert_eq!(node_index(LeafIndex(8)), 15);
}
#[test]
fn n_leaf_nodes() {
assert_eq!(checked_n_leaves(0), Some(0));
assert_eq!(checked_n_leaves(1), Some(1));
assert_eq!(checked_n_leaves(3), Some(2));
assert_eq!(checked_n_leaves(4), Some(3));
assert_eq!(checked_n_leaves(5), Some(4));
assert_eq!(checked_n_leaves(8), Some(5));
assert_eq!(checked_n_leaves(10), Some(6));
assert_eq!(checked_n_leaves(11), Some(7));
assert_eq!(checked_n_leaves(15), Some(8));
assert_eq!(checked_n_leaves(usize::MAX - 1), Some(9223372036854775808));
// Overflowed
assert_eq!(checked_n_leaves(usize::MAX), None);
}
#[test]
fn peak_vectors() {
assert_eq!(find_peaks(0), Some(Vec::<usize>::new()));
assert_eq!(find_peaks(1), Some(vec![0]));
assert_eq!(find_peaks(2), None);
assert_eq!(find_peaks(3), Some(vec![2]));
assert_eq!(find_peaks(4), Some(vec![2, 3]));
assert_eq!(find_peaks(5), None);
assert_eq!(find_peaks(6), None);
assert_eq!(find_peaks(7), Some(vec![6]));
assert_eq!(find_peaks(8), Some(vec![6, 7]));
assert_eq!(find_peaks(9), None);
assert_eq!(find_peaks(10), Some(vec![6, 9]));
assert_eq!(find_peaks(11), Some(vec![6, 9, 10]));
assert_eq!(find_peaks(12), None);
assert_eq!(find_peaks(13), None);
assert_eq!(find_peaks(14), None);
assert_eq!(find_peaks(15), Some(vec![14]));
assert_eq!(find_peaks(16), Some(vec![14, 15]));
assert_eq!(find_peaks(17), None);
assert_eq!(find_peaks(18), Some(vec![14, 17]));
assert_eq!(find_peaks(19), Some(vec![14, 17, 18]));
assert_eq!(find_peaks(20), None);
assert_eq!(find_peaks(21), None);
assert_eq!(find_peaks(22), Some(vec![14, 21]));
assert_eq!(find_peaks(23), Some(vec![14, 21, 22]));
assert_eq!(find_peaks(24), None);
assert_eq!(find_peaks(25), Some(vec![14, 21, 24]));
assert_eq!(find_peaks(26), Some(vec![14, 21, 24, 25]));
assert_eq!(find_peaks(27), None);
assert_eq!(find_peaks(28), None);
assert_eq!(find_peaks(56), Some(vec![30, 45, 52, 55]));
assert_eq!(find_peaks(60), None);
assert_eq!(find_peaks(123), None);
assert_eq!(find_peaks(130), Some(vec![126, 129]));
}
#[test]
fn peak_map_heights() {
assert_eq!(peak_map_height(0), (0, 0));
assert_eq!(peak_map_height(4), (0b11, 0));
// 6
// 2 5
// 0 1 3 4 7 8
assert_eq!(peak_map_height(9), (0b101, 1));
// 6
// 2 5 9
// 0 1 3 4 7 8 *
assert_eq!(peak_map_height(10), (0b110, 0));
assert_eq!(peak_map_height(12), (0b111, 1));
assert_eq!(peak_map_height(33), (0b10001, 1));
assert_eq!(peak_map_height(34), (0b10010, 0));
}
#[test]
fn is_sibling_left() {
assert!(is_left_sibling(0));
assert!(!is_left_sibling(1));
assert!(is_left_sibling(2));
assert!(is_left_sibling(3));
assert!(!is_left_sibling(4));
assert!(!is_left_sibling(5));
assert!(is_left_sibling(6));
assert!(is_left_sibling(7));
assert!(!is_left_sibling(8));
assert!(is_left_sibling(9));
assert!(is_left_sibling(10));
assert!(!is_left_sibling(11));
assert!(!is_left_sibling(12));
assert!(!is_left_sibling(13));
assert!(is_left_sibling(14));
assert!(is_left_sibling(15));
}
#[test]
fn families() {
assert_eq!(family(1).unwrap(), (2, 0));
assert_eq!(family(0).unwrap(), (2, 1));
assert_eq!(family(3).unwrap(), (5, 4));
assert_eq!(family(9).unwrap(), (13, 12));
assert_eq!(family(15).unwrap(), (17, 16));
assert_eq!(family(6).unwrap(), (14, 13));
assert_eq!(family(13).unwrap(), (14, 6));
}
#[test]
fn family_branches() {
// A 3 node tree (height 1)
assert_eq!(family_branch(0, 2), [(2, 1)]);
assert_eq!(family_branch(1, 2), [(2, 0)]);
assert_eq!(family_branch(2, 2), []);
// leaf node in a larger tree of 7 nodes (height 2)
assert_eq!(family_branch(0, 6), [(2, 1), (6, 5)]);
// note these only go as far up as the local peak, not necessarily the single root
assert_eq!(family_branch(0, 3), [(2, 1)]);
// pos 4 in a tree of size 4 is a local peak
assert_eq!(family_branch(3, 3), []);
// pos 4 in a tree of size 5 is also still a local peak
assert_eq!(family_branch(3, 4), []);
// pos 4 in a tree of size 6 has a parent and a sibling
assert_eq!(family_branch(3, 5), [(5, 4)]);
// a tree of size 7 is all under a single root | // A tree with over a million nodes in it find the "family path" back up the tree from a leaf node at 0.
// Note: the first two entries in the branch are consistent with a small 7 node tree.
// Note: each sibling is on the left branch, this is an example of the largest possible list of peaks
// before we start combining them into larger peaks.
assert_eq!(family_branch(0, 1_048_999), [
(2, 1),
(6, 5),
(14, 13),
(30, 29),
(62, 61),
(126, 125),
(254, 253),
(510, 509),
(1022, 1021),
(2046, 2045),
(4094, 4093),
(8190, 8189),
(16382, 16381),
(32766, 32765),
(65534, 65533),
(131_070, 131_069),
(262_142, 262_141),
(524_286, 524_285),
(1_048_574, 1_048_573),
]);
}
#[test]
fn find_peaks_when_num_left_gt_zero() {
assert!(find_peaks(0).unwrap().is_empty());
assert_eq!(find_peaks(1).unwrap(), vec![0]);
assert_eq!(find_peaks(2), None);
assert_eq!(find_peaks(3).unwrap(), vec![2]);
assert_eq!(find_peaks(usize::MAX).unwrap(), [18446744073709551614].to_vec());
assert_eq!(find_peaks(usize::MAX - 1), None);
}
} | assert_eq!(family_branch(3, 6), [(5, 4), (6, 2)]);
| random_line_split |
common.rs | // Copyright 2019. The Tari Project
//
// Redistribution and use in source and binary forms, with or without modification, are permitted provided that the
// following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following
// disclaimer.
//
// 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the
// following disclaimer in the documentation and/or other materials provided with the distribution.
//
// 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote
// products derived from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
// INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
// USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Portions of this file were originally copyrighted (c) 2018 The Grin Developers, issued under the Apache License,
// Version 2.0, available at http://www.apache.org/licenses/LICENSE-2.0.
use std::convert::TryInto;
use digest::Digest;
use tari_common::DomainDigest;
use crate::{error::MerkleMountainRangeError, Hash};
const ALL_ONES: usize = std::usize::MAX;
#[derive(Copy, Clone)]
pub struct LeafIndex(pub usize);
/// Returns the MMR node index derived from the leaf index.
pub fn node_index(leaf_index: LeafIndex) -> usize {
if leaf_index.0 == 0 {
return 0;
}
2 * leaf_index.0 - leaf_index.0.count_ones() as usize
}
/// Returns the leaf index derived from the MMR node index.
pub fn leaf_index(node_index: u32) -> u32 {
let n = checked_n_leaves(node_index as usize)
.expect("checked_n_leaves can only overflow for `usize::MAX` and that is not possible");
// Conversion is safe because n < node_index
n.try_into().unwrap()
}
/// Is this position a leaf in the MMR?
/// We know the positions of all leaves based on the postorder height of an MMR of any size (somewhat unintuitively
/// but this is how the PMMR is "append only").
pub fn is_leaf(pos: usize) -> bool {
bintree_height(pos) == 0
}
/// Gets the postorder traversal index of all peaks in a MMR given its size.
/// Starts with the top peak, which is always on the left side of the range, and navigates toward lower siblings
/// toward the right of the range.
pub fn find_peaks(size: usize) -> Option<Vec<usize>> {
if size == 0 {
return Some(vec![]);
}
let mut peak_size = ALL_ONES >> size.leading_zeros();
let mut num_left = size;
let mut sum_prev_peaks = 0;
let mut peaks = vec![];
while peak_size != 0 {
if num_left >= peak_size {
peaks.push(sum_prev_peaks + peak_size - 1);
sum_prev_peaks += peak_size;
num_left -= peak_size;
}
peak_size >>= 1;
}
if num_left > 0 {
// This happens, whenever the MMR is not valid, that is, all nodes are not
// fully spawned. For example, in this case
// 2
// / \
// 0 1 3 4
// is invalid, as it can be completed to form
// 6
// / \
// 2 5
// / \ / \
// 0 1 3 4
// which is of size 7 (with single peak [6])
return None;
}
Some(peaks)
}
/// Calculates the positions of the (parent, sibling) of the node at the provided position.
/// Returns an error if the pos provided would result in an underflow or overflow.
pub fn family(pos: usize) -> Result<(usize, usize), MerkleMountainRangeError> {
let (peak_map, height) = peak_map_height(pos);
let peak = 1 << height;
// Convert to i128 so that we don't over/underflow, and then we will cast back to usize after
let pos = pos as i128;
let peak = i128::from(peak);
let peak_map = peak_map as i128;
let res = if (peak_map & peak) == 0 {
(pos + 2 * peak, pos + 2 * peak - 1)
} else {
(pos + 1, pos + 1 - 2 * peak)
};
Ok((
res.0.try_into().map_err(|_| MerkleMountainRangeError::OutOfRange)?,
res.1.try_into().map_err(|_| MerkleMountainRangeError::OutOfRange)?,
))
}
/// For a given starting position calculate the parent and sibling positions
/// for the branch/path from that position to the peak of the tree.
/// We will use the sibling positions to generate the "path" of a Merkle proof.
pub fn family_branch(pos: usize, last_pos: usize) -> Vec<(usize, usize)> {
// loop going up the tree, from node to parent, as long as we stay inside
// the tree (as defined by last_pos).
let (peak_map, height) = peak_map_height(pos);
let mut peak = 1 << height;
let mut branch = vec![];
let mut current = pos;
let mut sibling;
while current < last_pos {
if (peak_map & peak) == 0 {
current += 2 * peak;
sibling = current - 1;
} else {
current += 1;
sibling = current - 2 * peak;
};
if current > last_pos {
break;
}
branch.push((current, sibling));
peak <<= 1;
}
branch
}
/// The height of a node in a full binary tree from its index.
pub fn bintree_height(num: usize) -> usize {
if num == 0 {
return 0;
}
peak_map_height(num).1
}
/// return (peak_map, pos_height) of given 0-based node pos prior to its addition
/// Example: on input 4 returns (0b11, 0) as mmr state before adding 4 was
/// 2
/// / \
/// 0 1 3
/// with 0b11 indicating presence of peaks of height 0 and 1.
/// NOTE:
/// the peak map also encodes the path taken from the root to the added node since the path turns left (resp. right)
/// if-and-only-if a peak at that height is absent (resp. present)
pub fn peak_map_height(mut pos: usize) -> (usize, usize) |
/// Is the node at this pos the "left" sibling of its parent?
pub fn is_left_sibling(pos: usize) -> bool {
let (peak_map, height) = peak_map_height(pos);
let peak = 1 << height;
(peak_map & peak) == 0
}
pub fn hash_together<D: Digest + DomainDigest>(left: &[u8], right: &[u8]) -> Hash {
D::new().chain_update(left).chain_update(right).finalize().to_vec()
}
/// The number of leaves in a MMR of the provided size.
/// Example: on input 5 returns (2 + 1 + 1) as mmr state before adding 5 was
/// 2
/// / \
/// 0 1 3 4
/// None is returned if the number of leaves exceeds the maximum value of a usize
pub fn checked_n_leaves(size: usize) -> Option<usize> {
if size == 0 {
return Some(0);
}
if size == usize::MAX {
return None;
}
let mut peak_size = ALL_ONES >> size.leading_zeros();
let mut nleaves = 0usize;
let mut size_left = size;
while peak_size != 0 {
if size_left >= peak_size {
nleaves += (peak_size + 1) >> 1;
size_left -= peak_size;
}
peak_size >>= 1;
}
if size_left == 0 {
Some(nleaves)
} else {
Some(nleaves + 1)
}
}
#[cfg(test)]
mod test {
use super::*;
#[test]
fn leaf_to_node_indices() {
assert_eq!(node_index(LeafIndex(0)), 0);
assert_eq!(node_index(LeafIndex(1)), 1);
assert_eq!(node_index(LeafIndex(2)), 3);
assert_eq!(node_index(LeafIndex(3)), 4);
assert_eq!(node_index(LeafIndex(5)), 8);
assert_eq!(node_index(LeafIndex(6)), 10);
assert_eq!(node_index(LeafIndex(7)), 11);
assert_eq!(node_index(LeafIndex(8)), 15);
}
#[test]
fn n_leaf_nodes() {
assert_eq!(checked_n_leaves(0), Some(0));
assert_eq!(checked_n_leaves(1), Some(1));
assert_eq!(checked_n_leaves(3), Some(2));
assert_eq!(checked_n_leaves(4), Some(3));
assert_eq!(checked_n_leaves(5), Some(4));
assert_eq!(checked_n_leaves(8), Some(5));
assert_eq!(checked_n_leaves(10), Some(6));
assert_eq!(checked_n_leaves(11), Some(7));
assert_eq!(checked_n_leaves(15), Some(8));
assert_eq!(checked_n_leaves(usize::MAX - 1), Some(9223372036854775808));
// Overflowed
assert_eq!(checked_n_leaves(usize::MAX), None);
}
#[test]
fn peak_vectors() {
assert_eq!(find_peaks(0), Some(Vec::<usize>::new()));
assert_eq!(find_peaks(1), Some(vec![0]));
assert_eq!(find_peaks(2), None);
assert_eq!(find_peaks(3), Some(vec![2]));
assert_eq!(find_peaks(4), Some(vec![2, 3]));
assert_eq!(find_peaks(5), None);
assert_eq!(find_peaks(6), None);
assert_eq!(find_peaks(7), Some(vec![6]));
assert_eq!(find_peaks(8), Some(vec![6, 7]));
assert_eq!(find_peaks(9), None);
assert_eq!(find_peaks(10), Some(vec![6, 9]));
assert_eq!(find_peaks(11), Some(vec![6, 9, 10]));
assert_eq!(find_peaks(12), None);
assert_eq!(find_peaks(13), None);
assert_eq!(find_peaks(14), None);
assert_eq!(find_peaks(15), Some(vec![14]));
assert_eq!(find_peaks(16), Some(vec![14, 15]));
assert_eq!(find_peaks(17), None);
assert_eq!(find_peaks(18), Some(vec![14, 17]));
assert_eq!(find_peaks(19), Some(vec![14, 17, 18]));
assert_eq!(find_peaks(20), None);
assert_eq!(find_peaks(21), None);
assert_eq!(find_peaks(22), Some(vec![14, 21]));
assert_eq!(find_peaks(23), Some(vec![14, 21, 22]));
assert_eq!(find_peaks(24), None);
assert_eq!(find_peaks(25), Some(vec![14, 21, 24]));
assert_eq!(find_peaks(26), Some(vec![14, 21, 24, 25]));
assert_eq!(find_peaks(27), None);
assert_eq!(find_peaks(28), None);
assert_eq!(find_peaks(56), Some(vec![30, 45, 52, 55]));
assert_eq!(find_peaks(60), None);
assert_eq!(find_peaks(123), None);
assert_eq!(find_peaks(130), Some(vec![126, 129]));
}
#[test]
fn peak_map_heights() {
assert_eq!(peak_map_height(0), (0, 0));
assert_eq!(peak_map_height(4), (0b11, 0));
// 6
// 2 5
// 0 1 3 4 7 8
assert_eq!(peak_map_height(9), (0b101, 1));
// 6
// 2 5 9
// 0 1 3 4 7 8 *
assert_eq!(peak_map_height(10), (0b110, 0));
assert_eq!(peak_map_height(12), (0b111, 1));
assert_eq!(peak_map_height(33), (0b10001, 1));
assert_eq!(peak_map_height(34), (0b10010, 0));
}
#[test]
fn is_sibling_left() {
assert!(is_left_sibling(0));
assert!(!is_left_sibling(1));
assert!(is_left_sibling(2));
assert!(is_left_sibling(3));
assert!(!is_left_sibling(4));
assert!(!is_left_sibling(5));
assert!(is_left_sibling(6));
assert!(is_left_sibling(7));
assert!(!is_left_sibling(8));
assert!(is_left_sibling(9));
assert!(is_left_sibling(10));
assert!(!is_left_sibling(11));
assert!(!is_left_sibling(12));
assert!(!is_left_sibling(13));
assert!(is_left_sibling(14));
assert!(is_left_sibling(15));
}
#[test]
fn families() {
assert_eq!(family(1).unwrap(), (2, 0));
assert_eq!(family(0).unwrap(), (2, 1));
assert_eq!(family(3).unwrap(), (5, 4));
assert_eq!(family(9).unwrap(), (13, 12));
assert_eq!(family(15).unwrap(), (17, 16));
assert_eq!(family(6).unwrap(), (14, 13));
assert_eq!(family(13).unwrap(), (14, 6));
}
#[test]
fn family_branches() {
// A 3 node tree (height 1)
assert_eq!(family_branch(0, 2), [(2, 1)]);
assert_eq!(family_branch(1, 2), [(2, 0)]);
assert_eq!(family_branch(2, 2), []);
// leaf node in a larger tree of 7 nodes (height 2)
assert_eq!(family_branch(0, 6), [(2, 1), (6, 5)]);
// note these only go as far up as the local peak, not necessarily the single root
assert_eq!(family_branch(0, 3), [(2, 1)]);
// pos 4 in a tree of size 4 is a local peak
assert_eq!(family_branch(3, 3), []);
// pos 4 in a tree of size 5 is also still a local peak
assert_eq!(family_branch(3, 4), []);
// pos 4 in a tree of size 6 has a parent and a sibling
assert_eq!(family_branch(3, 5), [(5, 4)]);
// a tree of size 7 is all under a single root
assert_eq!(family_branch(3, 6), [(5, 4), (6, 2)]);
// A tree with over a million nodes in it find the "family path" back up the tree from a leaf node at 0.
// Note: the first two entries in the branch are consistent with a small 7 node tree.
// Note: each sibling is on the left branch, this is an example of the largest possible list of peaks
// before we start combining them into larger peaks.
assert_eq!(family_branch(0, 1_048_999), [
(2, 1),
(6, 5),
(14, 13),
(30, 29),
(62, 61),
(126, 125),
(254, 253),
(510, 509),
(1022, 1021),
(2046, 2045),
(4094, 4093),
(8190, 8189),
(16382, 16381),
(32766, 32765),
(65534, 65533),
(131_070, 131_069),
(262_142, 262_141),
(524_286, 524_285),
(1_048_574, 1_048_573),
]);
}
#[test]
fn find_peaks_when_num_left_gt_zero() {
assert!(find_peaks(0).unwrap().is_empty());
assert_eq!(find_peaks(1).unwrap(), vec![0]);
assert_eq!(find_peaks(2), None);
assert_eq!(find_peaks(3).unwrap(), vec![2]);
assert_eq!(find_peaks(usize::MAX).unwrap(), [18446744073709551614].to_vec());
assert_eq!(find_peaks(usize::MAX - 1), None);
}
}
| {
if pos == 0 {
return (0, 0);
}
let mut peak_size = ALL_ONES >> pos.leading_zeros();
let mut bitmap = 0;
while peak_size != 0 {
bitmap <<= 1;
if pos >= peak_size {
pos -= peak_size;
bitmap |= 1;
}
peak_size >>= 1;
}
(bitmap, pos)
} | identifier_body |
search_log.go | // Copyright 2019 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package sysutil
import (
"bufio"
"context"
"errors"
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"regexp"
"sort"
"strings"
"time"
pb "github.com/pingcap/kvproto/pkg/diagnosticspb"
)
type logFile struct {
file *os.File // The opened file handle
begin, end int64 // The timesteamp in millisecond of first line
}
func (l *logFile) BeginTime() int64 {
return l.begin
}
func (l *logFile) EndTime() int64 {
return l.end
}
func resolveFiles(ctx context.Context, logFilePath string, beginTime, endTime int64) ([]logFile, error) {
if logFilePath == "" {
return nil, errors.New("empty log file location configuration")
}
var logFiles []logFile
var skipFiles []*os.File
logDir := filepath.Dir(logFilePath)
ext := filepath.Ext(logFilePath)
filePrefix := logFilePath[:len(logFilePath)-len(ext)]
files, err := ioutil.ReadDir(logDir)
if err != nil {
return nil, err
}
walkFn := func(path string, info os.FileInfo) error {
if info.IsDir() {
return nil
}
// All rotated log files have the same prefix and extension with the original file
if !strings.HasPrefix(path, filePrefix) {
return nil
}
if !strings.HasSuffix(path, ext) {
return nil
}
if isCtxDone(ctx) {
return ctx.Err()
}
// If we cannot open the file, we skip to search the file instead of returning
// error and abort entire searching task.
// TODO: do we need to return some warning to client?
file, err := os.OpenFile(path, os.O_RDONLY, os.ModePerm)
if err != nil {
return nil
}
reader := bufio.NewReader(file)
firstItem, err := readFirstValidLog(ctx, reader, 10)
if err != nil {
skipFiles = append(skipFiles, file)
return nil
}
lastItem, err := readLastValidLog(ctx, file, 10)
if err != nil {
skipFiles = append(skipFiles, file)
return nil
}
// Reset position to the start and skip this file if cannot seek to start
if _, err := file.Seek(0, io.SeekStart); err != nil {
skipFiles = append(skipFiles, file)
return nil
}
if beginTime > lastItem.Time || endTime < firstItem.Time {
skipFiles = append(skipFiles, file)
} else {
logFiles = append(logFiles, logFile{
file: file,
begin: firstItem.Time,
end: lastItem.Time,
})
}
return nil
}
for _, file := range files {
err := walkFn(filepath.Join(logDir, file.Name()), file)
if err != nil {
return nil, err
}
}
defer func() {
for _, f := range skipFiles {
_ = f.Close()
}
}()
// Sort by start time
sort.Slice(logFiles, func(i, j int) bool {
return logFiles[i].begin < logFiles[j].begin
})
return logFiles, err
}
func isCtxDone(ctx context.Context) bool {
select {
case <-ctx.Done():
return true
default:
return false
}
}
func readFirstValidLog(ctx context.Context, reader *bufio.Reader, tryLines int64) (*pb.LogMessage, error) {
var tried int64
for {
line, err := readLine(reader)
if err != nil {
return nil, err
}
item, err := parseLogItem(line)
if err == nil {
return item, nil
}
tried++
if tried >= tryLines {
break
}
if isCtxDone(ctx) {
return nil, ctx.Err()
}
}
return nil, errors.New("not a valid log file")
}
func readLastValidLog(ctx context.Context, file *os.File, tryLines int) (*pb.LogMessage, error) {
var tried int
stat, _ := file.Stat()
endCursor := stat.Size()
for {
lines, readBytes, err := readLastLines(ctx, file, endCursor)
if err != nil {
return nil, err
}
// read out the file
if readBytes == 0 {
break
}
endCursor -= int64(readBytes)
for i := len(lines) - 1; i >= 0; i-- {
item, err := parseLogItem(lines[i])
if err == nil {
return item, nil
}
}
tried += len(lines)
if tried >= tryLines {
break
}
}
return nil, errors.New("not a valid log file")
}
// Read a line from a reader.
func readLine(reader *bufio.Reader) (string, error) {
var line, b []byte
var err error
isPrefix := true
for isPrefix {
b, isPrefix, err = reader.ReadLine()
line = append(line, b...)
if err != nil {
return "", err
}
}
return string(line), nil
}
const maxReadCacheSize = 1024 * 1024 * 16
// Read lines from the end of a file
// endCursor initial value should be the file size
func readLastLines(ctx context.Context, file *os.File, endCursor int64) ([]string, int, error) {
var lines []byte
var firstNonNewlinePos int
var cursor = endCursor
var size int64 = 256
for {
// stop if we are at the begining
// check it in the start to avoid read beyond the size
if cursor <= 0 {
break
}
// enlarge the read cache to avoid too many memory move.
size = size * 2
if size > maxReadCacheSize {
size = maxReadCacheSize
}
if cursor < size {
size = cursor | cursor -= size
_, err := file.Seek(cursor, io.SeekStart)
if err != nil {
return nil, 0, ctx.Err()
}
chars := make([]byte, size)
_, err = file.Read(chars)
if err != nil {
return nil, 0, ctx.Err()
}
lines = append(chars, lines...)
// find first '\n' or '\r'
for i := 0; i < len(chars)-1; i++ {
// reach the line end
// the first newline may be in the line end at the first round
if i >= len(lines)-1 {
break
}
if (chars[i] == 10 || chars[i] == 13) && chars[i+1] != 10 && chars[i+1] != 13 {
firstNonNewlinePos = i + 1
break
}
}
if firstNonNewlinePos > 0 {
break
}
if isCtxDone(ctx) {
return nil, 0, ctx.Err()
}
}
finalStr := string(lines[firstNonNewlinePos:])
return strings.Split(strings.ReplaceAll(finalStr, "\r\n", "\n"), "\n"), len(finalStr), nil
}
// ParseLogLevel returns LogLevel from string and return LogLevel_Info if
// the string is an invalid level string
func ParseLogLevel(s string) pb.LogLevel {
switch s {
case "debug", "DEBUG":
return pb.LogLevel_Debug
case "info", "INFO":
return pb.LogLevel_Info
case "warn", "WARN":
return pb.LogLevel_Warn
case "trace", "TRACE":
return pb.LogLevel_Trace
case "critical", "CRITICAL":
return pb.LogLevel_Critical
case "error", "ERROR":
return pb.LogLevel_Error
default:
return pb.LogLevel_UNKNOWN
}
}
// parses single log line and returns:
// 1. the timesteamp in unix milliseconds
// 2. the log level
// 3. the log item content
//
// [2019/08/26 06:19:13.011 -04:00] [INFO] [printer.go:41] ["Welcome to TiDB."] ["Release Version"=v2.1.14]...
// [2019/08/26 07:19:49.529 -04:00] [INFO] [printer.go:41] ["Welcome to TiDB."] ["Release Version"=v3.0.2]...
// [2019/08/21 01:43:01.460 -04:00] [INFO] [util.go:60] [PD] [release-version=v3.0.2]
// [2019/08/26 07:20:23.815 -04:00] [INFO] [mod.rs:28] ["Release Version: 3.0.2"]
func parseLogItem(s string) (*pb.LogMessage, error) {
timeLeftBound := strings.Index(s, "[")
timeRightBound := strings.Index(s, "]")
if timeLeftBound == -1 || timeRightBound == -1 || timeLeftBound > timeRightBound {
return nil, fmt.Errorf("invalid log string: %s", s)
}
time, err := parseTimeStamp(s[timeLeftBound+1 : timeRightBound])
if err != nil {
return nil, err
}
levelLeftBound := strings.Index(s[timeRightBound+1:], "[")
levelRightBound := strings.Index(s[timeRightBound+1:], "]")
if levelLeftBound == -1 || levelRightBound == -1 || levelLeftBound > levelRightBound {
return nil, fmt.Errorf("invalid log string: %s", s)
}
level := ParseLogLevel(s[timeRightBound+1+levelLeftBound+1 : timeRightBound+1+levelRightBound])
item := &pb.LogMessage{
Time: time,
Level: level,
Message: strings.TrimSpace(s[timeRightBound+levelRightBound+2:]),
}
return item, nil
}
const (
// TimeStampLayout is accessed in dashboard, keep it public
TimeStampLayout = "2006/01/02 15:04:05.000 -07:00"
timeStampLayoutLen = len(TimeStampLayout)
)
// TiDB / TiKV / PD unified log format
// [2019/03/04 17:04:24.614 +08:00] ...
func parseTimeStamp(s string) (int64, error) {
t, err := time.Parse(TimeStampLayout, s)
if err != nil {
return 0, err
}
return t.UnixNano() / int64(time.Millisecond), nil
}
// logIterator implements Iterator and IteratorWithPeek interface.
// It's used for reading logs from log files one by one by their
// time.
type logIterator struct {
// filters
begin int64
end int64
levelFlag int64
patterns []*regexp.Regexp
// inner state
fileIndex int
reader *bufio.Reader
pending []*os.File
preLog *pb.LogMessage
}
// The Close method close all resources the iterator has.
func (iter *logIterator) close() {
for _, f := range iter.pending {
_ = f.Close()
}
}
func (iter *logIterator) next(ctx context.Context) (*pb.LogMessage, error) {
// initial state
if iter.reader == nil {
if len(iter.pending) == 0 {
return nil, io.EOF
}
iter.reader = bufio.NewReader(iter.pending[iter.fileIndex])
}
nextLine:
for {
if isCtxDone(ctx) {
return nil, ctx.Err()
}
line, err := readLine(iter.reader)
// Switch to next log file
if err != nil && err == io.EOF {
iter.fileIndex++
if iter.fileIndex >= len(iter.pending) {
return nil, io.EOF
}
iter.reader.Reset(iter.pending[iter.fileIndex])
continue
}
line = strings.TrimSpace(line)
if iter.preLog == nil && len(line) < timeStampLayoutLen {
continue
}
item, err := parseLogItem(line)
if err != nil {
if iter.preLog == nil {
continue
}
// handle invalid log
// make whole line as log message with pre time and pre log_level
item = &pb.LogMessage{
Time: iter.preLog.Time,
Level: iter.preLog.Level,
Message: line,
}
} else {
iter.preLog = item
}
if item.Time > iter.end {
return nil, io.EOF
}
if item.Time < iter.begin {
continue
}
// always keep unknown log_level
if item.Level > pb.LogLevel_UNKNOWN && iter.levelFlag != 0 && iter.levelFlag&(1<<item.Level) == 0 {
continue
}
if len(iter.patterns) > 0 {
for _, p := range iter.patterns {
if !p.MatchString(item.Message) {
continue nextLine
}
}
}
return item, nil
}
} | } | random_line_split |
search_log.go | // Copyright 2019 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package sysutil
import (
"bufio"
"context"
"errors"
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"regexp"
"sort"
"strings"
"time"
pb "github.com/pingcap/kvproto/pkg/diagnosticspb"
)
type logFile struct {
file *os.File // The opened file handle
begin, end int64 // The timesteamp in millisecond of first line
}
func (l *logFile) BeginTime() int64 {
return l.begin
}
func (l *logFile) EndTime() int64 {
return l.end
}
func resolveFiles(ctx context.Context, logFilePath string, beginTime, endTime int64) ([]logFile, error) |
func isCtxDone(ctx context.Context) bool {
select {
case <-ctx.Done():
return true
default:
return false
}
}
func readFirstValidLog(ctx context.Context, reader *bufio.Reader, tryLines int64) (*pb.LogMessage, error) {
var tried int64
for {
line, err := readLine(reader)
if err != nil {
return nil, err
}
item, err := parseLogItem(line)
if err == nil {
return item, nil
}
tried++
if tried >= tryLines {
break
}
if isCtxDone(ctx) {
return nil, ctx.Err()
}
}
return nil, errors.New("not a valid log file")
}
func readLastValidLog(ctx context.Context, file *os.File, tryLines int) (*pb.LogMessage, error) {
var tried int
stat, _ := file.Stat()
endCursor := stat.Size()
for {
lines, readBytes, err := readLastLines(ctx, file, endCursor)
if err != nil {
return nil, err
}
// read out the file
if readBytes == 0 {
break
}
endCursor -= int64(readBytes)
for i := len(lines) - 1; i >= 0; i-- {
item, err := parseLogItem(lines[i])
if err == nil {
return item, nil
}
}
tried += len(lines)
if tried >= tryLines {
break
}
}
return nil, errors.New("not a valid log file")
}
// Read a line from a reader.
func readLine(reader *bufio.Reader) (string, error) {
var line, b []byte
var err error
isPrefix := true
for isPrefix {
b, isPrefix, err = reader.ReadLine()
line = append(line, b...)
if err != nil {
return "", err
}
}
return string(line), nil
}
const maxReadCacheSize = 1024 * 1024 * 16
// Read lines from the end of a file
// endCursor initial value should be the file size
func readLastLines(ctx context.Context, file *os.File, endCursor int64) ([]string, int, error) {
var lines []byte
var firstNonNewlinePos int
var cursor = endCursor
var size int64 = 256
for {
// stop if we are at the begining
// check it in the start to avoid read beyond the size
if cursor <= 0 {
break
}
// enlarge the read cache to avoid too many memory move.
size = size * 2
if size > maxReadCacheSize {
size = maxReadCacheSize
}
if cursor < size {
size = cursor
}
cursor -= size
_, err := file.Seek(cursor, io.SeekStart)
if err != nil {
return nil, 0, ctx.Err()
}
chars := make([]byte, size)
_, err = file.Read(chars)
if err != nil {
return nil, 0, ctx.Err()
}
lines = append(chars, lines...)
// find first '\n' or '\r'
for i := 0; i < len(chars)-1; i++ {
// reach the line end
// the first newline may be in the line end at the first round
if i >= len(lines)-1 {
break
}
if (chars[i] == 10 || chars[i] == 13) && chars[i+1] != 10 && chars[i+1] != 13 {
firstNonNewlinePos = i + 1
break
}
}
if firstNonNewlinePos > 0 {
break
}
if isCtxDone(ctx) {
return nil, 0, ctx.Err()
}
}
finalStr := string(lines[firstNonNewlinePos:])
return strings.Split(strings.ReplaceAll(finalStr, "\r\n", "\n"), "\n"), len(finalStr), nil
}
// ParseLogLevel returns LogLevel from string and return LogLevel_Info if
// the string is an invalid level string
func ParseLogLevel(s string) pb.LogLevel {
switch s {
case "debug", "DEBUG":
return pb.LogLevel_Debug
case "info", "INFO":
return pb.LogLevel_Info
case "warn", "WARN":
return pb.LogLevel_Warn
case "trace", "TRACE":
return pb.LogLevel_Trace
case "critical", "CRITICAL":
return pb.LogLevel_Critical
case "error", "ERROR":
return pb.LogLevel_Error
default:
return pb.LogLevel_UNKNOWN
}
}
// parses single log line and returns:
// 1. the timesteamp in unix milliseconds
// 2. the log level
// 3. the log item content
//
// [2019/08/26 06:19:13.011 -04:00] [INFO] [printer.go:41] ["Welcome to TiDB."] ["Release Version"=v2.1.14]...
// [2019/08/26 07:19:49.529 -04:00] [INFO] [printer.go:41] ["Welcome to TiDB."] ["Release Version"=v3.0.2]...
// [2019/08/21 01:43:01.460 -04:00] [INFO] [util.go:60] [PD] [release-version=v3.0.2]
// [2019/08/26 07:20:23.815 -04:00] [INFO] [mod.rs:28] ["Release Version: 3.0.2"]
func parseLogItem(s string) (*pb.LogMessage, error) {
timeLeftBound := strings.Index(s, "[")
timeRightBound := strings.Index(s, "]")
if timeLeftBound == -1 || timeRightBound == -1 || timeLeftBound > timeRightBound {
return nil, fmt.Errorf("invalid log string: %s", s)
}
time, err := parseTimeStamp(s[timeLeftBound+1 : timeRightBound])
if err != nil {
return nil, err
}
levelLeftBound := strings.Index(s[timeRightBound+1:], "[")
levelRightBound := strings.Index(s[timeRightBound+1:], "]")
if levelLeftBound == -1 || levelRightBound == -1 || levelLeftBound > levelRightBound {
return nil, fmt.Errorf("invalid log string: %s", s)
}
level := ParseLogLevel(s[timeRightBound+1+levelLeftBound+1 : timeRightBound+1+levelRightBound])
item := &pb.LogMessage{
Time: time,
Level: level,
Message: strings.TrimSpace(s[timeRightBound+levelRightBound+2:]),
}
return item, nil
}
const (
// TimeStampLayout is accessed in dashboard, keep it public
TimeStampLayout = "2006/01/02 15:04:05.000 -07:00"
timeStampLayoutLen = len(TimeStampLayout)
)
// TiDB / TiKV / PD unified log format
// [2019/03/04 17:04:24.614 +08:00] ...
func parseTimeStamp(s string) (int64, error) {
t, err := time.Parse(TimeStampLayout, s)
if err != nil {
return 0, err
}
return t.UnixNano() / int64(time.Millisecond), nil
}
// logIterator implements Iterator and IteratorWithPeek interface.
// It's used for reading logs from log files one by one by their
// time.
type logIterator struct {
// filters
begin int64
end int64
levelFlag int64
patterns []*regexp.Regexp
// inner state
fileIndex int
reader *bufio.Reader
pending []*os.File
preLog *pb.LogMessage
}
// The Close method close all resources the iterator has.
func (iter *logIterator) close() {
for _, f := range iter.pending {
_ = f.Close()
}
}
func (iter *logIterator) next(ctx context.Context) (*pb.LogMessage, error) {
// initial state
if iter.reader == nil {
if len(iter.pending) == 0 {
return nil, io.EOF
}
iter.reader = bufio.NewReader(iter.pending[iter.fileIndex])
}
nextLine:
for {
if isCtxDone(ctx) {
return nil, ctx.Err()
}
line, err := readLine(iter.reader)
// Switch to next log file
if err != nil && err == io.EOF {
iter.fileIndex++
if iter.fileIndex >= len(iter.pending) {
return nil, io.EOF
}
iter.reader.Reset(iter.pending[iter.fileIndex])
continue
}
line = strings.TrimSpace(line)
if iter.preLog == nil && len(line) < timeStampLayoutLen {
continue
}
item, err := parseLogItem(line)
if err != nil {
if iter.preLog == nil {
continue
}
// handle invalid log
// make whole line as log message with pre time and pre log_level
item = &pb.LogMessage{
Time: iter.preLog.Time,
Level: iter.preLog.Level,
Message: line,
}
} else {
iter.preLog = item
}
if item.Time > iter.end {
return nil, io.EOF
}
if item.Time < iter.begin {
continue
}
// always keep unknown log_level
if item.Level > pb.LogLevel_UNKNOWN && iter.levelFlag != 0 && iter.levelFlag&(1<<item.Level) == 0 {
continue
}
if len(iter.patterns) > 0 {
for _, p := range iter.patterns {
if !p.MatchString(item.Message) {
continue nextLine
}
}
}
return item, nil
}
}
| {
if logFilePath == "" {
return nil, errors.New("empty log file location configuration")
}
var logFiles []logFile
var skipFiles []*os.File
logDir := filepath.Dir(logFilePath)
ext := filepath.Ext(logFilePath)
filePrefix := logFilePath[:len(logFilePath)-len(ext)]
files, err := ioutil.ReadDir(logDir)
if err != nil {
return nil, err
}
walkFn := func(path string, info os.FileInfo) error {
if info.IsDir() {
return nil
}
// All rotated log files have the same prefix and extension with the original file
if !strings.HasPrefix(path, filePrefix) {
return nil
}
if !strings.HasSuffix(path, ext) {
return nil
}
if isCtxDone(ctx) {
return ctx.Err()
}
// If we cannot open the file, we skip to search the file instead of returning
// error and abort entire searching task.
// TODO: do we need to return some warning to client?
file, err := os.OpenFile(path, os.O_RDONLY, os.ModePerm)
if err != nil {
return nil
}
reader := bufio.NewReader(file)
firstItem, err := readFirstValidLog(ctx, reader, 10)
if err != nil {
skipFiles = append(skipFiles, file)
return nil
}
lastItem, err := readLastValidLog(ctx, file, 10)
if err != nil {
skipFiles = append(skipFiles, file)
return nil
}
// Reset position to the start and skip this file if cannot seek to start
if _, err := file.Seek(0, io.SeekStart); err != nil {
skipFiles = append(skipFiles, file)
return nil
}
if beginTime > lastItem.Time || endTime < firstItem.Time {
skipFiles = append(skipFiles, file)
} else {
logFiles = append(logFiles, logFile{
file: file,
begin: firstItem.Time,
end: lastItem.Time,
})
}
return nil
}
for _, file := range files {
err := walkFn(filepath.Join(logDir, file.Name()), file)
if err != nil {
return nil, err
}
}
defer func() {
for _, f := range skipFiles {
_ = f.Close()
}
}()
// Sort by start time
sort.Slice(logFiles, func(i, j int) bool {
return logFiles[i].begin < logFiles[j].begin
})
return logFiles, err
} | identifier_body |
search_log.go | // Copyright 2019 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package sysutil
import (
"bufio"
"context"
"errors"
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"regexp"
"sort"
"strings"
"time"
pb "github.com/pingcap/kvproto/pkg/diagnosticspb"
)
type logFile struct {
file *os.File // The opened file handle
begin, end int64 // The timesteamp in millisecond of first line
}
func (l *logFile) BeginTime() int64 {
return l.begin
}
func (l *logFile) EndTime() int64 {
return l.end
}
func resolveFiles(ctx context.Context, logFilePath string, beginTime, endTime int64) ([]logFile, error) {
if logFilePath == "" {
return nil, errors.New("empty log file location configuration")
}
var logFiles []logFile
var skipFiles []*os.File
logDir := filepath.Dir(logFilePath)
ext := filepath.Ext(logFilePath)
filePrefix := logFilePath[:len(logFilePath)-len(ext)]
files, err := ioutil.ReadDir(logDir)
if err != nil {
return nil, err
}
walkFn := func(path string, info os.FileInfo) error {
if info.IsDir() {
return nil
}
// All rotated log files have the same prefix and extension with the original file
if !strings.HasPrefix(path, filePrefix) {
return nil
}
if !strings.HasSuffix(path, ext) {
return nil
}
if isCtxDone(ctx) {
return ctx.Err()
}
// If we cannot open the file, we skip to search the file instead of returning
// error and abort entire searching task.
// TODO: do we need to return some warning to client?
file, err := os.OpenFile(path, os.O_RDONLY, os.ModePerm)
if err != nil {
return nil
}
reader := bufio.NewReader(file)
firstItem, err := readFirstValidLog(ctx, reader, 10)
if err != nil {
skipFiles = append(skipFiles, file)
return nil
}
lastItem, err := readLastValidLog(ctx, file, 10)
if err != nil {
skipFiles = append(skipFiles, file)
return nil
}
// Reset position to the start and skip this file if cannot seek to start
if _, err := file.Seek(0, io.SeekStart); err != nil {
skipFiles = append(skipFiles, file)
return nil
}
if beginTime > lastItem.Time || endTime < firstItem.Time {
skipFiles = append(skipFiles, file)
} else {
logFiles = append(logFiles, logFile{
file: file,
begin: firstItem.Time,
end: lastItem.Time,
})
}
return nil
}
for _, file := range files {
err := walkFn(filepath.Join(logDir, file.Name()), file)
if err != nil {
return nil, err
}
}
defer func() {
for _, f := range skipFiles {
_ = f.Close()
}
}()
// Sort by start time
sort.Slice(logFiles, func(i, j int) bool {
return logFiles[i].begin < logFiles[j].begin
})
return logFiles, err
}
func isCtxDone(ctx context.Context) bool {
select {
case <-ctx.Done():
return true
default:
return false
}
}
func readFirstValidLog(ctx context.Context, reader *bufio.Reader, tryLines int64) (*pb.LogMessage, error) {
var tried int64
for {
line, err := readLine(reader)
if err != nil {
return nil, err
}
item, err := parseLogItem(line)
if err == nil {
return item, nil
}
tried++
if tried >= tryLines {
break
}
if isCtxDone(ctx) {
return nil, ctx.Err()
}
}
return nil, errors.New("not a valid log file")
}
func readLastValidLog(ctx context.Context, file *os.File, tryLines int) (*pb.LogMessage, error) {
var tried int
stat, _ := file.Stat()
endCursor := stat.Size()
for {
lines, readBytes, err := readLastLines(ctx, file, endCursor)
if err != nil {
return nil, err
}
// read out the file
if readBytes == 0 {
break
}
endCursor -= int64(readBytes)
for i := len(lines) - 1; i >= 0; i-- {
item, err := parseLogItem(lines[i])
if err == nil {
return item, nil
}
}
tried += len(lines)
if tried >= tryLines {
break
}
}
return nil, errors.New("not a valid log file")
}
// Read a line from a reader.
func readLine(reader *bufio.Reader) (string, error) {
var line, b []byte
var err error
isPrefix := true
for isPrefix {
b, isPrefix, err = reader.ReadLine()
line = append(line, b...)
if err != nil |
}
return string(line), nil
}
const maxReadCacheSize = 1024 * 1024 * 16
// Read lines from the end of a file
// endCursor initial value should be the file size
func readLastLines(ctx context.Context, file *os.File, endCursor int64) ([]string, int, error) {
var lines []byte
var firstNonNewlinePos int
var cursor = endCursor
var size int64 = 256
for {
// stop if we are at the begining
// check it in the start to avoid read beyond the size
if cursor <= 0 {
break
}
// enlarge the read cache to avoid too many memory move.
size = size * 2
if size > maxReadCacheSize {
size = maxReadCacheSize
}
if cursor < size {
size = cursor
}
cursor -= size
_, err := file.Seek(cursor, io.SeekStart)
if err != nil {
return nil, 0, ctx.Err()
}
chars := make([]byte, size)
_, err = file.Read(chars)
if err != nil {
return nil, 0, ctx.Err()
}
lines = append(chars, lines...)
// find first '\n' or '\r'
for i := 0; i < len(chars)-1; i++ {
// reach the line end
// the first newline may be in the line end at the first round
if i >= len(lines)-1 {
break
}
if (chars[i] == 10 || chars[i] == 13) && chars[i+1] != 10 && chars[i+1] != 13 {
firstNonNewlinePos = i + 1
break
}
}
if firstNonNewlinePos > 0 {
break
}
if isCtxDone(ctx) {
return nil, 0, ctx.Err()
}
}
finalStr := string(lines[firstNonNewlinePos:])
return strings.Split(strings.ReplaceAll(finalStr, "\r\n", "\n"), "\n"), len(finalStr), nil
}
// ParseLogLevel returns LogLevel from string and return LogLevel_Info if
// the string is an invalid level string
func ParseLogLevel(s string) pb.LogLevel {
switch s {
case "debug", "DEBUG":
return pb.LogLevel_Debug
case "info", "INFO":
return pb.LogLevel_Info
case "warn", "WARN":
return pb.LogLevel_Warn
case "trace", "TRACE":
return pb.LogLevel_Trace
case "critical", "CRITICAL":
return pb.LogLevel_Critical
case "error", "ERROR":
return pb.LogLevel_Error
default:
return pb.LogLevel_UNKNOWN
}
}
// parses single log line and returns:
// 1. the timesteamp in unix milliseconds
// 2. the log level
// 3. the log item content
//
// [2019/08/26 06:19:13.011 -04:00] [INFO] [printer.go:41] ["Welcome to TiDB."] ["Release Version"=v2.1.14]...
// [2019/08/26 07:19:49.529 -04:00] [INFO] [printer.go:41] ["Welcome to TiDB."] ["Release Version"=v3.0.2]...
// [2019/08/21 01:43:01.460 -04:00] [INFO] [util.go:60] [PD] [release-version=v3.0.2]
// [2019/08/26 07:20:23.815 -04:00] [INFO] [mod.rs:28] ["Release Version: 3.0.2"]
func parseLogItem(s string) (*pb.LogMessage, error) {
timeLeftBound := strings.Index(s, "[")
timeRightBound := strings.Index(s, "]")
if timeLeftBound == -1 || timeRightBound == -1 || timeLeftBound > timeRightBound {
return nil, fmt.Errorf("invalid log string: %s", s)
}
time, err := parseTimeStamp(s[timeLeftBound+1 : timeRightBound])
if err != nil {
return nil, err
}
levelLeftBound := strings.Index(s[timeRightBound+1:], "[")
levelRightBound := strings.Index(s[timeRightBound+1:], "]")
if levelLeftBound == -1 || levelRightBound == -1 || levelLeftBound > levelRightBound {
return nil, fmt.Errorf("invalid log string: %s", s)
}
level := ParseLogLevel(s[timeRightBound+1+levelLeftBound+1 : timeRightBound+1+levelRightBound])
item := &pb.LogMessage{
Time: time,
Level: level,
Message: strings.TrimSpace(s[timeRightBound+levelRightBound+2:]),
}
return item, nil
}
const (
// TimeStampLayout is accessed in dashboard, keep it public
TimeStampLayout = "2006/01/02 15:04:05.000 -07:00"
timeStampLayoutLen = len(TimeStampLayout)
)
// TiDB / TiKV / PD unified log format
// [2019/03/04 17:04:24.614 +08:00] ...
func parseTimeStamp(s string) (int64, error) {
t, err := time.Parse(TimeStampLayout, s)
if err != nil {
return 0, err
}
return t.UnixNano() / int64(time.Millisecond), nil
}
// logIterator implements Iterator and IteratorWithPeek interface.
// It's used for reading logs from log files one by one by their
// time.
type logIterator struct {
// filters
begin int64
end int64
levelFlag int64
patterns []*regexp.Regexp
// inner state
fileIndex int
reader *bufio.Reader
pending []*os.File
preLog *pb.LogMessage
}
// The Close method close all resources the iterator has.
func (iter *logIterator) close() {
for _, f := range iter.pending {
_ = f.Close()
}
}
func (iter *logIterator) next(ctx context.Context) (*pb.LogMessage, error) {
// initial state
if iter.reader == nil {
if len(iter.pending) == 0 {
return nil, io.EOF
}
iter.reader = bufio.NewReader(iter.pending[iter.fileIndex])
}
nextLine:
for {
if isCtxDone(ctx) {
return nil, ctx.Err()
}
line, err := readLine(iter.reader)
// Switch to next log file
if err != nil && err == io.EOF {
iter.fileIndex++
if iter.fileIndex >= len(iter.pending) {
return nil, io.EOF
}
iter.reader.Reset(iter.pending[iter.fileIndex])
continue
}
line = strings.TrimSpace(line)
if iter.preLog == nil && len(line) < timeStampLayoutLen {
continue
}
item, err := parseLogItem(line)
if err != nil {
if iter.preLog == nil {
continue
}
// handle invalid log
// make whole line as log message with pre time and pre log_level
item = &pb.LogMessage{
Time: iter.preLog.Time,
Level: iter.preLog.Level,
Message: line,
}
} else {
iter.preLog = item
}
if item.Time > iter.end {
return nil, io.EOF
}
if item.Time < iter.begin {
continue
}
// always keep unknown log_level
if item.Level > pb.LogLevel_UNKNOWN && iter.levelFlag != 0 && iter.levelFlag&(1<<item.Level) == 0 {
continue
}
if len(iter.patterns) > 0 {
for _, p := range iter.patterns {
if !p.MatchString(item.Message) {
continue nextLine
}
}
}
return item, nil
}
}
| {
return "", err
} | conditional_block |
search_log.go | // Copyright 2019 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package sysutil
import (
"bufio"
"context"
"errors"
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"regexp"
"sort"
"strings"
"time"
pb "github.com/pingcap/kvproto/pkg/diagnosticspb"
)
type logFile struct {
file *os.File // The opened file handle
begin, end int64 // The timesteamp in millisecond of first line
}
func (l *logFile) BeginTime() int64 {
return l.begin
}
func (l *logFile) EndTime() int64 {
return l.end
}
func resolveFiles(ctx context.Context, logFilePath string, beginTime, endTime int64) ([]logFile, error) {
if logFilePath == "" {
return nil, errors.New("empty log file location configuration")
}
var logFiles []logFile
var skipFiles []*os.File
logDir := filepath.Dir(logFilePath)
ext := filepath.Ext(logFilePath)
filePrefix := logFilePath[:len(logFilePath)-len(ext)]
files, err := ioutil.ReadDir(logDir)
if err != nil {
return nil, err
}
walkFn := func(path string, info os.FileInfo) error {
if info.IsDir() {
return nil
}
// All rotated log files have the same prefix and extension with the original file
if !strings.HasPrefix(path, filePrefix) {
return nil
}
if !strings.HasSuffix(path, ext) {
return nil
}
if isCtxDone(ctx) {
return ctx.Err()
}
// If we cannot open the file, we skip to search the file instead of returning
// error and abort entire searching task.
// TODO: do we need to return some warning to client?
file, err := os.OpenFile(path, os.O_RDONLY, os.ModePerm)
if err != nil {
return nil
}
reader := bufio.NewReader(file)
firstItem, err := readFirstValidLog(ctx, reader, 10)
if err != nil {
skipFiles = append(skipFiles, file)
return nil
}
lastItem, err := readLastValidLog(ctx, file, 10)
if err != nil {
skipFiles = append(skipFiles, file)
return nil
}
// Reset position to the start and skip this file if cannot seek to start
if _, err := file.Seek(0, io.SeekStart); err != nil {
skipFiles = append(skipFiles, file)
return nil
}
if beginTime > lastItem.Time || endTime < firstItem.Time {
skipFiles = append(skipFiles, file)
} else {
logFiles = append(logFiles, logFile{
file: file,
begin: firstItem.Time,
end: lastItem.Time,
})
}
return nil
}
for _, file := range files {
err := walkFn(filepath.Join(logDir, file.Name()), file)
if err != nil {
return nil, err
}
}
defer func() {
for _, f := range skipFiles {
_ = f.Close()
}
}()
// Sort by start time
sort.Slice(logFiles, func(i, j int) bool {
return logFiles[i].begin < logFiles[j].begin
})
return logFiles, err
}
func isCtxDone(ctx context.Context) bool {
select {
case <-ctx.Done():
return true
default:
return false
}
}
func readFirstValidLog(ctx context.Context, reader *bufio.Reader, tryLines int64) (*pb.LogMessage, error) {
var tried int64
for {
line, err := readLine(reader)
if err != nil {
return nil, err
}
item, err := parseLogItem(line)
if err == nil {
return item, nil
}
tried++
if tried >= tryLines {
break
}
if isCtxDone(ctx) {
return nil, ctx.Err()
}
}
return nil, errors.New("not a valid log file")
}
func readLastValidLog(ctx context.Context, file *os.File, tryLines int) (*pb.LogMessage, error) {
var tried int
stat, _ := file.Stat()
endCursor := stat.Size()
for {
lines, readBytes, err := readLastLines(ctx, file, endCursor)
if err != nil {
return nil, err
}
// read out the file
if readBytes == 0 {
break
}
endCursor -= int64(readBytes)
for i := len(lines) - 1; i >= 0; i-- {
item, err := parseLogItem(lines[i])
if err == nil {
return item, nil
}
}
tried += len(lines)
if tried >= tryLines {
break
}
}
return nil, errors.New("not a valid log file")
}
// Read a line from a reader.
func readLine(reader *bufio.Reader) (string, error) {
var line, b []byte
var err error
isPrefix := true
for isPrefix {
b, isPrefix, err = reader.ReadLine()
line = append(line, b...)
if err != nil {
return "", err
}
}
return string(line), nil
}
const maxReadCacheSize = 1024 * 1024 * 16
// Read lines from the end of a file
// endCursor initial value should be the file size
func | (ctx context.Context, file *os.File, endCursor int64) ([]string, int, error) {
var lines []byte
var firstNonNewlinePos int
var cursor = endCursor
var size int64 = 256
for {
// stop if we are at the begining
// check it in the start to avoid read beyond the size
if cursor <= 0 {
break
}
// enlarge the read cache to avoid too many memory move.
size = size * 2
if size > maxReadCacheSize {
size = maxReadCacheSize
}
if cursor < size {
size = cursor
}
cursor -= size
_, err := file.Seek(cursor, io.SeekStart)
if err != nil {
return nil, 0, ctx.Err()
}
chars := make([]byte, size)
_, err = file.Read(chars)
if err != nil {
return nil, 0, ctx.Err()
}
lines = append(chars, lines...)
// find first '\n' or '\r'
for i := 0; i < len(chars)-1; i++ {
// reach the line end
// the first newline may be in the line end at the first round
if i >= len(lines)-1 {
break
}
if (chars[i] == 10 || chars[i] == 13) && chars[i+1] != 10 && chars[i+1] != 13 {
firstNonNewlinePos = i + 1
break
}
}
if firstNonNewlinePos > 0 {
break
}
if isCtxDone(ctx) {
return nil, 0, ctx.Err()
}
}
finalStr := string(lines[firstNonNewlinePos:])
return strings.Split(strings.ReplaceAll(finalStr, "\r\n", "\n"), "\n"), len(finalStr), nil
}
// ParseLogLevel returns LogLevel from string and return LogLevel_Info if
// the string is an invalid level string
func ParseLogLevel(s string) pb.LogLevel {
switch s {
case "debug", "DEBUG":
return pb.LogLevel_Debug
case "info", "INFO":
return pb.LogLevel_Info
case "warn", "WARN":
return pb.LogLevel_Warn
case "trace", "TRACE":
return pb.LogLevel_Trace
case "critical", "CRITICAL":
return pb.LogLevel_Critical
case "error", "ERROR":
return pb.LogLevel_Error
default:
return pb.LogLevel_UNKNOWN
}
}
// parses single log line and returns:
// 1. the timesteamp in unix milliseconds
// 2. the log level
// 3. the log item content
//
// [2019/08/26 06:19:13.011 -04:00] [INFO] [printer.go:41] ["Welcome to TiDB."] ["Release Version"=v2.1.14]...
// [2019/08/26 07:19:49.529 -04:00] [INFO] [printer.go:41] ["Welcome to TiDB."] ["Release Version"=v3.0.2]...
// [2019/08/21 01:43:01.460 -04:00] [INFO] [util.go:60] [PD] [release-version=v3.0.2]
// [2019/08/26 07:20:23.815 -04:00] [INFO] [mod.rs:28] ["Release Version: 3.0.2"]
func parseLogItem(s string) (*pb.LogMessage, error) {
timeLeftBound := strings.Index(s, "[")
timeRightBound := strings.Index(s, "]")
if timeLeftBound == -1 || timeRightBound == -1 || timeLeftBound > timeRightBound {
return nil, fmt.Errorf("invalid log string: %s", s)
}
time, err := parseTimeStamp(s[timeLeftBound+1 : timeRightBound])
if err != nil {
return nil, err
}
levelLeftBound := strings.Index(s[timeRightBound+1:], "[")
levelRightBound := strings.Index(s[timeRightBound+1:], "]")
if levelLeftBound == -1 || levelRightBound == -1 || levelLeftBound > levelRightBound {
return nil, fmt.Errorf("invalid log string: %s", s)
}
level := ParseLogLevel(s[timeRightBound+1+levelLeftBound+1 : timeRightBound+1+levelRightBound])
item := &pb.LogMessage{
Time: time,
Level: level,
Message: strings.TrimSpace(s[timeRightBound+levelRightBound+2:]),
}
return item, nil
}
const (
// TimeStampLayout is accessed in dashboard, keep it public
TimeStampLayout = "2006/01/02 15:04:05.000 -07:00"
timeStampLayoutLen = len(TimeStampLayout)
)
// TiDB / TiKV / PD unified log format
// [2019/03/04 17:04:24.614 +08:00] ...
func parseTimeStamp(s string) (int64, error) {
t, err := time.Parse(TimeStampLayout, s)
if err != nil {
return 0, err
}
return t.UnixNano() / int64(time.Millisecond), nil
}
// logIterator implements Iterator and IteratorWithPeek interface.
// It's used for reading logs from log files one by one by their
// time.
type logIterator struct {
// filters
begin int64
end int64
levelFlag int64
patterns []*regexp.Regexp
// inner state
fileIndex int
reader *bufio.Reader
pending []*os.File
preLog *pb.LogMessage
}
// The Close method close all resources the iterator has.
func (iter *logIterator) close() {
for _, f := range iter.pending {
_ = f.Close()
}
}
func (iter *logIterator) next(ctx context.Context) (*pb.LogMessage, error) {
// initial state
if iter.reader == nil {
if len(iter.pending) == 0 {
return nil, io.EOF
}
iter.reader = bufio.NewReader(iter.pending[iter.fileIndex])
}
nextLine:
for {
if isCtxDone(ctx) {
return nil, ctx.Err()
}
line, err := readLine(iter.reader)
// Switch to next log file
if err != nil && err == io.EOF {
iter.fileIndex++
if iter.fileIndex >= len(iter.pending) {
return nil, io.EOF
}
iter.reader.Reset(iter.pending[iter.fileIndex])
continue
}
line = strings.TrimSpace(line)
if iter.preLog == nil && len(line) < timeStampLayoutLen {
continue
}
item, err := parseLogItem(line)
if err != nil {
if iter.preLog == nil {
continue
}
// handle invalid log
// make whole line as log message with pre time and pre log_level
item = &pb.LogMessage{
Time: iter.preLog.Time,
Level: iter.preLog.Level,
Message: line,
}
} else {
iter.preLog = item
}
if item.Time > iter.end {
return nil, io.EOF
}
if item.Time < iter.begin {
continue
}
// always keep unknown log_level
if item.Level > pb.LogLevel_UNKNOWN && iter.levelFlag != 0 && iter.levelFlag&(1<<item.Level) == 0 {
continue
}
if len(iter.patterns) > 0 {
for _, p := range iter.patterns {
if !p.MatchString(item.Message) {
continue nextLine
}
}
}
return item, nil
}
}
| readLastLines | identifier_name |
lisp.go | // ---------------------------------------------------------------------------
//
// Copyright 2013-2019 lispers.net - Dino Farinacci <farinacci@gmail.com>
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// ---------------------------------------------------------------------------
//
// lisp.go
//
// This file contains function and type definitions used by xtr.go and ipc.go.
//
// This is a external data-plane from the lispers.net control-plane perspective
// and must be run with "lisp xtr-parameters" sub-command "ipc-data-plane =
// yes".
//
// ---------------------------------------------------------------------------
package main
import "fmt"
import "bufio"
import "os"
import "os/exec"
import "strings"
import "strconv"
import "time"
import "net"
import "hash"
import "math/rand"
import "encoding/binary"
import "crypto/aes"
import "crypto/cipher"
import "crypto/sha256"
import "crypto/hmac"
import "encoding/hex"
//
// ---------- Variable Definitions ----------
//
var lisp_debug_logging bool = true
var lisp_data_plane_logging bool = false
//
// ---------- Constants Definitions ----------
//
const LISP_DATA_PORT = 4341
const LISP_CTRL_PORT = 4342
const LISP_L2_DATA_PORT = 8472
const LISP_VXLAN_DATA_PORT = 4789
const LISP_VXLAN_GPE_PORT = 4790
//
// ---------- Type Definitions ----------
//
type Lisp_address struct {
instance_id int
mask_len int
address net.IP
mask_address net.IPMask
address_string string
}
//
// lisp_print_address
//
// Return string with address. And optionally prepend "[<iid>]"
//
func (a *Lisp_address) lisp_print_address(with_iid bool) string {
if (a.address_string == "") { a.address_string = a.address.String() }
if (with_iid) {
iid := a.instance_id
if (iid == 0xffffff) { iid = -1 }
return(fmt.Sprintf("[%d]%s", iid, a.address_string))
}
return(a.address_string)
}
//
// lisp_store_address
//
// Store and instance-ID and string representation of an IPv4 or IPv6 address
// and store in Lisp_address format.
//
func (a *Lisp_address) lisp_store_address(iid int, addr string) bool {
var address string
//
// Is this address string an address or a prefix?
//
if (strings.Contains(addr, "/")) {
split := strings.Split(addr, "/")
address = split[0]
a.mask_len, _ = strconv.Atoi(split[1])
} else {
address = addr
a.mask_len = -1
}
a.instance_id = iid
//
// Parse address string. ParseIP() will put IPv4 addresses in a 16-byte
// array. We don't want that because address []byte length will determine
// address family.
//
a.address = net.ParseIP(address)
if (strings.Contains(addr, ".")) {
a.address = a.address[12:16]
}
//
// Set mask-length and mask address.
//
if (a.mask_len == -1) {
a.mask_len = len(a.address) * 8
}
a.mask_address = net.CIDRMask(a.mask_len, len(a.address) * 8)
//
// Store string for printing.
//
a.address_string = addr
return(true)
}
//
// lisp_is_ipv4
//
// Return true if Lisp_address is IPv4.
//
func (a *Lisp_address) lisp_is_ipv4() bool {
return((len(a.address) == 4))
}
//
// lisp_is_ipv6
//
// Return true if Lisp_address is IPv6.
//
func (a *Lisp_address) lisp_is_ipv6() bool {
return((len(a.address) == 16))
}
//
// lisp_is_multicast
//
// Return true if Lisp_address is an IPv4 or IPv6 multicast group address.
//
func (a *Lisp_address) lisp_is_multicast() bool {
if (a.lisp_is_ipv4()) {
return(int(a.address[0]) >= 224 && int(a.address[0]) < 240)
}
if (a.lisp_is_ipv6()) {
return(a.address[0] == 0xff)
}
return(false)
}
//
// lisp_make_address
//
// Store and instance-ID and byte representation of an IPv4 or IPv6 address
// and store in Lisp_address format. Note that Lisp_address.address_string
// is created when it is needed (in Lisp_address.lisp_print_address()).
//
func (a *Lisp_address) lisp_make_address(iid int, addr []byte) {
a.instance_id = iid
a.address = addr
a.mask_len = len(a.address) * 8
a.mask_address = net.CIDRMask(a.mask_len, len(a.address) * 8)
}
//
// lisp_exact_match
//
// Compare two addresses and return true if they match.
//
func (a *Lisp_address) lisp_exact_match(addr Lisp_address) (bool) {
if (len(a.address) != len(addr.address)) {
return(false)
}
if (a.mask_len != addr.mask_len) {
return(false)
}
if (a.instance_id != addr.instance_id) {
return(false)
}
if (a.address.Equal(addr.address) == false) {
return(false)
}
return(true)
}
//
// lisp_more_specific
//
// Return true if the supplied address is more specific than the method
// address. If the mask-lengths are the same, a true is returned.
//
func (a *Lisp_address) lisp_more_specific(addr Lisp_address) (bool) {
if (len(a.address) != len(addr.address)) {
return(false)
}
if (a.instance_id != addr.instance_id) {
return(false)
}
if (a.mask_len > addr.mask_len) {
return(false)
}
for i := 0; i < len(a.address); i++ {
if (a.mask_address[i] == 0) {
break
}
if ((a.address[i] & a.mask_address[i]) !=
(addr.address[i] & a.mask_address[i])) {
return(false)
}
}
return(true)
}
//
// lisp_hash_address
//
// Hash address to aid in selecting a source UDP port.
//
func (a *Lisp_address) lisp_hash_address() uint16 {
var hash uint = 0
for i := 0; i < len(a.address); i++ {
hash = hash ^ uint(a.address[i])
}
//
// Fold result into a short.
//
return(uint16(hash >> 16) ^ uint16(hash & 0xffff))
}
type Lisp_database struct {
eid_prefix Lisp_address
}
type Lisp_interface struct {
instance_id int
}
type Lisp_map_cache struct {
next_mc *Lisp_map_cache
eid_prefix Lisp_address
rloc_set []Lisp_rloc
rle_set []Lisp_rloc
}
type Lisp_rloc struct {
rloc Lisp_address
encap_port int
stats Lisp_stats
keys [4]*Lisp_keys
use_key_id int
}
type Lisp_keys struct {
crypto_key string
icv_key string
iv []byte
crypto_alg cipher.AEAD
hash_alg hash.Hash
}
type Lisp_stats struct {
packets uint64
bytes uint64
last_packet time.Time
} |
//
// lisp_count
//
// Increment stats counters. Either do it for an RLOC/RLE entry or for the
// lisp_decap_stats map. Argument 'key-name' needs to be set if stats is nil.
//
func lisp_count(stats *Lisp_stats, key_name string, packet []byte) {
if (stats == nil) {
s, ok := lisp_decap_stats[key_name]
if (!ok) {
s = new(Lisp_stats)
lisp_decap_stats[key_name] = s
}
s.packets += 1
s.bytes += uint64(len(packet))
s.last_packet = time.Now()
} else {
stats.packets += 1
stats.bytes += uint64(len(packet))
stats.last_packet = time.Now()
}
}
//
// lisp_find_rloc
//
// Find RLOC entry in map-cache entry based on supplied RLOC address.
//
func (mc *Lisp_map_cache) lisp_find_rloc(rloc_addr Lisp_address) (*Lisp_rloc) {
for _, rloc := range mc.rloc_set {
if (rloc_addr.lisp_exact_match(rloc.rloc)) { return(&rloc) }
}
return(nil)
}
//
// lprint
//
// Print control-plane debug logging output when configured.
//
func lprint(format string, args ...interface{}) {
if (!lisp_debug_logging) {
return
}
ts := time.Now()
ms := ts.Nanosecond() / 1000000
ds := fmt.Sprintf("%02d/%02d/%02d %02d:%02d:%02d.%03d", ts.Month(),
ts.Day(), ts.Year(), ts.Hour(), ts.Minute(), ts.Second(), ms)
f := ds + ": xtr: " + format + "\n"
fmt.Printf(f, args...)
}
//
// dprint
//
// Print data-plane debug logging output when configured.
//
func dprint(format string, args ...interface{}) {
if (!lisp_data_plane_logging) {
return
}
ts := time.Now()
ms := ts.Nanosecond() / 1000000
ds := fmt.Sprintf("%02d/%02d/%02d %02d:%02d:%02d.%03d", ts.Month(),
ts.Day(), ts.Year(), ts.Hour(), ts.Minute(), ts.Second(), ms)
f := ds + ": xtr: " + format + "\n"
fmt.Printf(f, args...)
}
//
// debug
//
// For temporary debug output that highlights line in boldface red.
//
func debug(format string, args ...interface{}) {
f := red(">>>") + format + red("<<<") + "\n"
fmt.Printf(f, args...)
}
//
// debugv
//
// For temporary debug output that shows the contents of a data structure.
// Very useful for debugging.
//
func debugv(args interface{}) {
debug("%#v", args)
}
//
// lisp_command_output
//
// Execute a system command and return a string with output.
//
func lisp_command_output(command string) string {
cmd := exec.Command(command)
out, err := cmd.CombinedOutput()
if (err != nil) {
return("")
}
output := string(out)
return(output[0:len(output)-1])
}
//
// lisp_read_file
//
// Read entire file into a string.
//
func lisp_read_file(filename string) string {
fd, err := os.Open(filename)
if (err != nil) {
return("")
}
scanner := bufio.NewScanner(fd)
scanner.Scan()
fd.Close()
return(scanner.Text())
}
//
// lisp_write_file
//
// Write supplied string to supplied file.
//
func lisp_write_file(filename string, text string) {
fd, err := os.Create(filename)
if (err != nil) {
lprint("Could not create file %s", filename)
return
}
_, err = fd.WriteString(text)
if (err != nil) {
lprint("Could not write string to file %s", filename)
return
}
fd.Close()
}
//
// bold
//
// Make input string boldface.
//
func bold(str string) string {
return("\033[1m" + str + "\033[0m")
}
//
// green
//
// Make input string green.
//
func green(str string) string {
return("\033[92m" + bold(str) + "\033[0m")
}
//
// red
//
// Make input string red.
//
func red(str string) string {
return("\033[91m" + bold(str) + "\033[0m")
}
//
// lisp_log_packet
//
// Log a received data packet either native or LISP encapsulated. This function
// should be called only when lisp_data_plane_logging is true.
//
func lisp_log_packet(prefix_string string, packet []byte, is_lisp bool) {
var num int
var udp, lisp []byte
ip := true
if (packet[0] == 0x45) {
num = 20
} else if (packet[0] == 0x60) {
num = 40
} else {
num = 8
if (packet[8] == 0x45) { num += 20 }
if (packet[8] == 0x60) { num += 40 }
ip = false
}
udp = packet[num:num+8]
lisp = packet[num+8:num+16]
packet_string := fmt.Sprintf("%s: ", prefix_string)
p := packet
for i := 0; i < num; i += 4 {
packet_string += fmt.Sprintf("%02x%02x%02x%02x ", p[i], p[i+1],
p[i+2], p[i+3])
}
//
// Return for invalid packet.
//
if (ip == false) {
dprint(packet_string)
return
}
if (!is_lisp) {
dprint(packet_string)
return
}
packet_string += fmt.Sprintf("UDP: ")
for i := 0; i < 8; i += 4 {
packet_string += fmt.Sprintf("%02x%02x%02x%02x ", udp[i], udp[i+1],
udp[i+2], udp[i+3])
}
packet_string += fmt.Sprintf("LISP: ")
for i := 0; i < 8; i += 4 {
packet_string += fmt.Sprintf("%02x%02x%02x%02x ", lisp[i], lisp[i+1],
lisp[i+2], lisp[i+3])
}
dprint(packet_string)
}
//
// lisp_get_local_address
//
// Given supplied interface, return locaal IPv4 and IPv6 addresses.
//
func lisp_get_local_address(device string) (string, string) {
var ipv4 string = ""
var ipv6 string = ""
intf, _ := net.InterfaceByName(device)
addrs, _ := intf.Addrs()
for _, a := range addrs {
addr := strings.Split(a.String(), "/")[0]
if (addr == "::1") { continue }
if (strings.Contains(addr, "fe80")) { continue }
if (strings.Contains(addr, "127.0.0.1")) { continue }
if (strings.Contains(addr, ":")) { ipv6 = addr }
if (strings.Count(addr, ".") == 3) { ipv4 = addr }
}
return ipv4, ipv6
}
//
// lisp_setup_keys
//
// Store crypto and hash data structures so they are ready for encryption and
// ICV checking.
//
func (r *Lisp_keys) lisp_setup_keys(crypto_key string, icv_key string) {
r.crypto_key = crypto_key
r.icv_key = icv_key
//
// Allocate an IV used for encryption during encapsulation. AES-GCM wants
// a 12-byte IV/nonce.
//
r.iv = make([]byte, 12)
binary.BigEndian.PutUint32(r.iv[0:4], rand.Uint32())
binary.BigEndian.PutUint64(r.iv[4:12], rand.Uint64())
ekey, err := hex.DecodeString(crypto_key)
if (err != nil) {
lprint("hex.DecodeString() failed for crypto-key, err %s", err)
return
}
block, err := aes.NewCipher(ekey)
if (err != nil) {
lprint("aes.NewCipher() failed, err %s", err)
return
}
r.crypto_alg, err = cipher.NewGCM(block)
if (err != nil) {
lprint("cipher.NewGCM() failed, err %s", err)
return
}
ikey, err := hex.DecodeString(icv_key)
if (err != nil) {
lprint("hex.DecodeString() failed for icv-key, err %s", err)
return
}
r.hash_alg = hmac.New(sha256.New, ikey)
lprint("Setup new keys")
return
}
//----------------------------------------------------------------------------- | random_line_split | |
lisp.go | // ---------------------------------------------------------------------------
//
// Copyright 2013-2019 lispers.net - Dino Farinacci <farinacci@gmail.com>
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// ---------------------------------------------------------------------------
//
// lisp.go
//
// This file contains function and type definitions used by xtr.go and ipc.go.
//
// This is a external data-plane from the lispers.net control-plane perspective
// and must be run with "lisp xtr-parameters" sub-command "ipc-data-plane =
// yes".
//
// ---------------------------------------------------------------------------
package main
import "fmt"
import "bufio"
import "os"
import "os/exec"
import "strings"
import "strconv"
import "time"
import "net"
import "hash"
import "math/rand"
import "encoding/binary"
import "crypto/aes"
import "crypto/cipher"
import "crypto/sha256"
import "crypto/hmac"
import "encoding/hex"
//
// ---------- Variable Definitions ----------
//
var lisp_debug_logging bool = true
var lisp_data_plane_logging bool = false
//
// ---------- Constants Definitions ----------
//
const LISP_DATA_PORT = 4341
const LISP_CTRL_PORT = 4342
const LISP_L2_DATA_PORT = 8472
const LISP_VXLAN_DATA_PORT = 4789
const LISP_VXLAN_GPE_PORT = 4790
//
// ---------- Type Definitions ----------
//
type Lisp_address struct {
instance_id int
mask_len int
address net.IP
mask_address net.IPMask
address_string string
}
//
// lisp_print_address
//
// Return string with address. And optionally prepend "[<iid>]"
//
func (a *Lisp_address) lisp_print_address(with_iid bool) string {
if (a.address_string == "") { a.address_string = a.address.String() }
if (with_iid) {
iid := a.instance_id
if (iid == 0xffffff) { iid = -1 }
return(fmt.Sprintf("[%d]%s", iid, a.address_string))
}
return(a.address_string)
}
//
// lisp_store_address
//
// Store and instance-ID and string representation of an IPv4 or IPv6 address
// and store in Lisp_address format.
//
func (a *Lisp_address) lisp_store_address(iid int, addr string) bool {
var address string
//
// Is this address string an address or a prefix?
//
if (strings.Contains(addr, "/")) {
split := strings.Split(addr, "/")
address = split[0]
a.mask_len, _ = strconv.Atoi(split[1])
} else {
address = addr
a.mask_len = -1
}
a.instance_id = iid
//
// Parse address string. ParseIP() will put IPv4 addresses in a 16-byte
// array. We don't want that because address []byte length will determine
// address family.
//
a.address = net.ParseIP(address)
if (strings.Contains(addr, ".")) {
a.address = a.address[12:16]
}
//
// Set mask-length and mask address.
//
if (a.mask_len == -1) {
a.mask_len = len(a.address) * 8
}
a.mask_address = net.CIDRMask(a.mask_len, len(a.address) * 8)
//
// Store string for printing.
//
a.address_string = addr
return(true)
}
//
// lisp_is_ipv4
//
// Return true if Lisp_address is IPv4.
//
func (a *Lisp_address) lisp_is_ipv4() bool {
return((len(a.address) == 4))
}
//
// lisp_is_ipv6
//
// Return true if Lisp_address is IPv6.
//
func (a *Lisp_address) lisp_is_ipv6() bool {
return((len(a.address) == 16))
}
//
// lisp_is_multicast
//
// Return true if Lisp_address is an IPv4 or IPv6 multicast group address.
//
func (a *Lisp_address) lisp_is_multicast() bool {
if (a.lisp_is_ipv4()) {
return(int(a.address[0]) >= 224 && int(a.address[0]) < 240)
}
if (a.lisp_is_ipv6()) {
return(a.address[0] == 0xff)
}
return(false)
}
//
// lisp_make_address
//
// Store and instance-ID and byte representation of an IPv4 or IPv6 address
// and store in Lisp_address format. Note that Lisp_address.address_string
// is created when it is needed (in Lisp_address.lisp_print_address()).
//
func (a *Lisp_address) lisp_make_address(iid int, addr []byte) {
a.instance_id = iid
a.address = addr
a.mask_len = len(a.address) * 8
a.mask_address = net.CIDRMask(a.mask_len, len(a.address) * 8)
}
//
// lisp_exact_match
//
// Compare two addresses and return true if they match.
//
func (a *Lisp_address) lisp_exact_match(addr Lisp_address) (bool) {
if (len(a.address) != len(addr.address)) {
return(false)
}
if (a.mask_len != addr.mask_len) |
if (a.instance_id != addr.instance_id) {
return(false)
}
if (a.address.Equal(addr.address) == false) {
return(false)
}
return(true)
}
//
// lisp_more_specific
//
// Return true if the supplied address is more specific than the method
// address. If the mask-lengths are the same, a true is returned.
//
func (a *Lisp_address) lisp_more_specific(addr Lisp_address) (bool) {
if (len(a.address) != len(addr.address)) {
return(false)
}
if (a.instance_id != addr.instance_id) {
return(false)
}
if (a.mask_len > addr.mask_len) {
return(false)
}
for i := 0; i < len(a.address); i++ {
if (a.mask_address[i] == 0) {
break
}
if ((a.address[i] & a.mask_address[i]) !=
(addr.address[i] & a.mask_address[i])) {
return(false)
}
}
return(true)
}
//
// lisp_hash_address
//
// Hash address to aid in selecting a source UDP port.
//
func (a *Lisp_address) lisp_hash_address() uint16 {
var hash uint = 0
for i := 0; i < len(a.address); i++ {
hash = hash ^ uint(a.address[i])
}
//
// Fold result into a short.
//
return(uint16(hash >> 16) ^ uint16(hash & 0xffff))
}
type Lisp_database struct {
eid_prefix Lisp_address
}
type Lisp_interface struct {
instance_id int
}
type Lisp_map_cache struct {
next_mc *Lisp_map_cache
eid_prefix Lisp_address
rloc_set []Lisp_rloc
rle_set []Lisp_rloc
}
type Lisp_rloc struct {
rloc Lisp_address
encap_port int
stats Lisp_stats
keys [4]*Lisp_keys
use_key_id int
}
type Lisp_keys struct {
crypto_key string
icv_key string
iv []byte
crypto_alg cipher.AEAD
hash_alg hash.Hash
}
type Lisp_stats struct {
packets uint64
bytes uint64
last_packet time.Time
}
//
// lisp_count
//
// Increment stats counters. Either do it for an RLOC/RLE entry or for the
// lisp_decap_stats map. Argument 'key-name' needs to be set if stats is nil.
//
func lisp_count(stats *Lisp_stats, key_name string, packet []byte) {
if (stats == nil) {
s, ok := lisp_decap_stats[key_name]
if (!ok) {
s = new(Lisp_stats)
lisp_decap_stats[key_name] = s
}
s.packets += 1
s.bytes += uint64(len(packet))
s.last_packet = time.Now()
} else {
stats.packets += 1
stats.bytes += uint64(len(packet))
stats.last_packet = time.Now()
}
}
//
// lisp_find_rloc
//
// Find RLOC entry in map-cache entry based on supplied RLOC address.
//
func (mc *Lisp_map_cache) lisp_find_rloc(rloc_addr Lisp_address) (*Lisp_rloc) {
for _, rloc := range mc.rloc_set {
if (rloc_addr.lisp_exact_match(rloc.rloc)) { return(&rloc) }
}
return(nil)
}
//
// lprint
//
// Print control-plane debug logging output when configured.
//
func lprint(format string, args ...interface{}) {
if (!lisp_debug_logging) {
return
}
ts := time.Now()
ms := ts.Nanosecond() / 1000000
ds := fmt.Sprintf("%02d/%02d/%02d %02d:%02d:%02d.%03d", ts.Month(),
ts.Day(), ts.Year(), ts.Hour(), ts.Minute(), ts.Second(), ms)
f := ds + ": xtr: " + format + "\n"
fmt.Printf(f, args...)
}
//
// dprint
//
// Print data-plane debug logging output when configured.
//
func dprint(format string, args ...interface{}) {
if (!lisp_data_plane_logging) {
return
}
ts := time.Now()
ms := ts.Nanosecond() / 1000000
ds := fmt.Sprintf("%02d/%02d/%02d %02d:%02d:%02d.%03d", ts.Month(),
ts.Day(), ts.Year(), ts.Hour(), ts.Minute(), ts.Second(), ms)
f := ds + ": xtr: " + format + "\n"
fmt.Printf(f, args...)
}
//
// debug
//
// For temporary debug output that highlights line in boldface red.
//
func debug(format string, args ...interface{}) {
f := red(">>>") + format + red("<<<") + "\n"
fmt.Printf(f, args...)
}
//
// debugv
//
// For temporary debug output that shows the contents of a data structure.
// Very useful for debugging.
//
func debugv(args interface{}) {
debug("%#v", args)
}
//
// lisp_command_output
//
// Execute a system command and return a string with output.
//
func lisp_command_output(command string) string {
cmd := exec.Command(command)
out, err := cmd.CombinedOutput()
if (err != nil) {
return("")
}
output := string(out)
return(output[0:len(output)-1])
}
//
// lisp_read_file
//
// Read entire file into a string.
//
func lisp_read_file(filename string) string {
fd, err := os.Open(filename)
if (err != nil) {
return("")
}
scanner := bufio.NewScanner(fd)
scanner.Scan()
fd.Close()
return(scanner.Text())
}
//
// lisp_write_file
//
// Write supplied string to supplied file.
//
func lisp_write_file(filename string, text string) {
fd, err := os.Create(filename)
if (err != nil) {
lprint("Could not create file %s", filename)
return
}
_, err = fd.WriteString(text)
if (err != nil) {
lprint("Could not write string to file %s", filename)
return
}
fd.Close()
}
//
// bold
//
// Make input string boldface.
//
func bold(str string) string {
return("\033[1m" + str + "\033[0m")
}
//
// green
//
// Make input string green.
//
func green(str string) string {
return("\033[92m" + bold(str) + "\033[0m")
}
//
// red
//
// Make input string red.
//
func red(str string) string {
return("\033[91m" + bold(str) + "\033[0m")
}
//
// lisp_log_packet
//
// Log a received data packet either native or LISP encapsulated. This function
// should be called only when lisp_data_plane_logging is true.
//
func lisp_log_packet(prefix_string string, packet []byte, is_lisp bool) {
var num int
var udp, lisp []byte
ip := true
if (packet[0] == 0x45) {
num = 20
} else if (packet[0] == 0x60) {
num = 40
} else {
num = 8
if (packet[8] == 0x45) { num += 20 }
if (packet[8] == 0x60) { num += 40 }
ip = false
}
udp = packet[num:num+8]
lisp = packet[num+8:num+16]
packet_string := fmt.Sprintf("%s: ", prefix_string)
p := packet
for i := 0; i < num; i += 4 {
packet_string += fmt.Sprintf("%02x%02x%02x%02x ", p[i], p[i+1],
p[i+2], p[i+3])
}
//
// Return for invalid packet.
//
if (ip == false) {
dprint(packet_string)
return
}
if (!is_lisp) {
dprint(packet_string)
return
}
packet_string += fmt.Sprintf("UDP: ")
for i := 0; i < 8; i += 4 {
packet_string += fmt.Sprintf("%02x%02x%02x%02x ", udp[i], udp[i+1],
udp[i+2], udp[i+3])
}
packet_string += fmt.Sprintf("LISP: ")
for i := 0; i < 8; i += 4 {
packet_string += fmt.Sprintf("%02x%02x%02x%02x ", lisp[i], lisp[i+1],
lisp[i+2], lisp[i+3])
}
dprint(packet_string)
}
//
// lisp_get_local_address
//
// Given supplied interface, return locaal IPv4 and IPv6 addresses.
//
func lisp_get_local_address(device string) (string, string) {
var ipv4 string = ""
var ipv6 string = ""
intf, _ := net.InterfaceByName(device)
addrs, _ := intf.Addrs()
for _, a := range addrs {
addr := strings.Split(a.String(), "/")[0]
if (addr == "::1") { continue }
if (strings.Contains(addr, "fe80")) { continue }
if (strings.Contains(addr, "127.0.0.1")) { continue }
if (strings.Contains(addr, ":")) { ipv6 = addr }
if (strings.Count(addr, ".") == 3) { ipv4 = addr }
}
return ipv4, ipv6
}
//
// lisp_setup_keys
//
// Store crypto and hash data structures so they are ready for encryption and
// ICV checking.
//
func (r *Lisp_keys) lisp_setup_keys(crypto_key string, icv_key string) {
r.crypto_key = crypto_key
r.icv_key = icv_key
//
// Allocate an IV used for encryption during encapsulation. AES-GCM wants
// a 12-byte IV/nonce.
//
r.iv = make([]byte, 12)
binary.BigEndian.PutUint32(r.iv[0:4], rand.Uint32())
binary.BigEndian.PutUint64(r.iv[4:12], rand.Uint64())
ekey, err := hex.DecodeString(crypto_key)
if (err != nil) {
lprint("hex.DecodeString() failed for crypto-key, err %s", err)
return
}
block, err := aes.NewCipher(ekey)
if (err != nil) {
lprint("aes.NewCipher() failed, err %s", err)
return
}
r.crypto_alg, err = cipher.NewGCM(block)
if (err != nil) {
lprint("cipher.NewGCM() failed, err %s", err)
return
}
ikey, err := hex.DecodeString(icv_key)
if (err != nil) {
lprint("hex.DecodeString() failed for icv-key, err %s", err)
return
}
r.hash_alg = hmac.New(sha256.New, ikey)
lprint("Setup new keys")
return
}
//-----------------------------------------------------------------------------
| {
return(false)
} | conditional_block |
lisp.go | // ---------------------------------------------------------------------------
//
// Copyright 2013-2019 lispers.net - Dino Farinacci <farinacci@gmail.com>
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// ---------------------------------------------------------------------------
//
// lisp.go
//
// This file contains function and type definitions used by xtr.go and ipc.go.
//
// This is a external data-plane from the lispers.net control-plane perspective
// and must be run with "lisp xtr-parameters" sub-command "ipc-data-plane =
// yes".
//
// ---------------------------------------------------------------------------
package main
import "fmt"
import "bufio"
import "os"
import "os/exec"
import "strings"
import "strconv"
import "time"
import "net"
import "hash"
import "math/rand"
import "encoding/binary"
import "crypto/aes"
import "crypto/cipher"
import "crypto/sha256"
import "crypto/hmac"
import "encoding/hex"
//
// ---------- Variable Definitions ----------
//
var lisp_debug_logging bool = true
var lisp_data_plane_logging bool = false
//
// ---------- Constants Definitions ----------
//
const LISP_DATA_PORT = 4341
const LISP_CTRL_PORT = 4342
const LISP_L2_DATA_PORT = 8472
const LISP_VXLAN_DATA_PORT = 4789
const LISP_VXLAN_GPE_PORT = 4790
//
// ---------- Type Definitions ----------
//
type Lisp_address struct {
instance_id int
mask_len int
address net.IP
mask_address net.IPMask
address_string string
}
//
// lisp_print_address
//
// Return string with address. And optionally prepend "[<iid>]"
//
func (a *Lisp_address) lisp_print_address(with_iid bool) string {
if (a.address_string == "") { a.address_string = a.address.String() }
if (with_iid) {
iid := a.instance_id
if (iid == 0xffffff) { iid = -1 }
return(fmt.Sprintf("[%d]%s", iid, a.address_string))
}
return(a.address_string)
}
//
// lisp_store_address
//
// Store and instance-ID and string representation of an IPv4 or IPv6 address
// and store in Lisp_address format.
//
func (a *Lisp_address) lisp_store_address(iid int, addr string) bool {
var address string
//
// Is this address string an address or a prefix?
//
if (strings.Contains(addr, "/")) {
split := strings.Split(addr, "/")
address = split[0]
a.mask_len, _ = strconv.Atoi(split[1])
} else {
address = addr
a.mask_len = -1
}
a.instance_id = iid
//
// Parse address string. ParseIP() will put IPv4 addresses in a 16-byte
// array. We don't want that because address []byte length will determine
// address family.
//
a.address = net.ParseIP(address)
if (strings.Contains(addr, ".")) {
a.address = a.address[12:16]
}
//
// Set mask-length and mask address.
//
if (a.mask_len == -1) {
a.mask_len = len(a.address) * 8
}
a.mask_address = net.CIDRMask(a.mask_len, len(a.address) * 8)
//
// Store string for printing.
//
a.address_string = addr
return(true)
}
//
// lisp_is_ipv4
//
// Return true if Lisp_address is IPv4.
//
func (a *Lisp_address) lisp_is_ipv4() bool {
return((len(a.address) == 4))
}
//
// lisp_is_ipv6
//
// Return true if Lisp_address is IPv6.
//
func (a *Lisp_address) lisp_is_ipv6() bool {
return((len(a.address) == 16))
}
//
// lisp_is_multicast
//
// Return true if Lisp_address is an IPv4 or IPv6 multicast group address.
//
func (a *Lisp_address) lisp_is_multicast() bool {
if (a.lisp_is_ipv4()) {
return(int(a.address[0]) >= 224 && int(a.address[0]) < 240)
}
if (a.lisp_is_ipv6()) {
return(a.address[0] == 0xff)
}
return(false)
}
//
// lisp_make_address
//
// Store and instance-ID and byte representation of an IPv4 or IPv6 address
// and store in Lisp_address format. Note that Lisp_address.address_string
// is created when it is needed (in Lisp_address.lisp_print_address()).
//
func (a *Lisp_address) lisp_make_address(iid int, addr []byte) {
a.instance_id = iid
a.address = addr
a.mask_len = len(a.address) * 8
a.mask_address = net.CIDRMask(a.mask_len, len(a.address) * 8)
}
//
// lisp_exact_match
//
// Compare two addresses and return true if they match.
//
func (a *Lisp_address) lisp_exact_match(addr Lisp_address) (bool) {
if (len(a.address) != len(addr.address)) {
return(false)
}
if (a.mask_len != addr.mask_len) {
return(false)
}
if (a.instance_id != addr.instance_id) {
return(false)
}
if (a.address.Equal(addr.address) == false) {
return(false)
}
return(true)
}
//
// lisp_more_specific
//
// Return true if the supplied address is more specific than the method
// address. If the mask-lengths are the same, a true is returned.
//
func (a *Lisp_address) lisp_more_specific(addr Lisp_address) (bool) {
if (len(a.address) != len(addr.address)) {
return(false)
}
if (a.instance_id != addr.instance_id) {
return(false)
}
if (a.mask_len > addr.mask_len) {
return(false)
}
for i := 0; i < len(a.address); i++ {
if (a.mask_address[i] == 0) {
break
}
if ((a.address[i] & a.mask_address[i]) !=
(addr.address[i] & a.mask_address[i])) {
return(false)
}
}
return(true)
}
//
// lisp_hash_address
//
// Hash address to aid in selecting a source UDP port.
//
func (a *Lisp_address) lisp_hash_address() uint16 {
var hash uint = 0
for i := 0; i < len(a.address); i++ {
hash = hash ^ uint(a.address[i])
}
//
// Fold result into a short.
//
return(uint16(hash >> 16) ^ uint16(hash & 0xffff))
}
type Lisp_database struct {
eid_prefix Lisp_address
}
type Lisp_interface struct {
instance_id int
}
type Lisp_map_cache struct {
next_mc *Lisp_map_cache
eid_prefix Lisp_address
rloc_set []Lisp_rloc
rle_set []Lisp_rloc
}
type Lisp_rloc struct {
rloc Lisp_address
encap_port int
stats Lisp_stats
keys [4]*Lisp_keys
use_key_id int
}
type Lisp_keys struct {
crypto_key string
icv_key string
iv []byte
crypto_alg cipher.AEAD
hash_alg hash.Hash
}
type Lisp_stats struct {
packets uint64
bytes uint64
last_packet time.Time
}
//
// lisp_count
//
// Increment stats counters. Either do it for an RLOC/RLE entry or for the
// lisp_decap_stats map. Argument 'key-name' needs to be set if stats is nil.
//
func lisp_count(stats *Lisp_stats, key_name string, packet []byte) {
if (stats == nil) {
s, ok := lisp_decap_stats[key_name]
if (!ok) {
s = new(Lisp_stats)
lisp_decap_stats[key_name] = s
}
s.packets += 1
s.bytes += uint64(len(packet))
s.last_packet = time.Now()
} else {
stats.packets += 1
stats.bytes += uint64(len(packet))
stats.last_packet = time.Now()
}
}
//
// lisp_find_rloc
//
// Find RLOC entry in map-cache entry based on supplied RLOC address.
//
func (mc *Lisp_map_cache) lisp_find_rloc(rloc_addr Lisp_address) (*Lisp_rloc) {
for _, rloc := range mc.rloc_set {
if (rloc_addr.lisp_exact_match(rloc.rloc)) { return(&rloc) }
}
return(nil)
}
//
// lprint
//
// Print control-plane debug logging output when configured.
//
func lprint(format string, args ...interface{}) {
if (!lisp_debug_logging) {
return
}
ts := time.Now()
ms := ts.Nanosecond() / 1000000
ds := fmt.Sprintf("%02d/%02d/%02d %02d:%02d:%02d.%03d", ts.Month(),
ts.Day(), ts.Year(), ts.Hour(), ts.Minute(), ts.Second(), ms)
f := ds + ": xtr: " + format + "\n"
fmt.Printf(f, args...)
}
//
// dprint
//
// Print data-plane debug logging output when configured.
//
func dprint(format string, args ...interface{}) {
if (!lisp_data_plane_logging) {
return
}
ts := time.Now()
ms := ts.Nanosecond() / 1000000
ds := fmt.Sprintf("%02d/%02d/%02d %02d:%02d:%02d.%03d", ts.Month(),
ts.Day(), ts.Year(), ts.Hour(), ts.Minute(), ts.Second(), ms)
f := ds + ": xtr: " + format + "\n"
fmt.Printf(f, args...)
}
//
// debug
//
// For temporary debug output that highlights line in boldface red.
//
func debug(format string, args ...interface{}) {
f := red(">>>") + format + red("<<<") + "\n"
fmt.Printf(f, args...)
}
//
// debugv
//
// For temporary debug output that shows the contents of a data structure.
// Very useful for debugging.
//
func debugv(args interface{}) {
debug("%#v", args)
}
//
// lisp_command_output
//
// Execute a system command and return a string with output.
//
func | (command string) string {
cmd := exec.Command(command)
out, err := cmd.CombinedOutput()
if (err != nil) {
return("")
}
output := string(out)
return(output[0:len(output)-1])
}
//
// lisp_read_file
//
// Read entire file into a string.
//
func lisp_read_file(filename string) string {
fd, err := os.Open(filename)
if (err != nil) {
return("")
}
scanner := bufio.NewScanner(fd)
scanner.Scan()
fd.Close()
return(scanner.Text())
}
//
// lisp_write_file
//
// Write supplied string to supplied file.
//
func lisp_write_file(filename string, text string) {
fd, err := os.Create(filename)
if (err != nil) {
lprint("Could not create file %s", filename)
return
}
_, err = fd.WriteString(text)
if (err != nil) {
lprint("Could not write string to file %s", filename)
return
}
fd.Close()
}
//
// bold
//
// Make input string boldface.
//
func bold(str string) string {
return("\033[1m" + str + "\033[0m")
}
//
// green
//
// Make input string green.
//
func green(str string) string {
return("\033[92m" + bold(str) + "\033[0m")
}
//
// red
//
// Make input string red.
//
func red(str string) string {
return("\033[91m" + bold(str) + "\033[0m")
}
//
// lisp_log_packet
//
// Log a received data packet either native or LISP encapsulated. This function
// should be called only when lisp_data_plane_logging is true.
//
func lisp_log_packet(prefix_string string, packet []byte, is_lisp bool) {
var num int
var udp, lisp []byte
ip := true
if (packet[0] == 0x45) {
num = 20
} else if (packet[0] == 0x60) {
num = 40
} else {
num = 8
if (packet[8] == 0x45) { num += 20 }
if (packet[8] == 0x60) { num += 40 }
ip = false
}
udp = packet[num:num+8]
lisp = packet[num+8:num+16]
packet_string := fmt.Sprintf("%s: ", prefix_string)
p := packet
for i := 0; i < num; i += 4 {
packet_string += fmt.Sprintf("%02x%02x%02x%02x ", p[i], p[i+1],
p[i+2], p[i+3])
}
//
// Return for invalid packet.
//
if (ip == false) {
dprint(packet_string)
return
}
if (!is_lisp) {
dprint(packet_string)
return
}
packet_string += fmt.Sprintf("UDP: ")
for i := 0; i < 8; i += 4 {
packet_string += fmt.Sprintf("%02x%02x%02x%02x ", udp[i], udp[i+1],
udp[i+2], udp[i+3])
}
packet_string += fmt.Sprintf("LISP: ")
for i := 0; i < 8; i += 4 {
packet_string += fmt.Sprintf("%02x%02x%02x%02x ", lisp[i], lisp[i+1],
lisp[i+2], lisp[i+3])
}
dprint(packet_string)
}
//
// lisp_get_local_address
//
// Given supplied interface, return locaal IPv4 and IPv6 addresses.
//
func lisp_get_local_address(device string) (string, string) {
var ipv4 string = ""
var ipv6 string = ""
intf, _ := net.InterfaceByName(device)
addrs, _ := intf.Addrs()
for _, a := range addrs {
addr := strings.Split(a.String(), "/")[0]
if (addr == "::1") { continue }
if (strings.Contains(addr, "fe80")) { continue }
if (strings.Contains(addr, "127.0.0.1")) { continue }
if (strings.Contains(addr, ":")) { ipv6 = addr }
if (strings.Count(addr, ".") == 3) { ipv4 = addr }
}
return ipv4, ipv6
}
//
// lisp_setup_keys
//
// Store crypto and hash data structures so they are ready for encryption and
// ICV checking.
//
func (r *Lisp_keys) lisp_setup_keys(crypto_key string, icv_key string) {
r.crypto_key = crypto_key
r.icv_key = icv_key
//
// Allocate an IV used for encryption during encapsulation. AES-GCM wants
// a 12-byte IV/nonce.
//
r.iv = make([]byte, 12)
binary.BigEndian.PutUint32(r.iv[0:4], rand.Uint32())
binary.BigEndian.PutUint64(r.iv[4:12], rand.Uint64())
ekey, err := hex.DecodeString(crypto_key)
if (err != nil) {
lprint("hex.DecodeString() failed for crypto-key, err %s", err)
return
}
block, err := aes.NewCipher(ekey)
if (err != nil) {
lprint("aes.NewCipher() failed, err %s", err)
return
}
r.crypto_alg, err = cipher.NewGCM(block)
if (err != nil) {
lprint("cipher.NewGCM() failed, err %s", err)
return
}
ikey, err := hex.DecodeString(icv_key)
if (err != nil) {
lprint("hex.DecodeString() failed for icv-key, err %s", err)
return
}
r.hash_alg = hmac.New(sha256.New, ikey)
lprint("Setup new keys")
return
}
//-----------------------------------------------------------------------------
| lisp_command_output | identifier_name |
lisp.go | // ---------------------------------------------------------------------------
//
// Copyright 2013-2019 lispers.net - Dino Farinacci <farinacci@gmail.com>
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// ---------------------------------------------------------------------------
//
// lisp.go
//
// This file contains function and type definitions used by xtr.go and ipc.go.
//
// This is a external data-plane from the lispers.net control-plane perspective
// and must be run with "lisp xtr-parameters" sub-command "ipc-data-plane =
// yes".
//
// ---------------------------------------------------------------------------
package main
import "fmt"
import "bufio"
import "os"
import "os/exec"
import "strings"
import "strconv"
import "time"
import "net"
import "hash"
import "math/rand"
import "encoding/binary"
import "crypto/aes"
import "crypto/cipher"
import "crypto/sha256"
import "crypto/hmac"
import "encoding/hex"
//
// ---------- Variable Definitions ----------
//
var lisp_debug_logging bool = true
var lisp_data_plane_logging bool = false
//
// ---------- Constants Definitions ----------
//
const LISP_DATA_PORT = 4341
const LISP_CTRL_PORT = 4342
const LISP_L2_DATA_PORT = 8472
const LISP_VXLAN_DATA_PORT = 4789
const LISP_VXLAN_GPE_PORT = 4790
//
// ---------- Type Definitions ----------
//
type Lisp_address struct {
instance_id int
mask_len int
address net.IP
mask_address net.IPMask
address_string string
}
//
// lisp_print_address
//
// Return string with address. And optionally prepend "[<iid>]"
//
func (a *Lisp_address) lisp_print_address(with_iid bool) string {
if (a.address_string == "") { a.address_string = a.address.String() }
if (with_iid) {
iid := a.instance_id
if (iid == 0xffffff) { iid = -1 }
return(fmt.Sprintf("[%d]%s", iid, a.address_string))
}
return(a.address_string)
}
//
// lisp_store_address
//
// Store and instance-ID and string representation of an IPv4 or IPv6 address
// and store in Lisp_address format.
//
func (a *Lisp_address) lisp_store_address(iid int, addr string) bool {
var address string
//
// Is this address string an address or a prefix?
//
if (strings.Contains(addr, "/")) {
split := strings.Split(addr, "/")
address = split[0]
a.mask_len, _ = strconv.Atoi(split[1])
} else {
address = addr
a.mask_len = -1
}
a.instance_id = iid
//
// Parse address string. ParseIP() will put IPv4 addresses in a 16-byte
// array. We don't want that because address []byte length will determine
// address family.
//
a.address = net.ParseIP(address)
if (strings.Contains(addr, ".")) {
a.address = a.address[12:16]
}
//
// Set mask-length and mask address.
//
if (a.mask_len == -1) {
a.mask_len = len(a.address) * 8
}
a.mask_address = net.CIDRMask(a.mask_len, len(a.address) * 8)
//
// Store string for printing.
//
a.address_string = addr
return(true)
}
//
// lisp_is_ipv4
//
// Return true if Lisp_address is IPv4.
//
func (a *Lisp_address) lisp_is_ipv4() bool |
//
// lisp_is_ipv6
//
// Return true if Lisp_address is IPv6.
//
func (a *Lisp_address) lisp_is_ipv6() bool {
return((len(a.address) == 16))
}
//
// lisp_is_multicast
//
// Return true if Lisp_address is an IPv4 or IPv6 multicast group address.
//
func (a *Lisp_address) lisp_is_multicast() bool {
if (a.lisp_is_ipv4()) {
return(int(a.address[0]) >= 224 && int(a.address[0]) < 240)
}
if (a.lisp_is_ipv6()) {
return(a.address[0] == 0xff)
}
return(false)
}
//
// lisp_make_address
//
// Store and instance-ID and byte representation of an IPv4 or IPv6 address
// and store in Lisp_address format. Note that Lisp_address.address_string
// is created when it is needed (in Lisp_address.lisp_print_address()).
//
func (a *Lisp_address) lisp_make_address(iid int, addr []byte) {
a.instance_id = iid
a.address = addr
a.mask_len = len(a.address) * 8
a.mask_address = net.CIDRMask(a.mask_len, len(a.address) * 8)
}
//
// lisp_exact_match
//
// Compare two addresses and return true if they match.
//
func (a *Lisp_address) lisp_exact_match(addr Lisp_address) (bool) {
if (len(a.address) != len(addr.address)) {
return(false)
}
if (a.mask_len != addr.mask_len) {
return(false)
}
if (a.instance_id != addr.instance_id) {
return(false)
}
if (a.address.Equal(addr.address) == false) {
return(false)
}
return(true)
}
//
// lisp_more_specific
//
// Return true if the supplied address is more specific than the method
// address. If the mask-lengths are the same, a true is returned.
//
func (a *Lisp_address) lisp_more_specific(addr Lisp_address) (bool) {
if (len(a.address) != len(addr.address)) {
return(false)
}
if (a.instance_id != addr.instance_id) {
return(false)
}
if (a.mask_len > addr.mask_len) {
return(false)
}
for i := 0; i < len(a.address); i++ {
if (a.mask_address[i] == 0) {
break
}
if ((a.address[i] & a.mask_address[i]) !=
(addr.address[i] & a.mask_address[i])) {
return(false)
}
}
return(true)
}
//
// lisp_hash_address
//
// Hash address to aid in selecting a source UDP port.
//
func (a *Lisp_address) lisp_hash_address() uint16 {
var hash uint = 0
for i := 0; i < len(a.address); i++ {
hash = hash ^ uint(a.address[i])
}
//
// Fold result into a short.
//
return(uint16(hash >> 16) ^ uint16(hash & 0xffff))
}
type Lisp_database struct {
eid_prefix Lisp_address
}
type Lisp_interface struct {
instance_id int
}
type Lisp_map_cache struct {
next_mc *Lisp_map_cache
eid_prefix Lisp_address
rloc_set []Lisp_rloc
rle_set []Lisp_rloc
}
type Lisp_rloc struct {
rloc Lisp_address
encap_port int
stats Lisp_stats
keys [4]*Lisp_keys
use_key_id int
}
type Lisp_keys struct {
crypto_key string
icv_key string
iv []byte
crypto_alg cipher.AEAD
hash_alg hash.Hash
}
type Lisp_stats struct {
packets uint64
bytes uint64
last_packet time.Time
}
//
// lisp_count
//
// Increment stats counters. Either do it for an RLOC/RLE entry or for the
// lisp_decap_stats map. Argument 'key-name' needs to be set if stats is nil.
//
func lisp_count(stats *Lisp_stats, key_name string, packet []byte) {
if (stats == nil) {
s, ok := lisp_decap_stats[key_name]
if (!ok) {
s = new(Lisp_stats)
lisp_decap_stats[key_name] = s
}
s.packets += 1
s.bytes += uint64(len(packet))
s.last_packet = time.Now()
} else {
stats.packets += 1
stats.bytes += uint64(len(packet))
stats.last_packet = time.Now()
}
}
//
// lisp_find_rloc
//
// Find RLOC entry in map-cache entry based on supplied RLOC address.
//
func (mc *Lisp_map_cache) lisp_find_rloc(rloc_addr Lisp_address) (*Lisp_rloc) {
for _, rloc := range mc.rloc_set {
if (rloc_addr.lisp_exact_match(rloc.rloc)) { return(&rloc) }
}
return(nil)
}
//
// lprint
//
// Print control-plane debug logging output when configured.
//
func lprint(format string, args ...interface{}) {
if (!lisp_debug_logging) {
return
}
ts := time.Now()
ms := ts.Nanosecond() / 1000000
ds := fmt.Sprintf("%02d/%02d/%02d %02d:%02d:%02d.%03d", ts.Month(),
ts.Day(), ts.Year(), ts.Hour(), ts.Minute(), ts.Second(), ms)
f := ds + ": xtr: " + format + "\n"
fmt.Printf(f, args...)
}
//
// dprint
//
// Print data-plane debug logging output when configured.
//
func dprint(format string, args ...interface{}) {
if (!lisp_data_plane_logging) {
return
}
ts := time.Now()
ms := ts.Nanosecond() / 1000000
ds := fmt.Sprintf("%02d/%02d/%02d %02d:%02d:%02d.%03d", ts.Month(),
ts.Day(), ts.Year(), ts.Hour(), ts.Minute(), ts.Second(), ms)
f := ds + ": xtr: " + format + "\n"
fmt.Printf(f, args...)
}
//
// debug
//
// For temporary debug output that highlights line in boldface red.
//
func debug(format string, args ...interface{}) {
f := red(">>>") + format + red("<<<") + "\n"
fmt.Printf(f, args...)
}
//
// debugv
//
// For temporary debug output that shows the contents of a data structure.
// Very useful for debugging.
//
func debugv(args interface{}) {
debug("%#v", args)
}
//
// lisp_command_output
//
// Execute a system command and return a string with output.
//
func lisp_command_output(command string) string {
cmd := exec.Command(command)
out, err := cmd.CombinedOutput()
if (err != nil) {
return("")
}
output := string(out)
return(output[0:len(output)-1])
}
//
// lisp_read_file
//
// Read entire file into a string.
//
func lisp_read_file(filename string) string {
fd, err := os.Open(filename)
if (err != nil) {
return("")
}
scanner := bufio.NewScanner(fd)
scanner.Scan()
fd.Close()
return(scanner.Text())
}
//
// lisp_write_file
//
// Write supplied string to supplied file.
//
func lisp_write_file(filename string, text string) {
fd, err := os.Create(filename)
if (err != nil) {
lprint("Could not create file %s", filename)
return
}
_, err = fd.WriteString(text)
if (err != nil) {
lprint("Could not write string to file %s", filename)
return
}
fd.Close()
}
//
// bold
//
// Make input string boldface.
//
func bold(str string) string {
return("\033[1m" + str + "\033[0m")
}
//
// green
//
// Make input string green.
//
func green(str string) string {
return("\033[92m" + bold(str) + "\033[0m")
}
//
// red
//
// Make input string red.
//
func red(str string) string {
return("\033[91m" + bold(str) + "\033[0m")
}
//
// lisp_log_packet
//
// Log a received data packet either native or LISP encapsulated. This function
// should be called only when lisp_data_plane_logging is true.
//
func lisp_log_packet(prefix_string string, packet []byte, is_lisp bool) {
var num int
var udp, lisp []byte
ip := true
if (packet[0] == 0x45) {
num = 20
} else if (packet[0] == 0x60) {
num = 40
} else {
num = 8
if (packet[8] == 0x45) { num += 20 }
if (packet[8] == 0x60) { num += 40 }
ip = false
}
udp = packet[num:num+8]
lisp = packet[num+8:num+16]
packet_string := fmt.Sprintf("%s: ", prefix_string)
p := packet
for i := 0; i < num; i += 4 {
packet_string += fmt.Sprintf("%02x%02x%02x%02x ", p[i], p[i+1],
p[i+2], p[i+3])
}
//
// Return for invalid packet.
//
if (ip == false) {
dprint(packet_string)
return
}
if (!is_lisp) {
dprint(packet_string)
return
}
packet_string += fmt.Sprintf("UDP: ")
for i := 0; i < 8; i += 4 {
packet_string += fmt.Sprintf("%02x%02x%02x%02x ", udp[i], udp[i+1],
udp[i+2], udp[i+3])
}
packet_string += fmt.Sprintf("LISP: ")
for i := 0; i < 8; i += 4 {
packet_string += fmt.Sprintf("%02x%02x%02x%02x ", lisp[i], lisp[i+1],
lisp[i+2], lisp[i+3])
}
dprint(packet_string)
}
//
// lisp_get_local_address
//
// Given supplied interface, return locaal IPv4 and IPv6 addresses.
//
func lisp_get_local_address(device string) (string, string) {
var ipv4 string = ""
var ipv6 string = ""
intf, _ := net.InterfaceByName(device)
addrs, _ := intf.Addrs()
for _, a := range addrs {
addr := strings.Split(a.String(), "/")[0]
if (addr == "::1") { continue }
if (strings.Contains(addr, "fe80")) { continue }
if (strings.Contains(addr, "127.0.0.1")) { continue }
if (strings.Contains(addr, ":")) { ipv6 = addr }
if (strings.Count(addr, ".") == 3) { ipv4 = addr }
}
return ipv4, ipv6
}
//
// lisp_setup_keys
//
// Store crypto and hash data structures so they are ready for encryption and
// ICV checking.
//
func (r *Lisp_keys) lisp_setup_keys(crypto_key string, icv_key string) {
r.crypto_key = crypto_key
r.icv_key = icv_key
//
// Allocate an IV used for encryption during encapsulation. AES-GCM wants
// a 12-byte IV/nonce.
//
r.iv = make([]byte, 12)
binary.BigEndian.PutUint32(r.iv[0:4], rand.Uint32())
binary.BigEndian.PutUint64(r.iv[4:12], rand.Uint64())
ekey, err := hex.DecodeString(crypto_key)
if (err != nil) {
lprint("hex.DecodeString() failed for crypto-key, err %s", err)
return
}
block, err := aes.NewCipher(ekey)
if (err != nil) {
lprint("aes.NewCipher() failed, err %s", err)
return
}
r.crypto_alg, err = cipher.NewGCM(block)
if (err != nil) {
lprint("cipher.NewGCM() failed, err %s", err)
return
}
ikey, err := hex.DecodeString(icv_key)
if (err != nil) {
lprint("hex.DecodeString() failed for icv-key, err %s", err)
return
}
r.hash_alg = hmac.New(sha256.New, ikey)
lprint("Setup new keys")
return
}
//-----------------------------------------------------------------------------
| {
return((len(a.address) == 4))
} | identifier_body |
FastSlamV2.py | import rospy
import tf
from nav_msgs.msg import Odometry
from aruco_msgs.msg import MarkerArray
import numpy as np
import matplotlib.pyplot as plt
import time
#Ponto de partida (0,0) - Porta da sala
#Marker de id 0 tem coordenadas (x_map0,y_map0) em metros
x_map=[0.0]*41;
y_map=[0.0]*41;
x_map[0]=0.097+0.385;
y_map[0]=0.945;
x_map[1]=x_map[0]+0.77;
y_map[1]=y_map[0];
x_map[2]=x_map[1]+0.70;
y_map[2]=y_map[0];
x_map[3]=x_map[2]+2.495;
y_map[3]=y_map[0];
x_map[4]=x_map[3]+3.00;
y_map[4]=y_map[0];
x_map[5]=x_map[4]+3.00;
y_map[5]=y_map[0];
x_map[6]=x_map[5]+2.74;
y_map[6]=y_map[0];
x_map[7]=x_map[6]+2.643;
y_map[7]=y_map[0]-0.20;
x_map[8]=x_map[7];
x_map[9]=x_map[7];
x_map[10]=x_map[7];
x_map[11]=x_map[7];
y_map[8]=y_map[7]-1.205;
y_map[9]=y_map[8]-1.17;
y_map[10]=y_map[8]-4.84;
y_map[11]=y_map[10]-6.93;
y_map[12]=y_map[11]-2.55;
x_map[12]=x_map[11]-0.27;
y_map[13]=y_map[14]=y_map[15]=y_map[16]=y_map[17]=y_map[12];
x_map[13]=x_map[12]-0.60;
x_map[14]=x_map[13]-0.95;
x_map[15]=x_map[14]-4.095;
x_map[16]=x_map[15]-3.005;
x_map[17]=x_map[16]-4.20;
x_map[18]=x_map[19]=x_map[23]=x_map[30]=x_map[20]=x_map[21]=0.097;
x_map[22]=0;
y_map[22]=y_map[18]+0.67;
y_map[18]=y_map[17]+0.22;
y_map[19]=y_map[18]+0.67+0.615;
y_map[23]=y_map[19]+0.55;
y_map[30]=y_map[23]+0.70;
y_map[20]=y_map[23]+3.93;
y_map[21]=y_map[20]+5.895;
x_map[24]=1.67+0.097;
y_map[24]=-0.84;
x_map[25]=x_map[4];
y_map[25]=y_map[4]-1.575;
x_map[26]=x_map[9]-1.235;
y_map[26]=y_map[9];
x_map[28]=x_map[26]+0.10;
y_map[28]=y_map[26]-2.00;
x_map[27]=x_map[26];
y_map[27]=y_map[26]-2.00-4.21;
x_map[29]=x_map[14]-1.45;
y_map[29]=y_map[14]+0.20;
# Markers da zona dos elevadores
x_map[31]=x_map[21]+1.79;
y_map[31]=y_map[21]+0.095;
x_map[33]=x_map[31]+2.15;
y_map[33]=y_map[31]+0.10;
y_map[37]=y_map[31];
x_map[37]=x_map[33]+1.00;
x_map[40]=x_map[37]+1.00;
y_map[40]=y_map[37]-1.10;
x_map[39]=x_map[37]+2.65;
y_map[39]=y_map[37]+0.35;
x_map[38]=x_map[39];
y_map[38]=y_map[39]-2.66;
y_map[36]=y_map[34]=y_map[35]=y_map[38];
x_map[36]=x_map[38]-1.46;
x_map[34]=x_map[36]-1.96;
x_map[35]=x_map[34]-1.46;
y_map[32]=y_map[35]+0.28;
x_map[32]=x_map[35]-0.68-0.28;
camara_distance_z = 0.12 # 15.5 cm <-> 13 cm #dia 13/12/2018 <-> 12 cm => 12.5 cm inicio a 81 cm
camara_distance_x = 0.011 # 1.1 cm
# Constants
NUMBER_MARKERS = 41
KEY_NUMBER = 2**(5*5) # number of total combinations possible in aruco code
number_of_dimensions = 2
Frequency = 9.5
NUMBER_PARTICLES = 100
translation_noise = 0.1
rotation_noise = 0.1
noise_factor = 1
minimum_move = 0
Sensor_noise = 0.1
Odom_noise = 0.1
validity_threshold = 50
circle = np.arange(0, 2*np.pi, 0.1)
o_size = 0.3
line = np.arange(0, o_size, o_size)
fig, ax = plt.subplots()
robot_line, = ax.plot([0], [0], color='black', marker='o', markersize=12)
robot_orientation, = ax.plot(line, line, color='lime', marker='.', markersize=2, linewidth=2)
marker_line, = ax.plot(circle, circle, color='red', marker='.', markersize=8, linestyle="")
robot_path, = ax.plot([0], [0], color='black', marker='.', markersize=2, linewidth=0.2)
path_map, = plt.plot(x_map, y_map, color='grey', marker='*', markersize=8, linestyle="")
x_f = [circle]*NUMBER_MARKERS
y_f = [circle]*NUMBER_MARKERS
plt.ion()
plt.xlim(-10, 20)
plt.ylim(-20, 10)
plt.xlabel('X', fontsize=10) # X axis label
plt.ylabel('Y', fontsize=10) # Y axis label
plt.title('FastSlam 2.0')
#plt.legend()
plt.grid(True) # Enabling gridding
def drawing_plot(particles):
Max = 0
Max_id = 0
for i in range(NUMBER_PARTICLES):
if particles[i].get_weight() > Max:
Max = particles[i].get_weight()
Max_id = i
pose = particles[Max_id].get_position()
x = pose[0]
y = pose[1]
o = pose[2]
x_o = x + o_size*np.cos(o)
y_o = y + o_size*np.sin(o)
x_path, y_path = particles[Max_id].get_path()
plt.show(block=False)
robot_path.set_xdata(x_path)
robot_path.set_ydata(y_path)
ax.draw_artist(ax.patch)
ax.draw_artist(robot_path)
robot_line.set_xdata(x)
robot_line.set_ydata(y)
ax.draw_artist(ax.patch)
ax.draw_artist(robot_line)
robot_orientation.set_xdata([x, x_o])
robot_orientation.set_ydata([y, y_o])
ax.draw_artist(ax.patch)
ax.draw_artist(robot_orientation)
Landmarkers = particles[Max_id].get_landmarkers()
i = 0
for marker in Landmarkers:
if marker == None:
x_f[i] = KEY_NUMBER + circle
y_f[i] = KEY_NUMBER + circle
i += 1
continue
pose_m = marker.get_marker_position()
x_m = pose_m[0]
y_m = pose_m[1]
std_m = marker.get_marker_covariance()
x_std_m = std_m[0][0]
y_std_m = std_m[1][1]
x_f[i] = x_m + x_std_m * np.cos(circle)
y_f[i] = y_m + y_std_m * np.sin(circle)
i += 1
marker_line.set_xdata(x_f)
marker_line.set_ydata(y_f)
ax.draw_artist(ax.patch)
ax.draw_artist(marker_line)
fig.canvas.flush_events()
def resample_particles(particles, updated_marker):
# Returns a new set of particles obtained by performing stochastic universal sampling, according to the particle weights.
# distance between pointers
step = 1.0/NUMBER_PARTICLES
# random start of first pointer
r = np.random.uniform(0,step)
# where we are along the weights
c = particles[0].get_weight()
# index of weight container and corresponding particle
i = 0
index = 0
new_particles = []
#loop over all particle weights
for particle in particles:
#go through the weights until you find the particle
u = r + index*step
while u > c:
i = i + 1
c = c + particles[i].get_weight()
#add that particle
if i == index:
new_particle = particles[i]
new_particle.set_weight(step)
else:
new_particle = particles[i].copy(updated_marker)
#new_particle = copy.deepcopy(particles[i])
#new_particle.set_weight(step)
new_particles.append(new_particle)
#increase the threshold
index += 1
del particles
return new_particles
class Particle():
#each particle has a pose(x,y,o), a weight(w) and a series of kalman filters for every landmark
#in the beggining all particles are in the origin frame of the world (0,0,0)
def __init__(self):
self.X_robot = np.array([0, 0, 0], dtype='float64').transpose()
self.weight = 1.0/NUMBER_PARTICLES
self.Landmarkers = [None]*NUMBER_MARKERS
self.x_path = np.array([0], dtype='float64')
self.y_path = np.array([0], dtype='float64')
def get_kalman_filters(self, marker_id, Z):
if self.Landmarkers[marker_id] == None:
self.Landmarkers[marker_id] = KalmanFilter()
return self.Landmarkers[marker_id]
def particle_prediction(self, motion_model):
#if the robot moves we just add the motion model to the previous pose to predict the particle position
x = 0
y = 1
o = 2
noise = np.array([np.random.normal(0,translation_noise), np.random.normal(0,translation_noise), np.random.normal(0,rotation_noise)], dtype='float64').transpose()
noise = noise*motion_model*noise_factor
self.X_robot = self.X_robot + motion_model + noise
while self.X_robot[o] > np.pi:
self.X_robot[o] = self.X_robot[o] - 2*np.pi
while self.X_robot[o] < -np.pi:
self.X_robot[o] = self.X_robot[o] + 2*np.pi
self.x_path = np.insert(self.x_path, 0, self.X_robot[x])
self.y_path = np.insert(self.y_path, 0, self.X_robot[y])
return self.X_robot
def update_weight(self, marker_id):
std = self.Landmarkers[marker_id].get_marker_covariance()
dev = self.Landmarkers[marker_id].get_marker_validity()
fact = np.sqrt(np.linalg.det(2* np.pi * std))
expo = - np.dot(dev.T, np.linalg.inv(std).dot(dev))/2
self.weight = self.weight / fact * np.exp(expo)
def get_weight(self):
return self.weight
def normalize_weight(self, total_weight):
self.weight = self.weight / total_weight
def set_weight(self, new_weight):
self.weight = new_weight
def get_position(self):
return self.X_robot
def get_landmarkers(self):
return self.Landmarkers
def get_path(self):
return self.x_path, self.y_path
def copy(self, updated_marker):
new_particle = Particle()
del new_particle.x_path
del new_particle.y_path
del new_particle.Landmarkers
del new_particle.X_robot
new_particle.x_path = np.copy(self.x_path)
new_particle.y_path = np.copy(self.y_path)
for i in range(len(self.Landmarkers)):
if self.Landmarkers[i] != None and updated_marker[i] == True:
self.Landmarkers[i] = self.Landmarkers[i].copy()
new_particle.Landmarkers = self.Landmarkers
new_particle.X_robot = np.copy(self.X_robot)
return new_particle
class KalmanFilter():
def __init__(self):
# X_ espected value of landmarks' position (x,y)
# X_robot (x, y, yaw)
# H gradient of markers' relative position to robot (h:(x_m,y_m) -> (distance, orientation); H = dh/dX_ and X_ = X(k+1|k))
# S covariance matrix markers' position
# Q covariance matrix markers' measurement
# V diference between measurement and estimated markers' position
self.first = True
self.R_t = np.identity(number_of_dimensions, dtype='float64')*Sensor_noise #sensor noise
self.P_t = np.identity(number_of_dimensions+1, dtype='float64')*Odom_noise #sensor noise
def compute_G(self, X_robot):
x = 0 # x position
y = 1 # y position
y = self.X_[y] - X_robot[y]
x = self.X_[x] - X_robot[x]
# compute H
denominator = x**2 + y**2
g_o11 = x / np.sqrt(denominator)
g_o12 = y / np.sqrt(denominator)
g_o21 = -y / denominator
g_o22 = x / denominator
self.G_o = np.array([[g_o11, g_o12], [g_o21, g_o22]])
g_s11 = -g_o11
g_s12 = -g_o12
g_s21 = -g_o21
g_s22 = -g_o22
self.G_s = np.array([[g_s21, g_s22, 0], [g_s21, g_s22, -1]])
def Apply_EKF(self, X_robot, Z):
x = 0 # x position
y = 1 # y position
o = 2 # o orientaio
d = 0 # distance measured
fi = 1 # orientaion of the measurement
if self.first == True:
# the angle is in the direction y to x, reverse of the usual x to y
angle = (X_robot[o] + Z[fi])
self.X_ = np.array([X_robot[x] + Z[d]*np.cos(angle), X_robot[y] + Z[d]*np.sin(angle)], dtype='float64').transpose() # first landmark position
self.compute_G(X_robot)
self.S = np.linalg.inv(self.G_o.dot(np.linalg.inv(self.R_t).dot(self.G_o.T)))
self.V = np.array([0, 0], dtype='float64').transpose()
self.L_t = np.identity(number_of_dimensions, dtype='float64')
else:
# Prediction
y = self.X_[y] - X_robot[y]
x = self.X_[x] - X_robot[x]
d = np.sqrt(x**2 + y**2) # distance
fi = np.arctan2(y, x) - X_robot[o] # direction
while fi > np.pi:
fi = fi - 2*np.pi
while fi < -np.pi:
fi = fi + 2*np.pi
Z_ = np.array([d, fi], dtype='float64').transpose()
self.compute_G(X_robot)
self.Q = self.G_o.dot(self.S.dot(self.G_o.T)) + self.R_t
# Observation
self.V = np.subtract(Z, Z_) # Z = [d, teta]
def Update(self):
# Update
if self.first == False:
# K kalman gain
K = self.S.dot(self.G_o.T.dot(np.linalg.inv(self.Q)))
self.X_ = self.X_ + K.dot(self.V)
self.L_t = self.G_s.dot(self.P_t.dot(self.G_s.T)) + self.G_o.dot(self.S.dot(self.G_o.T)) + self.R_t
self.S = (np.identity(number_of_dimensions)- K.dot(self.G_o)).dot(self.S)
else:
self.first = False
def get_marker_position(self):
return self.X_
def get_marker_covariance(self):
return self.L_t
def get_marker_validity(self):
return self.V
def measurement_validition(self):
return np.dot(self.V.T, np.linalg.inv(self.L_t)).dot(self.V)
def copy(self):
new_KF = KalmanFilter()
new_KF.X_ = np.copy(self.X_)
new_KF.S = np.copy(self.S)
new_KF.L_t = np.copy(self.L_t)
new_KF.V = np.copy(self.V)
new_KF.first = False
return new_KF
class markers():
def __init__(self):
self.could_it_read = False
self.z_distance_left_eye_to_robot_wheel = camara_distance_z
self.x_distance_left_eye_to_robot_wheel = camara_distance_x
self.markers_info = [None]*NUMBER_MARKERS
self.list_ids = np.ones(NUMBER_MARKERS, dtype='int32')*KEY_NUMBER
def callback_Markers(self, data):
# static tf could be applied here: z = z + z_distance_left_eye_to_robot_wheel, x = x + x_distance_left_eye_to_robot_wheel
for i in range(NUMBER_MARKERS):
try:
marker_info = data.markers.pop()
except:
break
self.list_ids[i] = marker_info.id
self.markers_info[marker_info.id] = marker_info
def get_measerment(self, index):
x = self.markers_info[index].pose.pose.position.x # right-left
z = self.markers_info[index].pose.pose.position.z # front-back
# position of the marker relative to base_link
z = z + self.z_distance_left_eye_to_robot_wheel
x = x + self.x_distance_left_eye_to_robot_wheel
marker_distance = np.sqrt(z**2+x**2)
marker_direction = np.arctan(x/z)
return np.array([marker_distance, -marker_direction], dtype='float64').transpose()
def get_list_ids(self):
return self.list_ids
def reset_list_ids(self):
i = 0
while self.list_ids[i] != KEY_NUMBER:
self.list_ids[i] = KEY_NUMBER
i += 1
def marker_info(self, index):
return self.markers_info[index]
class odom():
def __init__(self):
self.read_move = np.array([0, 0, 0], dtype='float64').transpose()
self.first_read = True
def callback_odom(self, data):
# robo_frame
frame_id = data.header.frame_id # odom
child_frame_id = data.child_frame_id # base_link
# pose
x = data.pose.pose.position.x # front-back
y = data.pose.pose.position.y # right-left
orientation_x = data.pose.pose.orientation.x
orientation_y = data.pose.pose.orientation.y
orientation_z = data.pose.pose.orientation.z
orientation_w = data.pose.pose.orientation.w
roll, pitch, yaw = tf.transformations.euler_from_quaternion((orientation_x, orientation_y, orientation_z, orientation_w))
if self.first_read == True:
self.last_position = np.array([x, y, yaw], dtype='float64').transpose()
self.total_movement = np.array([0, 0, 0], dtype='float64').transpose()
self.first_read = False
self.odom_position = np.array([x, y, yaw], dtype='float64').transpose()
self.movement = np.subtract(self.odom_position, self.last_position)
self.total_movement = np.add(self.total_movement, np.absolute(self.movement))
if self.movement[2] > np.pi:
self.movement[2] = 2*np.pi - self.movement[2]
if self.movement[2] < -np.pi:
self.movement[2] = - 2*np.pi - self.movement[2]
self.last_position = self.odom_position
self.read_move = np.add(self.read_move, self.movement)
def | (self):
return self.read_move
def get_movement(self):
msg = self.read_move
self.read_move = np.array([0, 0, 0], dtype='float64').transpose()
return msg
def get_total_movement(self):
return self.total_movement
def FastSlam():
odom_measurement = odom()
particles = [Particle() for i in range(NUMBER_PARTICLES)]
updated_marker = [False]*NUMBER_MARKERS
marker_measurement = markers()
rospy.init_node('FastSlam', anonymous=True)
frequency = rospy.Rate(Frequency)
rospy.Subscriber("RosAria/pose", Odometry, odom_measurement.callback_odom)
#Subscriber of aruco publisher topic with arucos observations
rospy.Subscriber('aruco_marker_publisher/markers', MarkerArray, marker_measurement.callback_Markers)
while not rospy.is_shutdown():
start_time = time.time()
motion_model = odom_measurement.actual_movement()
landmarkers_ids = marker_measurement.get_list_ids()
if landmarkers_ids[0] != KEY_NUMBER and np.linalg.norm(motion_model) > minimum_move:
total_weight = 0
motion_model = odom_measurement.get_movement()
for i in range(NUMBER_PARTICLES):
expected_state = particles[i].particle_prediction(motion_model)
for marker_id in landmarkers_ids:
if marker_id == KEY_NUMBER:
break
updated_marker[marker_id] = True
landmarker_measurement = marker_measurement.get_measerment(marker_id)
kalman_filter = particles[i].get_kalman_filters(marker_id, landmarker_measurement)
kalman_filter.Apply_EKF(expected_state, landmarker_measurement)
validity_info = kalman_filter.measurement_validition()
particles[i].update_weight(marker_id)
##############################################################
if np.linalg.norm(validity_info) < validity_threshold:
##############################################################
kalman_filter.Update()
total_weight += particles[i].get_weight()
sum_weights = 0
for i in range(NUMBER_PARTICLES):
particles[i].normalize_weight(total_weight)
sum_weights += particles[i].get_weight()**2
drawing_plot(particles)
neff = 1.0/sum_weights
if neff < float(NUMBER_PARTICLES)/2:
particles = resample_particles(particles, updated_marker)
del updated_marker
updated_marker = [False]*NUMBER_MARKERS
marker_measurement.reset_list_ids()
elapsed_time = time.time() - start_time
if elapsed_time > 9*10**-2:
print elapsed_time
frequency.sleep()
if __name__ == '__main__':
FastSlam() | actual_movement | identifier_name |
FastSlamV2.py | import rospy
import tf
from nav_msgs.msg import Odometry
from aruco_msgs.msg import MarkerArray
import numpy as np
import matplotlib.pyplot as plt
import time
#Ponto de partida (0,0) - Porta da sala
#Marker de id 0 tem coordenadas (x_map0,y_map0) em metros
x_map=[0.0]*41;
y_map=[0.0]*41;
x_map[0]=0.097+0.385;
y_map[0]=0.945;
x_map[1]=x_map[0]+0.77;
y_map[1]=y_map[0];
x_map[2]=x_map[1]+0.70;
y_map[2]=y_map[0];
x_map[3]=x_map[2]+2.495;
y_map[3]=y_map[0];
x_map[4]=x_map[3]+3.00;
y_map[4]=y_map[0];
x_map[5]=x_map[4]+3.00;
y_map[5]=y_map[0];
x_map[6]=x_map[5]+2.74;
y_map[6]=y_map[0];
x_map[7]=x_map[6]+2.643;
y_map[7]=y_map[0]-0.20;
x_map[8]=x_map[7];
x_map[9]=x_map[7];
x_map[10]=x_map[7];
x_map[11]=x_map[7];
y_map[8]=y_map[7]-1.205;
y_map[9]=y_map[8]-1.17;
y_map[10]=y_map[8]-4.84;
y_map[11]=y_map[10]-6.93;
y_map[12]=y_map[11]-2.55;
x_map[12]=x_map[11]-0.27;
y_map[13]=y_map[14]=y_map[15]=y_map[16]=y_map[17]=y_map[12];
x_map[13]=x_map[12]-0.60;
x_map[14]=x_map[13]-0.95;
x_map[15]=x_map[14]-4.095;
x_map[16]=x_map[15]-3.005;
x_map[17]=x_map[16]-4.20;
x_map[18]=x_map[19]=x_map[23]=x_map[30]=x_map[20]=x_map[21]=0.097;
x_map[22]=0;
y_map[22]=y_map[18]+0.67;
y_map[18]=y_map[17]+0.22;
y_map[19]=y_map[18]+0.67+0.615;
y_map[23]=y_map[19]+0.55;
y_map[30]=y_map[23]+0.70;
y_map[20]=y_map[23]+3.93;
y_map[21]=y_map[20]+5.895;
x_map[24]=1.67+0.097;
y_map[24]=-0.84;
x_map[25]=x_map[4];
y_map[25]=y_map[4]-1.575;
x_map[26]=x_map[9]-1.235;
y_map[26]=y_map[9];
x_map[28]=x_map[26]+0.10;
y_map[28]=y_map[26]-2.00;
x_map[27]=x_map[26];
y_map[27]=y_map[26]-2.00-4.21;
x_map[29]=x_map[14]-1.45;
y_map[29]=y_map[14]+0.20;
# Markers da zona dos elevadores
x_map[31]=x_map[21]+1.79;
y_map[31]=y_map[21]+0.095;
x_map[33]=x_map[31]+2.15;
y_map[33]=y_map[31]+0.10;
y_map[37]=y_map[31];
x_map[37]=x_map[33]+1.00;
x_map[40]=x_map[37]+1.00;
y_map[40]=y_map[37]-1.10;
x_map[39]=x_map[37]+2.65;
y_map[39]=y_map[37]+0.35;
x_map[38]=x_map[39];
y_map[38]=y_map[39]-2.66;
y_map[36]=y_map[34]=y_map[35]=y_map[38];
x_map[36]=x_map[38]-1.46;
x_map[34]=x_map[36]-1.96;
x_map[35]=x_map[34]-1.46;
y_map[32]=y_map[35]+0.28;
x_map[32]=x_map[35]-0.68-0.28;
camara_distance_z = 0.12 # 15.5 cm <-> 13 cm #dia 13/12/2018 <-> 12 cm => 12.5 cm inicio a 81 cm
camara_distance_x = 0.011 # 1.1 cm
# Constants
NUMBER_MARKERS = 41
KEY_NUMBER = 2**(5*5) # number of total combinations possible in aruco code
number_of_dimensions = 2
Frequency = 9.5
NUMBER_PARTICLES = 100
translation_noise = 0.1
rotation_noise = 0.1
noise_factor = 1
minimum_move = 0
Sensor_noise = 0.1
Odom_noise = 0.1
validity_threshold = 50
circle = np.arange(0, 2*np.pi, 0.1)
o_size = 0.3
line = np.arange(0, o_size, o_size)
fig, ax = plt.subplots()
robot_line, = ax.plot([0], [0], color='black', marker='o', markersize=12)
robot_orientation, = ax.plot(line, line, color='lime', marker='.', markersize=2, linewidth=2)
marker_line, = ax.plot(circle, circle, color='red', marker='.', markersize=8, linestyle="")
robot_path, = ax.plot([0], [0], color='black', marker='.', markersize=2, linewidth=0.2)
path_map, = plt.plot(x_map, y_map, color='grey', marker='*', markersize=8, linestyle="")
x_f = [circle]*NUMBER_MARKERS
y_f = [circle]*NUMBER_MARKERS
plt.ion()
plt.xlim(-10, 20)
plt.ylim(-20, 10)
plt.xlabel('X', fontsize=10) # X axis label
plt.ylabel('Y', fontsize=10) # Y axis label
plt.title('FastSlam 2.0')
#plt.legend()
plt.grid(True) # Enabling gridding
def drawing_plot(particles):
Max = 0
Max_id = 0
for i in range(NUMBER_PARTICLES):
if particles[i].get_weight() > Max:
Max = particles[i].get_weight()
Max_id = i
pose = particles[Max_id].get_position()
x = pose[0]
y = pose[1]
o = pose[2]
x_o = x + o_size*np.cos(o)
y_o = y + o_size*np.sin(o)
x_path, y_path = particles[Max_id].get_path()
plt.show(block=False)
robot_path.set_xdata(x_path)
robot_path.set_ydata(y_path)
ax.draw_artist(ax.patch)
ax.draw_artist(robot_path)
robot_line.set_xdata(x)
robot_line.set_ydata(y)
ax.draw_artist(ax.patch)
ax.draw_artist(robot_line)
robot_orientation.set_xdata([x, x_o])
robot_orientation.set_ydata([y, y_o])
ax.draw_artist(ax.patch)
ax.draw_artist(robot_orientation)
Landmarkers = particles[Max_id].get_landmarkers()
i = 0
for marker in Landmarkers:
if marker == None:
x_f[i] = KEY_NUMBER + circle
y_f[i] = KEY_NUMBER + circle
i += 1
continue
pose_m = marker.get_marker_position()
x_m = pose_m[0]
y_m = pose_m[1]
std_m = marker.get_marker_covariance()
x_std_m = std_m[0][0]
y_std_m = std_m[1][1]
x_f[i] = x_m + x_std_m * np.cos(circle)
y_f[i] = y_m + y_std_m * np.sin(circle)
i += 1
marker_line.set_xdata(x_f)
marker_line.set_ydata(y_f)
ax.draw_artist(ax.patch)
ax.draw_artist(marker_line)
fig.canvas.flush_events()
def resample_particles(particles, updated_marker):
# Returns a new set of particles obtained by performing stochastic universal sampling, according to the particle weights.
# distance between pointers
step = 1.0/NUMBER_PARTICLES
# random start of first pointer
r = np.random.uniform(0,step)
# where we are along the weights
c = particles[0].get_weight()
# index of weight container and corresponding particle
i = 0
index = 0
new_particles = []
#loop over all particle weights
for particle in particles:
#go through the weights until you find the particle
u = r + index*step
while u > c:
i = i + 1
c = c + particles[i].get_weight()
#add that particle
if i == index:
new_particle = particles[i]
new_particle.set_weight(step)
else:
new_particle = particles[i].copy(updated_marker)
#new_particle = copy.deepcopy(particles[i])
#new_particle.set_weight(step)
new_particles.append(new_particle)
#increase the threshold
index += 1
del particles
return new_particles
class Particle():
#each particle has a pose(x,y,o), a weight(w) and a series of kalman filters for every landmark
#in the beggining all particles are in the origin frame of the world (0,0,0)
def __init__(self):
self.X_robot = np.array([0, 0, 0], dtype='float64').transpose()
self.weight = 1.0/NUMBER_PARTICLES
self.Landmarkers = [None]*NUMBER_MARKERS
self.x_path = np.array([0], dtype='float64')
self.y_path = np.array([0], dtype='float64')
def get_kalman_filters(self, marker_id, Z):
if self.Landmarkers[marker_id] == None:
self.Landmarkers[marker_id] = KalmanFilter()
return self.Landmarkers[marker_id]
def particle_prediction(self, motion_model):
#if the robot moves we just add the motion model to the previous pose to predict the particle position
x = 0
y = 1
o = 2
noise = np.array([np.random.normal(0,translation_noise), np.random.normal(0,translation_noise), np.random.normal(0,rotation_noise)], dtype='float64').transpose()
noise = noise*motion_model*noise_factor
self.X_robot = self.X_robot + motion_model + noise
while self.X_robot[o] > np.pi:
self.X_robot[o] = self.X_robot[o] - 2*np.pi
while self.X_robot[o] < -np.pi:
self.X_robot[o] = self.X_robot[o] + 2*np.pi
self.x_path = np.insert(self.x_path, 0, self.X_robot[x])
self.y_path = np.insert(self.y_path, 0, self.X_robot[y])
return self.X_robot
def update_weight(self, marker_id):
std = self.Landmarkers[marker_id].get_marker_covariance()
dev = self.Landmarkers[marker_id].get_marker_validity()
fact = np.sqrt(np.linalg.det(2* np.pi * std))
expo = - np.dot(dev.T, np.linalg.inv(std).dot(dev))/2
self.weight = self.weight / fact * np.exp(expo)
def get_weight(self):
return self.weight
def normalize_weight(self, total_weight):
self.weight = self.weight / total_weight
def set_weight(self, new_weight):
self.weight = new_weight
def get_position(self):
return self.X_robot
def get_landmarkers(self):
return self.Landmarkers
def get_path(self):
return self.x_path, self.y_path
def copy(self, updated_marker):
new_particle = Particle()
del new_particle.x_path
del new_particle.y_path
del new_particle.Landmarkers
del new_particle.X_robot
new_particle.x_path = np.copy(self.x_path)
new_particle.y_path = np.copy(self.y_path)
for i in range(len(self.Landmarkers)):
if self.Landmarkers[i] != None and updated_marker[i] == True:
self.Landmarkers[i] = self.Landmarkers[i].copy()
new_particle.Landmarkers = self.Landmarkers
new_particle.X_robot = np.copy(self.X_robot)
return new_particle
class KalmanFilter():
def __init__(self):
# X_ espected value of landmarks' position (x,y)
# X_robot (x, y, yaw)
# H gradient of markers' relative position to robot (h:(x_m,y_m) -> (distance, orientation); H = dh/dX_ and X_ = X(k+1|k))
# S covariance matrix markers' position
# Q covariance matrix markers' measurement
# V diference between measurement and estimated markers' position
self.first = True
self.R_t = np.identity(number_of_dimensions, dtype='float64')*Sensor_noise #sensor noise
self.P_t = np.identity(number_of_dimensions+1, dtype='float64')*Odom_noise #sensor noise
def compute_G(self, X_robot):
x = 0 # x position
y = 1 # y position
y = self.X_[y] - X_robot[y]
x = self.X_[x] - X_robot[x]
# compute H
denominator = x**2 + y**2
g_o11 = x / np.sqrt(denominator)
g_o12 = y / np.sqrt(denominator)
g_o21 = -y / denominator
g_o22 = x / denominator
self.G_o = np.array([[g_o11, g_o12], [g_o21, g_o22]])
g_s11 = -g_o11
g_s12 = -g_o12
g_s21 = -g_o21
g_s22 = -g_o22
self.G_s = np.array([[g_s21, g_s22, 0], [g_s21, g_s22, -1]])
def Apply_EKF(self, X_robot, Z):
x = 0 # x position
y = 1 # y position
o = 2 # o orientaio
d = 0 # distance measured
fi = 1 # orientaion of the measurement
if self.first == True:
# the angle is in the direction y to x, reverse of the usual x to y
angle = (X_robot[o] + Z[fi])
self.X_ = np.array([X_robot[x] + Z[d]*np.cos(angle), X_robot[y] + Z[d]*np.sin(angle)], dtype='float64').transpose() # first landmark position
self.compute_G(X_robot)
self.S = np.linalg.inv(self.G_o.dot(np.linalg.inv(self.R_t).dot(self.G_o.T)))
self.V = np.array([0, 0], dtype='float64').transpose()
self.L_t = np.identity(number_of_dimensions, dtype='float64')
else:
# Prediction
y = self.X_[y] - X_robot[y]
x = self.X_[x] - X_robot[x]
d = np.sqrt(x**2 + y**2) # distance
fi = np.arctan2(y, x) - X_robot[o] # direction
while fi > np.pi:
fi = fi - 2*np.pi
while fi < -np.pi:
fi = fi + 2*np.pi
Z_ = np.array([d, fi], dtype='float64').transpose()
self.compute_G(X_robot)
self.Q = self.G_o.dot(self.S.dot(self.G_o.T)) + self.R_t
# Observation
self.V = np.subtract(Z, Z_) # Z = [d, teta]
def Update(self):
# Update
if self.first == False:
# K kalman gain
K = self.S.dot(self.G_o.T.dot(np.linalg.inv(self.Q)))
self.X_ = self.X_ + K.dot(self.V)
self.L_t = self.G_s.dot(self.P_t.dot(self.G_s.T)) + self.G_o.dot(self.S.dot(self.G_o.T)) + self.R_t
self.S = (np.identity(number_of_dimensions)- K.dot(self.G_o)).dot(self.S)
else:
self.first = False
def get_marker_position(self):
return self.X_
def get_marker_covariance(self):
return self.L_t
def get_marker_validity(self):
return self.V
def measurement_validition(self):
return np.dot(self.V.T, np.linalg.inv(self.L_t)).dot(self.V)
def copy(self):
new_KF = KalmanFilter()
new_KF.X_ = np.copy(self.X_)
new_KF.S = np.copy(self.S)
new_KF.L_t = np.copy(self.L_t)
new_KF.V = np.copy(self.V)
new_KF.first = False
return new_KF
class markers():
def __init__(self):
self.could_it_read = False
self.z_distance_left_eye_to_robot_wheel = camara_distance_z
self.x_distance_left_eye_to_robot_wheel = camara_distance_x
self.markers_info = [None]*NUMBER_MARKERS
self.list_ids = np.ones(NUMBER_MARKERS, dtype='int32')*KEY_NUMBER
def callback_Markers(self, data):
# static tf could be applied here: z = z + z_distance_left_eye_to_robot_wheel, x = x + x_distance_left_eye_to_robot_wheel
for i in range(NUMBER_MARKERS):
try:
marker_info = data.markers.pop()
except:
break
self.list_ids[i] = marker_info.id
self.markers_info[marker_info.id] = marker_info
def get_measerment(self, index):
x = self.markers_info[index].pose.pose.position.x # right-left
z = self.markers_info[index].pose.pose.position.z # front-back
# position of the marker relative to base_link
z = z + self.z_distance_left_eye_to_robot_wheel
x = x + self.x_distance_left_eye_to_robot_wheel
marker_distance = np.sqrt(z**2+x**2)
marker_direction = np.arctan(x/z)
return np.array([marker_distance, -marker_direction], dtype='float64').transpose()
def get_list_ids(self):
return self.list_ids
def reset_list_ids(self):
i = 0
while self.list_ids[i] != KEY_NUMBER:
self.list_ids[i] = KEY_NUMBER
i += 1
def marker_info(self, index):
return self.markers_info[index]
class odom():
def __init__(self):
|
def callback_odom(self, data):
# robo_frame
frame_id = data.header.frame_id # odom
child_frame_id = data.child_frame_id # base_link
# pose
x = data.pose.pose.position.x # front-back
y = data.pose.pose.position.y # right-left
orientation_x = data.pose.pose.orientation.x
orientation_y = data.pose.pose.orientation.y
orientation_z = data.pose.pose.orientation.z
orientation_w = data.pose.pose.orientation.w
roll, pitch, yaw = tf.transformations.euler_from_quaternion((orientation_x, orientation_y, orientation_z, orientation_w))
if self.first_read == True:
self.last_position = np.array([x, y, yaw], dtype='float64').transpose()
self.total_movement = np.array([0, 0, 0], dtype='float64').transpose()
self.first_read = False
self.odom_position = np.array([x, y, yaw], dtype='float64').transpose()
self.movement = np.subtract(self.odom_position, self.last_position)
self.total_movement = np.add(self.total_movement, np.absolute(self.movement))
if self.movement[2] > np.pi:
self.movement[2] = 2*np.pi - self.movement[2]
if self.movement[2] < -np.pi:
self.movement[2] = - 2*np.pi - self.movement[2]
self.last_position = self.odom_position
self.read_move = np.add(self.read_move, self.movement)
def actual_movement(self):
return self.read_move
def get_movement(self):
msg = self.read_move
self.read_move = np.array([0, 0, 0], dtype='float64').transpose()
return msg
def get_total_movement(self):
return self.total_movement
def FastSlam():
odom_measurement = odom()
particles = [Particle() for i in range(NUMBER_PARTICLES)]
updated_marker = [False]*NUMBER_MARKERS
marker_measurement = markers()
rospy.init_node('FastSlam', anonymous=True)
frequency = rospy.Rate(Frequency)
rospy.Subscriber("RosAria/pose", Odometry, odom_measurement.callback_odom)
#Subscriber of aruco publisher topic with arucos observations
rospy.Subscriber('aruco_marker_publisher/markers', MarkerArray, marker_measurement.callback_Markers)
while not rospy.is_shutdown():
start_time = time.time()
motion_model = odom_measurement.actual_movement()
landmarkers_ids = marker_measurement.get_list_ids()
if landmarkers_ids[0] != KEY_NUMBER and np.linalg.norm(motion_model) > minimum_move:
total_weight = 0
motion_model = odom_measurement.get_movement()
for i in range(NUMBER_PARTICLES):
expected_state = particles[i].particle_prediction(motion_model)
for marker_id in landmarkers_ids:
if marker_id == KEY_NUMBER:
break
updated_marker[marker_id] = True
landmarker_measurement = marker_measurement.get_measerment(marker_id)
kalman_filter = particles[i].get_kalman_filters(marker_id, landmarker_measurement)
kalman_filter.Apply_EKF(expected_state, landmarker_measurement)
validity_info = kalman_filter.measurement_validition()
particles[i].update_weight(marker_id)
##############################################################
if np.linalg.norm(validity_info) < validity_threshold:
##############################################################
kalman_filter.Update()
total_weight += particles[i].get_weight()
sum_weights = 0
for i in range(NUMBER_PARTICLES):
particles[i].normalize_weight(total_weight)
sum_weights += particles[i].get_weight()**2
drawing_plot(particles)
neff = 1.0/sum_weights
if neff < float(NUMBER_PARTICLES)/2:
particles = resample_particles(particles, updated_marker)
del updated_marker
updated_marker = [False]*NUMBER_MARKERS
marker_measurement.reset_list_ids()
elapsed_time = time.time() - start_time
if elapsed_time > 9*10**-2:
print elapsed_time
frequency.sleep()
if __name__ == '__main__':
FastSlam() | self.read_move = np.array([0, 0, 0], dtype='float64').transpose()
self.first_read = True | identifier_body |
FastSlamV2.py | import rospy
import tf
from nav_msgs.msg import Odometry
from aruco_msgs.msg import MarkerArray
import numpy as np
import matplotlib.pyplot as plt
import time
#Ponto de partida (0,0) - Porta da sala
#Marker de id 0 tem coordenadas (x_map0,y_map0) em metros
x_map=[0.0]*41;
y_map=[0.0]*41;
x_map[0]=0.097+0.385;
y_map[0]=0.945;
x_map[1]=x_map[0]+0.77;
y_map[1]=y_map[0];
x_map[2]=x_map[1]+0.70;
y_map[2]=y_map[0];
x_map[3]=x_map[2]+2.495;
y_map[3]=y_map[0];
x_map[4]=x_map[3]+3.00;
y_map[4]=y_map[0];
x_map[5]=x_map[4]+3.00;
y_map[5]=y_map[0];
x_map[6]=x_map[5]+2.74;
y_map[6]=y_map[0];
x_map[7]=x_map[6]+2.643;
y_map[7]=y_map[0]-0.20;
x_map[8]=x_map[7];
x_map[9]=x_map[7];
x_map[10]=x_map[7];
x_map[11]=x_map[7];
y_map[8]=y_map[7]-1.205;
y_map[9]=y_map[8]-1.17;
y_map[10]=y_map[8]-4.84;
y_map[11]=y_map[10]-6.93;
y_map[12]=y_map[11]-2.55;
x_map[12]=x_map[11]-0.27;
y_map[13]=y_map[14]=y_map[15]=y_map[16]=y_map[17]=y_map[12];
x_map[13]=x_map[12]-0.60;
x_map[14]=x_map[13]-0.95;
x_map[15]=x_map[14]-4.095;
x_map[16]=x_map[15]-3.005;
x_map[17]=x_map[16]-4.20;
x_map[18]=x_map[19]=x_map[23]=x_map[30]=x_map[20]=x_map[21]=0.097;
x_map[22]=0;
y_map[22]=y_map[18]+0.67;
y_map[18]=y_map[17]+0.22;
y_map[19]=y_map[18]+0.67+0.615;
y_map[23]=y_map[19]+0.55;
y_map[30]=y_map[23]+0.70;
y_map[20]=y_map[23]+3.93;
y_map[21]=y_map[20]+5.895;
x_map[24]=1.67+0.097;
y_map[24]=-0.84;
x_map[25]=x_map[4];
y_map[25]=y_map[4]-1.575;
x_map[26]=x_map[9]-1.235;
y_map[26]=y_map[9];
x_map[28]=x_map[26]+0.10;
y_map[28]=y_map[26]-2.00;
x_map[27]=x_map[26];
y_map[27]=y_map[26]-2.00-4.21;
x_map[29]=x_map[14]-1.45;
y_map[29]=y_map[14]+0.20;
# Markers da zona dos elevadores
x_map[31]=x_map[21]+1.79;
y_map[31]=y_map[21]+0.095;
x_map[33]=x_map[31]+2.15;
y_map[33]=y_map[31]+0.10;
y_map[37]=y_map[31];
x_map[37]=x_map[33]+1.00;
x_map[40]=x_map[37]+1.00;
y_map[40]=y_map[37]-1.10;
x_map[39]=x_map[37]+2.65;
y_map[39]=y_map[37]+0.35;
x_map[38]=x_map[39];
y_map[38]=y_map[39]-2.66;
y_map[36]=y_map[34]=y_map[35]=y_map[38];
x_map[36]=x_map[38]-1.46;
x_map[34]=x_map[36]-1.96;
x_map[35]=x_map[34]-1.46;
y_map[32]=y_map[35]+0.28;
x_map[32]=x_map[35]-0.68-0.28;
camara_distance_z = 0.12 # 15.5 cm <-> 13 cm #dia 13/12/2018 <-> 12 cm => 12.5 cm inicio a 81 cm
camara_distance_x = 0.011 # 1.1 cm
# Constants
NUMBER_MARKERS = 41
KEY_NUMBER = 2**(5*5) # number of total combinations possible in aruco code
number_of_dimensions = 2
Frequency = 9.5
NUMBER_PARTICLES = 100
translation_noise = 0.1
rotation_noise = 0.1
noise_factor = 1
minimum_move = 0
Sensor_noise = 0.1
Odom_noise = 0.1
validity_threshold = 50
circle = np.arange(0, 2*np.pi, 0.1)
o_size = 0.3
line = np.arange(0, o_size, o_size)
fig, ax = plt.subplots()
robot_line, = ax.plot([0], [0], color='black', marker='o', markersize=12)
robot_orientation, = ax.plot(line, line, color='lime', marker='.', markersize=2, linewidth=2)
marker_line, = ax.plot(circle, circle, color='red', marker='.', markersize=8, linestyle="")
robot_path, = ax.plot([0], [0], color='black', marker='.', markersize=2, linewidth=0.2)
path_map, = plt.plot(x_map, y_map, color='grey', marker='*', markersize=8, linestyle="")
x_f = [circle]*NUMBER_MARKERS
y_f = [circle]*NUMBER_MARKERS
plt.ion()
plt.xlim(-10, 20)
plt.ylim(-20, 10)
plt.xlabel('X', fontsize=10) # X axis label
plt.ylabel('Y', fontsize=10) # Y axis label
plt.title('FastSlam 2.0')
#plt.legend()
plt.grid(True) # Enabling gridding
def drawing_plot(particles):
Max = 0
Max_id = 0
for i in range(NUMBER_PARTICLES):
if particles[i].get_weight() > Max:
Max = particles[i].get_weight()
Max_id = i
pose = particles[Max_id].get_position()
x = pose[0]
y = pose[1]
o = pose[2]
x_o = x + o_size*np.cos(o)
y_o = y + o_size*np.sin(o)
x_path, y_path = particles[Max_id].get_path()
plt.show(block=False)
robot_path.set_xdata(x_path)
robot_path.set_ydata(y_path)
ax.draw_artist(ax.patch)
ax.draw_artist(robot_path)
robot_line.set_xdata(x)
robot_line.set_ydata(y)
ax.draw_artist(ax.patch)
ax.draw_artist(robot_line)
robot_orientation.set_xdata([x, x_o])
robot_orientation.set_ydata([y, y_o])
ax.draw_artist(ax.patch)
ax.draw_artist(robot_orientation)
Landmarkers = particles[Max_id].get_landmarkers()
i = 0
for marker in Landmarkers:
|
marker_line.set_xdata(x_f)
marker_line.set_ydata(y_f)
ax.draw_artist(ax.patch)
ax.draw_artist(marker_line)
fig.canvas.flush_events()
def resample_particles(particles, updated_marker):
# Returns a new set of particles obtained by performing stochastic universal sampling, according to the particle weights.
# distance between pointers
step = 1.0/NUMBER_PARTICLES
# random start of first pointer
r = np.random.uniform(0,step)
# where we are along the weights
c = particles[0].get_weight()
# index of weight container and corresponding particle
i = 0
index = 0
new_particles = []
#loop over all particle weights
for particle in particles:
#go through the weights until you find the particle
u = r + index*step
while u > c:
i = i + 1
c = c + particles[i].get_weight()
#add that particle
if i == index:
new_particle = particles[i]
new_particle.set_weight(step)
else:
new_particle = particles[i].copy(updated_marker)
#new_particle = copy.deepcopy(particles[i])
#new_particle.set_weight(step)
new_particles.append(new_particle)
#increase the threshold
index += 1
del particles
return new_particles
class Particle():
#each particle has a pose(x,y,o), a weight(w) and a series of kalman filters for every landmark
#in the beggining all particles are in the origin frame of the world (0,0,0)
def __init__(self):
self.X_robot = np.array([0, 0, 0], dtype='float64').transpose()
self.weight = 1.0/NUMBER_PARTICLES
self.Landmarkers = [None]*NUMBER_MARKERS
self.x_path = np.array([0], dtype='float64')
self.y_path = np.array([0], dtype='float64')
def get_kalman_filters(self, marker_id, Z):
if self.Landmarkers[marker_id] == None:
self.Landmarkers[marker_id] = KalmanFilter()
return self.Landmarkers[marker_id]
def particle_prediction(self, motion_model):
#if the robot moves we just add the motion model to the previous pose to predict the particle position
x = 0
y = 1
o = 2
noise = np.array([np.random.normal(0,translation_noise), np.random.normal(0,translation_noise), np.random.normal(0,rotation_noise)], dtype='float64').transpose()
noise = noise*motion_model*noise_factor
self.X_robot = self.X_robot + motion_model + noise
while self.X_robot[o] > np.pi:
self.X_robot[o] = self.X_robot[o] - 2*np.pi
while self.X_robot[o] < -np.pi:
self.X_robot[o] = self.X_robot[o] + 2*np.pi
self.x_path = np.insert(self.x_path, 0, self.X_robot[x])
self.y_path = np.insert(self.y_path, 0, self.X_robot[y])
return self.X_robot
def update_weight(self, marker_id):
std = self.Landmarkers[marker_id].get_marker_covariance()
dev = self.Landmarkers[marker_id].get_marker_validity()
fact = np.sqrt(np.linalg.det(2* np.pi * std))
expo = - np.dot(dev.T, np.linalg.inv(std).dot(dev))/2
self.weight = self.weight / fact * np.exp(expo)
def get_weight(self):
return self.weight
def normalize_weight(self, total_weight):
self.weight = self.weight / total_weight
def set_weight(self, new_weight):
self.weight = new_weight
def get_position(self):
return self.X_robot
def get_landmarkers(self):
return self.Landmarkers
def get_path(self):
return self.x_path, self.y_path
def copy(self, updated_marker):
new_particle = Particle()
del new_particle.x_path
del new_particle.y_path
del new_particle.Landmarkers
del new_particle.X_robot
new_particle.x_path = np.copy(self.x_path)
new_particle.y_path = np.copy(self.y_path)
for i in range(len(self.Landmarkers)):
if self.Landmarkers[i] != None and updated_marker[i] == True:
self.Landmarkers[i] = self.Landmarkers[i].copy()
new_particle.Landmarkers = self.Landmarkers
new_particle.X_robot = np.copy(self.X_robot)
return new_particle
class KalmanFilter():
def __init__(self):
# X_ espected value of landmarks' position (x,y)
# X_robot (x, y, yaw)
# H gradient of markers' relative position to robot (h:(x_m,y_m) -> (distance, orientation); H = dh/dX_ and X_ = X(k+1|k))
# S covariance matrix markers' position
# Q covariance matrix markers' measurement
# V diference between measurement and estimated markers' position
self.first = True
self.R_t = np.identity(number_of_dimensions, dtype='float64')*Sensor_noise #sensor noise
self.P_t = np.identity(number_of_dimensions+1, dtype='float64')*Odom_noise #sensor noise
def compute_G(self, X_robot):
x = 0 # x position
y = 1 # y position
y = self.X_[y] - X_robot[y]
x = self.X_[x] - X_robot[x]
# compute H
denominator = x**2 + y**2
g_o11 = x / np.sqrt(denominator)
g_o12 = y / np.sqrt(denominator)
g_o21 = -y / denominator
g_o22 = x / denominator
self.G_o = np.array([[g_o11, g_o12], [g_o21, g_o22]])
g_s11 = -g_o11
g_s12 = -g_o12
g_s21 = -g_o21
g_s22 = -g_o22
self.G_s = np.array([[g_s21, g_s22, 0], [g_s21, g_s22, -1]])
def Apply_EKF(self, X_robot, Z):
x = 0 # x position
y = 1 # y position
o = 2 # o orientaio
d = 0 # distance measured
fi = 1 # orientaion of the measurement
if self.first == True:
# the angle is in the direction y to x, reverse of the usual x to y
angle = (X_robot[o] + Z[fi])
self.X_ = np.array([X_robot[x] + Z[d]*np.cos(angle), X_robot[y] + Z[d]*np.sin(angle)], dtype='float64').transpose() # first landmark position
self.compute_G(X_robot)
self.S = np.linalg.inv(self.G_o.dot(np.linalg.inv(self.R_t).dot(self.G_o.T)))
self.V = np.array([0, 0], dtype='float64').transpose()
self.L_t = np.identity(number_of_dimensions, dtype='float64')
else:
# Prediction
y = self.X_[y] - X_robot[y]
x = self.X_[x] - X_robot[x]
d = np.sqrt(x**2 + y**2) # distance
fi = np.arctan2(y, x) - X_robot[o] # direction
while fi > np.pi:
fi = fi - 2*np.pi
while fi < -np.pi:
fi = fi + 2*np.pi
Z_ = np.array([d, fi], dtype='float64').transpose()
self.compute_G(X_robot)
self.Q = self.G_o.dot(self.S.dot(self.G_o.T)) + self.R_t
# Observation
self.V = np.subtract(Z, Z_) # Z = [d, teta]
def Update(self):
# Update
if self.first == False:
# K kalman gain
K = self.S.dot(self.G_o.T.dot(np.linalg.inv(self.Q)))
self.X_ = self.X_ + K.dot(self.V)
self.L_t = self.G_s.dot(self.P_t.dot(self.G_s.T)) + self.G_o.dot(self.S.dot(self.G_o.T)) + self.R_t
self.S = (np.identity(number_of_dimensions)- K.dot(self.G_o)).dot(self.S)
else:
self.first = False
def get_marker_position(self):
return self.X_
def get_marker_covariance(self):
return self.L_t
def get_marker_validity(self):
return self.V
def measurement_validition(self):
return np.dot(self.V.T, np.linalg.inv(self.L_t)).dot(self.V)
def copy(self):
new_KF = KalmanFilter()
new_KF.X_ = np.copy(self.X_)
new_KF.S = np.copy(self.S)
new_KF.L_t = np.copy(self.L_t)
new_KF.V = np.copy(self.V)
new_KF.first = False
return new_KF
class markers():
def __init__(self):
self.could_it_read = False
self.z_distance_left_eye_to_robot_wheel = camara_distance_z
self.x_distance_left_eye_to_robot_wheel = camara_distance_x
self.markers_info = [None]*NUMBER_MARKERS
self.list_ids = np.ones(NUMBER_MARKERS, dtype='int32')*KEY_NUMBER
def callback_Markers(self, data):
# static tf could be applied here: z = z + z_distance_left_eye_to_robot_wheel, x = x + x_distance_left_eye_to_robot_wheel
for i in range(NUMBER_MARKERS):
try:
marker_info = data.markers.pop()
except:
break
self.list_ids[i] = marker_info.id
self.markers_info[marker_info.id] = marker_info
def get_measerment(self, index):
x = self.markers_info[index].pose.pose.position.x # right-left
z = self.markers_info[index].pose.pose.position.z # front-back
# position of the marker relative to base_link
z = z + self.z_distance_left_eye_to_robot_wheel
x = x + self.x_distance_left_eye_to_robot_wheel
marker_distance = np.sqrt(z**2+x**2)
marker_direction = np.arctan(x/z)
return np.array([marker_distance, -marker_direction], dtype='float64').transpose()
def get_list_ids(self):
return self.list_ids
def reset_list_ids(self):
i = 0
while self.list_ids[i] != KEY_NUMBER:
self.list_ids[i] = KEY_NUMBER
i += 1
def marker_info(self, index):
return self.markers_info[index]
class odom():
def __init__(self):
self.read_move = np.array([0, 0, 0], dtype='float64').transpose()
self.first_read = True
def callback_odom(self, data):
# robo_frame
frame_id = data.header.frame_id # odom
child_frame_id = data.child_frame_id # base_link
# pose
x = data.pose.pose.position.x # front-back
y = data.pose.pose.position.y # right-left
orientation_x = data.pose.pose.orientation.x
orientation_y = data.pose.pose.orientation.y
orientation_z = data.pose.pose.orientation.z
orientation_w = data.pose.pose.orientation.w
roll, pitch, yaw = tf.transformations.euler_from_quaternion((orientation_x, orientation_y, orientation_z, orientation_w))
if self.first_read == True:
self.last_position = np.array([x, y, yaw], dtype='float64').transpose()
self.total_movement = np.array([0, 0, 0], dtype='float64').transpose()
self.first_read = False
self.odom_position = np.array([x, y, yaw], dtype='float64').transpose()
self.movement = np.subtract(self.odom_position, self.last_position)
self.total_movement = np.add(self.total_movement, np.absolute(self.movement))
if self.movement[2] > np.pi:
self.movement[2] = 2*np.pi - self.movement[2]
if self.movement[2] < -np.pi:
self.movement[2] = - 2*np.pi - self.movement[2]
self.last_position = self.odom_position
self.read_move = np.add(self.read_move, self.movement)
def actual_movement(self):
return self.read_move
def get_movement(self):
msg = self.read_move
self.read_move = np.array([0, 0, 0], dtype='float64').transpose()
return msg
def get_total_movement(self):
return self.total_movement
def FastSlam():
odom_measurement = odom()
particles = [Particle() for i in range(NUMBER_PARTICLES)]
updated_marker = [False]*NUMBER_MARKERS
marker_measurement = markers()
rospy.init_node('FastSlam', anonymous=True)
frequency = rospy.Rate(Frequency)
rospy.Subscriber("RosAria/pose", Odometry, odom_measurement.callback_odom)
#Subscriber of aruco publisher topic with arucos observations
rospy.Subscriber('aruco_marker_publisher/markers', MarkerArray, marker_measurement.callback_Markers)
while not rospy.is_shutdown():
start_time = time.time()
motion_model = odom_measurement.actual_movement()
landmarkers_ids = marker_measurement.get_list_ids()
if landmarkers_ids[0] != KEY_NUMBER and np.linalg.norm(motion_model) > minimum_move:
total_weight = 0
motion_model = odom_measurement.get_movement()
for i in range(NUMBER_PARTICLES):
expected_state = particles[i].particle_prediction(motion_model)
for marker_id in landmarkers_ids:
if marker_id == KEY_NUMBER:
break
updated_marker[marker_id] = True
landmarker_measurement = marker_measurement.get_measerment(marker_id)
kalman_filter = particles[i].get_kalman_filters(marker_id, landmarker_measurement)
kalman_filter.Apply_EKF(expected_state, landmarker_measurement)
validity_info = kalman_filter.measurement_validition()
particles[i].update_weight(marker_id)
##############################################################
if np.linalg.norm(validity_info) < validity_threshold:
##############################################################
kalman_filter.Update()
total_weight += particles[i].get_weight()
sum_weights = 0
for i in range(NUMBER_PARTICLES):
particles[i].normalize_weight(total_weight)
sum_weights += particles[i].get_weight()**2
drawing_plot(particles)
neff = 1.0/sum_weights
if neff < float(NUMBER_PARTICLES)/2:
particles = resample_particles(particles, updated_marker)
del updated_marker
updated_marker = [False]*NUMBER_MARKERS
marker_measurement.reset_list_ids()
elapsed_time = time.time() - start_time
if elapsed_time > 9*10**-2:
print elapsed_time
frequency.sleep()
if __name__ == '__main__':
FastSlam() | if marker == None:
x_f[i] = KEY_NUMBER + circle
y_f[i] = KEY_NUMBER + circle
i += 1
continue
pose_m = marker.get_marker_position()
x_m = pose_m[0]
y_m = pose_m[1]
std_m = marker.get_marker_covariance()
x_std_m = std_m[0][0]
y_std_m = std_m[1][1]
x_f[i] = x_m + x_std_m * np.cos(circle)
y_f[i] = y_m + y_std_m * np.sin(circle)
i += 1 | conditional_block |
FastSlamV2.py | import rospy
import tf
from nav_msgs.msg import Odometry
from aruco_msgs.msg import MarkerArray
import numpy as np
import matplotlib.pyplot as plt
import time
#Ponto de partida (0,0) - Porta da sala
#Marker de id 0 tem coordenadas (x_map0,y_map0) em metros
x_map=[0.0]*41;
y_map=[0.0]*41;
x_map[0]=0.097+0.385;
y_map[0]=0.945;
x_map[1]=x_map[0]+0.77;
y_map[1]=y_map[0];
x_map[2]=x_map[1]+0.70;
y_map[2]=y_map[0];
x_map[3]=x_map[2]+2.495;
y_map[3]=y_map[0];
x_map[4]=x_map[3]+3.00;
y_map[4]=y_map[0];
x_map[5]=x_map[4]+3.00;
y_map[5]=y_map[0];
x_map[6]=x_map[5]+2.74;
y_map[6]=y_map[0];
x_map[7]=x_map[6]+2.643;
y_map[7]=y_map[0]-0.20;
x_map[8]=x_map[7];
x_map[9]=x_map[7];
x_map[10]=x_map[7];
x_map[11]=x_map[7];
y_map[8]=y_map[7]-1.205;
y_map[9]=y_map[8]-1.17;
y_map[10]=y_map[8]-4.84;
y_map[11]=y_map[10]-6.93;
y_map[12]=y_map[11]-2.55;
x_map[12]=x_map[11]-0.27;
y_map[13]=y_map[14]=y_map[15]=y_map[16]=y_map[17]=y_map[12];
x_map[13]=x_map[12]-0.60;
x_map[14]=x_map[13]-0.95;
x_map[15]=x_map[14]-4.095;
x_map[16]=x_map[15]-3.005;
x_map[17]=x_map[16]-4.20;
x_map[18]=x_map[19]=x_map[23]=x_map[30]=x_map[20]=x_map[21]=0.097;
x_map[22]=0;
y_map[22]=y_map[18]+0.67;
y_map[18]=y_map[17]+0.22;
y_map[19]=y_map[18]+0.67+0.615;
y_map[23]=y_map[19]+0.55;
y_map[30]=y_map[23]+0.70;
y_map[20]=y_map[23]+3.93;
y_map[21]=y_map[20]+5.895;
x_map[24]=1.67+0.097;
y_map[24]=-0.84;
x_map[25]=x_map[4];
y_map[25]=y_map[4]-1.575;
x_map[26]=x_map[9]-1.235;
y_map[26]=y_map[9];
x_map[28]=x_map[26]+0.10;
y_map[28]=y_map[26]-2.00;
x_map[27]=x_map[26];
y_map[27]=y_map[26]-2.00-4.21;
x_map[29]=x_map[14]-1.45;
y_map[29]=y_map[14]+0.20;
# Markers da zona dos elevadores
x_map[31]=x_map[21]+1.79;
y_map[31]=y_map[21]+0.095;
x_map[33]=x_map[31]+2.15;
y_map[33]=y_map[31]+0.10;
y_map[37]=y_map[31];
x_map[37]=x_map[33]+1.00;
x_map[40]=x_map[37]+1.00;
y_map[40]=y_map[37]-1.10;
x_map[39]=x_map[37]+2.65;
y_map[39]=y_map[37]+0.35;
x_map[38]=x_map[39];
y_map[38]=y_map[39]-2.66;
y_map[36]=y_map[34]=y_map[35]=y_map[38];
x_map[36]=x_map[38]-1.46;
x_map[34]=x_map[36]-1.96;
x_map[35]=x_map[34]-1.46;
y_map[32]=y_map[35]+0.28;
x_map[32]=x_map[35]-0.68-0.28;
camara_distance_z = 0.12 # 15.5 cm <-> 13 cm #dia 13/12/2018 <-> 12 cm => 12.5 cm inicio a 81 cm
camara_distance_x = 0.011 # 1.1 cm
# Constants
NUMBER_MARKERS = 41
KEY_NUMBER = 2**(5*5) # number of total combinations possible in aruco code
number_of_dimensions = 2
Frequency = 9.5
NUMBER_PARTICLES = 100
translation_noise = 0.1
rotation_noise = 0.1
noise_factor = 1
minimum_move = 0
Sensor_noise = 0.1
Odom_noise = 0.1
validity_threshold = 50
circle = np.arange(0, 2*np.pi, 0.1)
o_size = 0.3
line = np.arange(0, o_size, o_size)
fig, ax = plt.subplots()
robot_line, = ax.plot([0], [0], color='black', marker='o', markersize=12)
robot_orientation, = ax.plot(line, line, color='lime', marker='.', markersize=2, linewidth=2)
marker_line, = ax.plot(circle, circle, color='red', marker='.', markersize=8, linestyle="")
robot_path, = ax.plot([0], [0], color='black', marker='.', markersize=2, linewidth=0.2)
path_map, = plt.plot(x_map, y_map, color='grey', marker='*', markersize=8, linestyle="")
x_f = [circle]*NUMBER_MARKERS
y_f = [circle]*NUMBER_MARKERS
plt.ion()
plt.xlim(-10, 20)
plt.ylim(-20, 10)
plt.xlabel('X', fontsize=10) # X axis label
plt.ylabel('Y', fontsize=10) # Y axis label
plt.title('FastSlam 2.0')
#plt.legend()
plt.grid(True) # Enabling gridding
def drawing_plot(particles):
Max = 0
Max_id = 0
for i in range(NUMBER_PARTICLES):
if particles[i].get_weight() > Max:
Max = particles[i].get_weight()
Max_id = i
pose = particles[Max_id].get_position()
x = pose[0]
y = pose[1]
o = pose[2]
x_o = x + o_size*np.cos(o)
y_o = y + o_size*np.sin(o)
x_path, y_path = particles[Max_id].get_path()
plt.show(block=False)
robot_path.set_xdata(x_path)
robot_path.set_ydata(y_path)
ax.draw_artist(ax.patch)
ax.draw_artist(robot_path)
robot_line.set_xdata(x)
robot_line.set_ydata(y)
ax.draw_artist(ax.patch)
ax.draw_artist(robot_line)
robot_orientation.set_xdata([x, x_o])
robot_orientation.set_ydata([y, y_o])
ax.draw_artist(ax.patch)
ax.draw_artist(robot_orientation)
Landmarkers = particles[Max_id].get_landmarkers()
i = 0
for marker in Landmarkers:
if marker == None:
x_f[i] = KEY_NUMBER + circle
y_f[i] = KEY_NUMBER + circle
i += 1
continue
pose_m = marker.get_marker_position()
x_m = pose_m[0]
y_m = pose_m[1]
std_m = marker.get_marker_covariance()
x_std_m = std_m[0][0]
y_std_m = std_m[1][1]
x_f[i] = x_m + x_std_m * np.cos(circle)
y_f[i] = y_m + y_std_m * np.sin(circle)
i += 1
marker_line.set_xdata(x_f)
marker_line.set_ydata(y_f)
ax.draw_artist(ax.patch)
ax.draw_artist(marker_line)
fig.canvas.flush_events()
def resample_particles(particles, updated_marker):
# Returns a new set of particles obtained by performing stochastic universal sampling, according to the particle weights.
# distance between pointers
step = 1.0/NUMBER_PARTICLES
# random start of first pointer
r = np.random.uniform(0,step)
# where we are along the weights
c = particles[0].get_weight()
# index of weight container and corresponding particle
i = 0
index = 0
new_particles = []
#loop over all particle weights
for particle in particles:
#go through the weights until you find the particle
u = r + index*step
while u > c:
i = i + 1
c = c + particles[i].get_weight()
#add that particle
if i == index:
new_particle = particles[i]
new_particle.set_weight(step)
else:
new_particle = particles[i].copy(updated_marker)
#new_particle = copy.deepcopy(particles[i])
#new_particle.set_weight(step)
new_particles.append(new_particle)
#increase the threshold
index += 1
del particles
return new_particles
class Particle():
#each particle has a pose(x,y,o), a weight(w) and a series of kalman filters for every landmark
#in the beggining all particles are in the origin frame of the world (0,0,0)
def __init__(self):
self.X_robot = np.array([0, 0, 0], dtype='float64').transpose()
self.weight = 1.0/NUMBER_PARTICLES
self.Landmarkers = [None]*NUMBER_MARKERS
self.x_path = np.array([0], dtype='float64')
self.y_path = np.array([0], dtype='float64')
def get_kalman_filters(self, marker_id, Z):
if self.Landmarkers[marker_id] == None:
self.Landmarkers[marker_id] = KalmanFilter()
return self.Landmarkers[marker_id]
def particle_prediction(self, motion_model):
#if the robot moves we just add the motion model to the previous pose to predict the particle position
x = 0
y = 1
o = 2
noise = np.array([np.random.normal(0,translation_noise), np.random.normal(0,translation_noise), np.random.normal(0,rotation_noise)], dtype='float64').transpose()
noise = noise*motion_model*noise_factor
self.X_robot = self.X_robot + motion_model + noise
while self.X_robot[o] > np.pi:
self.X_robot[o] = self.X_robot[o] - 2*np.pi
while self.X_robot[o] < -np.pi:
self.X_robot[o] = self.X_robot[o] + 2*np.pi
self.x_path = np.insert(self.x_path, 0, self.X_robot[x])
self.y_path = np.insert(self.y_path, 0, self.X_robot[y])
return self.X_robot
def update_weight(self, marker_id):
std = self.Landmarkers[marker_id].get_marker_covariance()
dev = self.Landmarkers[marker_id].get_marker_validity()
fact = np.sqrt(np.linalg.det(2* np.pi * std))
expo = - np.dot(dev.T, np.linalg.inv(std).dot(dev))/2
self.weight = self.weight / fact * np.exp(expo)
def get_weight(self):
return self.weight
def normalize_weight(self, total_weight):
self.weight = self.weight / total_weight
def set_weight(self, new_weight):
self.weight = new_weight
def get_position(self):
return self.X_robot
def get_landmarkers(self):
return self.Landmarkers
def get_path(self):
return self.x_path, self.y_path
def copy(self, updated_marker):
new_particle = Particle()
del new_particle.x_path
del new_particle.y_path
del new_particle.Landmarkers
del new_particle.X_robot
new_particle.x_path = np.copy(self.x_path)
new_particle.y_path = np.copy(self.y_path)
for i in range(len(self.Landmarkers)):
if self.Landmarkers[i] != None and updated_marker[i] == True:
self.Landmarkers[i] = self.Landmarkers[i].copy()
new_particle.Landmarkers = self.Landmarkers
new_particle.X_robot = np.copy(self.X_robot)
return new_particle
class KalmanFilter():
def __init__(self):
# X_ espected value of landmarks' position (x,y)
# X_robot (x, y, yaw)
# H gradient of markers' relative position to robot (h:(x_m,y_m) -> (distance, orientation); H = dh/dX_ and X_ = X(k+1|k))
# S covariance matrix markers' position
# Q covariance matrix markers' measurement
# V diference between measurement and estimated markers' position
self.first = True
self.R_t = np.identity(number_of_dimensions, dtype='float64')*Sensor_noise #sensor noise
self.P_t = np.identity(number_of_dimensions+1, dtype='float64')*Odom_noise #sensor noise
def compute_G(self, X_robot):
x = 0 # x position
y = 1 # y position
y = self.X_[y] - X_robot[y]
x = self.X_[x] - X_robot[x]
# compute H
denominator = x**2 + y**2
g_o11 = x / np.sqrt(denominator)
g_o12 = y / np.sqrt(denominator)
g_o21 = -y / denominator
g_o22 = x / denominator
self.G_o = np.array([[g_o11, g_o12], [g_o21, g_o22]])
g_s11 = -g_o11
g_s12 = -g_o12
g_s21 = -g_o21
g_s22 = -g_o22
self.G_s = np.array([[g_s21, g_s22, 0], [g_s21, g_s22, -1]])
def Apply_EKF(self, X_robot, Z):
x = 0 # x position
y = 1 # y position
o = 2 # o orientaio
d = 0 # distance measured
fi = 1 # orientaion of the measurement
if self.first == True:
# the angle is in the direction y to x, reverse of the usual x to y
angle = (X_robot[o] + Z[fi]) | self.S = np.linalg.inv(self.G_o.dot(np.linalg.inv(self.R_t).dot(self.G_o.T)))
self.V = np.array([0, 0], dtype='float64').transpose()
self.L_t = np.identity(number_of_dimensions, dtype='float64')
else:
# Prediction
y = self.X_[y] - X_robot[y]
x = self.X_[x] - X_robot[x]
d = np.sqrt(x**2 + y**2) # distance
fi = np.arctan2(y, x) - X_robot[o] # direction
while fi > np.pi:
fi = fi - 2*np.pi
while fi < -np.pi:
fi = fi + 2*np.pi
Z_ = np.array([d, fi], dtype='float64').transpose()
self.compute_G(X_robot)
self.Q = self.G_o.dot(self.S.dot(self.G_o.T)) + self.R_t
# Observation
self.V = np.subtract(Z, Z_) # Z = [d, teta]
def Update(self):
# Update
if self.first == False:
# K kalman gain
K = self.S.dot(self.G_o.T.dot(np.linalg.inv(self.Q)))
self.X_ = self.X_ + K.dot(self.V)
self.L_t = self.G_s.dot(self.P_t.dot(self.G_s.T)) + self.G_o.dot(self.S.dot(self.G_o.T)) + self.R_t
self.S = (np.identity(number_of_dimensions)- K.dot(self.G_o)).dot(self.S)
else:
self.first = False
def get_marker_position(self):
return self.X_
def get_marker_covariance(self):
return self.L_t
def get_marker_validity(self):
return self.V
def measurement_validition(self):
return np.dot(self.V.T, np.linalg.inv(self.L_t)).dot(self.V)
def copy(self):
new_KF = KalmanFilter()
new_KF.X_ = np.copy(self.X_)
new_KF.S = np.copy(self.S)
new_KF.L_t = np.copy(self.L_t)
new_KF.V = np.copy(self.V)
new_KF.first = False
return new_KF
class markers():
def __init__(self):
self.could_it_read = False
self.z_distance_left_eye_to_robot_wheel = camara_distance_z
self.x_distance_left_eye_to_robot_wheel = camara_distance_x
self.markers_info = [None]*NUMBER_MARKERS
self.list_ids = np.ones(NUMBER_MARKERS, dtype='int32')*KEY_NUMBER
def callback_Markers(self, data):
# static tf could be applied here: z = z + z_distance_left_eye_to_robot_wheel, x = x + x_distance_left_eye_to_robot_wheel
for i in range(NUMBER_MARKERS):
try:
marker_info = data.markers.pop()
except:
break
self.list_ids[i] = marker_info.id
self.markers_info[marker_info.id] = marker_info
def get_measerment(self, index):
x = self.markers_info[index].pose.pose.position.x # right-left
z = self.markers_info[index].pose.pose.position.z # front-back
# position of the marker relative to base_link
z = z + self.z_distance_left_eye_to_robot_wheel
x = x + self.x_distance_left_eye_to_robot_wheel
marker_distance = np.sqrt(z**2+x**2)
marker_direction = np.arctan(x/z)
return np.array([marker_distance, -marker_direction], dtype='float64').transpose()
def get_list_ids(self):
return self.list_ids
def reset_list_ids(self):
i = 0
while self.list_ids[i] != KEY_NUMBER:
self.list_ids[i] = KEY_NUMBER
i += 1
def marker_info(self, index):
return self.markers_info[index]
class odom():
def __init__(self):
self.read_move = np.array([0, 0, 0], dtype='float64').transpose()
self.first_read = True
def callback_odom(self, data):
# robo_frame
frame_id = data.header.frame_id # odom
child_frame_id = data.child_frame_id # base_link
# pose
x = data.pose.pose.position.x # front-back
y = data.pose.pose.position.y # right-left
orientation_x = data.pose.pose.orientation.x
orientation_y = data.pose.pose.orientation.y
orientation_z = data.pose.pose.orientation.z
orientation_w = data.pose.pose.orientation.w
roll, pitch, yaw = tf.transformations.euler_from_quaternion((orientation_x, orientation_y, orientation_z, orientation_w))
if self.first_read == True:
self.last_position = np.array([x, y, yaw], dtype='float64').transpose()
self.total_movement = np.array([0, 0, 0], dtype='float64').transpose()
self.first_read = False
self.odom_position = np.array([x, y, yaw], dtype='float64').transpose()
self.movement = np.subtract(self.odom_position, self.last_position)
self.total_movement = np.add(self.total_movement, np.absolute(self.movement))
if self.movement[2] > np.pi:
self.movement[2] = 2*np.pi - self.movement[2]
if self.movement[2] < -np.pi:
self.movement[2] = - 2*np.pi - self.movement[2]
self.last_position = self.odom_position
self.read_move = np.add(self.read_move, self.movement)
def actual_movement(self):
return self.read_move
def get_movement(self):
msg = self.read_move
self.read_move = np.array([0, 0, 0], dtype='float64').transpose()
return msg
def get_total_movement(self):
return self.total_movement
def FastSlam():
odom_measurement = odom()
particles = [Particle() for i in range(NUMBER_PARTICLES)]
updated_marker = [False]*NUMBER_MARKERS
marker_measurement = markers()
rospy.init_node('FastSlam', anonymous=True)
frequency = rospy.Rate(Frequency)
rospy.Subscriber("RosAria/pose", Odometry, odom_measurement.callback_odom)
#Subscriber of aruco publisher topic with arucos observations
rospy.Subscriber('aruco_marker_publisher/markers', MarkerArray, marker_measurement.callback_Markers)
while not rospy.is_shutdown():
start_time = time.time()
motion_model = odom_measurement.actual_movement()
landmarkers_ids = marker_measurement.get_list_ids()
if landmarkers_ids[0] != KEY_NUMBER and np.linalg.norm(motion_model) > minimum_move:
total_weight = 0
motion_model = odom_measurement.get_movement()
for i in range(NUMBER_PARTICLES):
expected_state = particles[i].particle_prediction(motion_model)
for marker_id in landmarkers_ids:
if marker_id == KEY_NUMBER:
break
updated_marker[marker_id] = True
landmarker_measurement = marker_measurement.get_measerment(marker_id)
kalman_filter = particles[i].get_kalman_filters(marker_id, landmarker_measurement)
kalman_filter.Apply_EKF(expected_state, landmarker_measurement)
validity_info = kalman_filter.measurement_validition()
particles[i].update_weight(marker_id)
##############################################################
if np.linalg.norm(validity_info) < validity_threshold:
##############################################################
kalman_filter.Update()
total_weight += particles[i].get_weight()
sum_weights = 0
for i in range(NUMBER_PARTICLES):
particles[i].normalize_weight(total_weight)
sum_weights += particles[i].get_weight()**2
drawing_plot(particles)
neff = 1.0/sum_weights
if neff < float(NUMBER_PARTICLES)/2:
particles = resample_particles(particles, updated_marker)
del updated_marker
updated_marker = [False]*NUMBER_MARKERS
marker_measurement.reset_list_ids()
elapsed_time = time.time() - start_time
if elapsed_time > 9*10**-2:
print elapsed_time
frequency.sleep()
if __name__ == '__main__':
FastSlam() |
self.X_ = np.array([X_robot[x] + Z[d]*np.cos(angle), X_robot[y] + Z[d]*np.sin(angle)], dtype='float64').transpose() # first landmark position
self.compute_G(X_robot) | random_line_split |
hellweg.py | # -*- coding: utf-8 -*-
u"""Hellweg execution template.
:copyright: Copyright (c) 2017 RadiaSoft LLC. All Rights Reserved.
:license: http://www.apache.org/licenses/LICENSE-2.0.html
"""
from __future__ import absolute_import, division, print_function
from pykern import pkcollections
from pykern import pkio
from pykern.pkdebug import pkdc, pkdp
from rslinac import solver
from sirepo import simulation_db
from sirepo.template import template_common, hellweg_dump_reader
import math
import numpy as np
import os.path
import py.path
import re
HELLWEG_DUMP_FILE = 'all-data.bin'
HELLWEG_SUMMARY_FILE = 'output.txt'
HELLWEG_INI_FILE = 'defaults.ini'
HELLWEG_INPUT_FILE = 'input.txt'
#: Simulation type
SIM_TYPE = 'hellweg'
WANT_BROWSER_FRAME_CACHE = True
# lattice element is required so make it very short and wide drift
_DEFAULT_DRIFT_ELEMENT = 'DRIFT 1e-16 1e+16 2' + "\n"
_HELLWEG_PARSED_FILE = 'PARSED.TXT'
_REPORT_STYLE_FIELDS = ['colorMap', 'notes']
_SCHEMA = simulation_db.get_schema(SIM_TYPE)
def background_percent_complete(report, run_dir, is_running):
if is_running:
return {
'percentComplete': 0,
'frameCount': 0,
}
dump_file = _dump_file(run_dir)
if os.path.exists(dump_file):
beam_header = hellweg_dump_reader.beam_header(dump_file)
last_update_time = int(os.path.getmtime(dump_file))
frame_count = beam_header.NPoints
return {
'lastUpdateTime': last_update_time,
'percentComplete': 100,
'frameCount': frame_count,
'summaryData': _summary_text(run_dir),
}
return {
'percentComplete': 100,
'frameCount': 0,
'error': _parse_error_message(run_dir)
}
def extract_beam_histrogram(report, run_dir, frame):
beam_info = hellweg_dump_reader.beam_info(_dump_file(run_dir), frame)
points = hellweg_dump_reader.get_points(beam_info, report.reportType)
hist, edges = np.histogram(points, template_common.histogram_bins(report.histogramBins))
return {
'title': _report_title(report.reportType, 'BeamHistogramReportType', beam_info),
'x_range': [edges[0], edges[-1]],
'y_label': 'Number of Particles',
'x_label': hellweg_dump_reader.get_label(report.reportType),
'points': hist.T.tolist(),
}
def extract_beam_report(report, run_dir, frame):
data = simulation_db.read_json(run_dir.join(template_common.INPUT_BASE_NAME))
model = data.models.beamAnimation
model.update(report)
beam_info = hellweg_dump_reader.beam_info(_dump_file(run_dir), frame)
x, y = report.reportType.split('-')
values = [
hellweg_dump_reader.get_points(beam_info, x),
hellweg_dump_reader.get_points(beam_info, y),
]
model['x'] = x
model['y'] = y
return template_common.heatmap(values, model, {
'x_label': hellweg_dump_reader.get_label(x),
'y_label': hellweg_dump_reader.get_label(y),
'title': _report_title(report.reportType, 'BeamReportType', beam_info),
'z_label': 'Number of Particles',
'summaryData': _summary_text(run_dir),
})
def extract_parameter_report(report, run_dir):
s = solver.BeamSolver(
os.path.join(str(run_dir), HELLWEG_INI_FILE),
os.path.join(str(run_dir), HELLWEG_INPUT_FILE))
s.load_bin(os.path.join(str(run_dir), HELLWEG_DUMP_FILE))
y1_var, y2_var = report.reportType.split('-')
x_field = 'z'
x = s.get_structure_parameters(_parameter_index(x_field))
y1 = s.get_structure_parameters(_parameter_index(y1_var))
y1_extent = [np.min(y1), np.max(y1)]
y2 = s.get_structure_parameters(_parameter_index(y2_var))
y2_extent = [np.min(y2), np.max(y2)]
return {
'title': _enum_text('ParameterReportType', report.reportType),
'x_range': [x[0], x[-1]],
'y_label': hellweg_dump_reader.get_parameter_label(y1_var),
'x_label': hellweg_dump_reader.get_parameter_label(x_field),
'x_points': x,
'points': [
y1,
y2,
],
'y_range': [min(y1_extent[0], y2_extent[0]), max(y1_extent[1], y2_extent[1])],
'y1_title': hellweg_dump_reader.get_parameter_title(y1_var),
'y2_title': hellweg_dump_reader.get_parameter_title(y2_var),
}
def extract_particle_report(report, run_dir):
x_field = 'z0'
particle_info = hellweg_dump_reader.particle_info(_dump_file(run_dir), report.reportType, int(report.renderCount))
x = particle_info['z_values']
return {
'title': _enum_text('ParticleReportType', report.reportType),
'x_range': [np.min(x), np.max(x)],
'y_label': hellweg_dump_reader.get_label(report.reportType),
'x_label': hellweg_dump_reader.get_label(x_field),
'x_points': x,
'points': particle_info['y_values'],
'y_range': particle_info['y_range'],
}
def fixup_old_data(data):
for m in ('beamAnimation', 'beamHistogramAnimation', 'parameterAnimation', 'particleAnimation'):
if m not in data.models:
data.models[m] = pkcollections.Dict({})
template_common.update_model_defaults(data.models[m], m, _SCHEMA)
if 'solenoidFile' not in data['models']['solenoid']:
data['models']['solenoid']['solenoidFile'] = ''
if 'beamDefinition' not in data['models']['beam']:
beam = data['models']['beam']
beam['beamDefinition'] = 'transverse_longitude'
beam['cstCompress'] = '0'
beam['transversalFile2d'] = ''
beam['transversalFile4d'] = ''
beam['longitudinalFile1d'] = ''
beam['longitudinalFile2d'] = ''
beam['cstFile'] = ''
template_common.organize_example(data)
def get_animation_name(data):
return 'animation'
def get_application_data(data):
if data['method'] == 'compute_particle_ranges':
return template_common.compute_field_range(data, _compute_range_across_files)
assert False, 'unknown application data method: {}'.format(data['method'])
def lib_files(data, source_lib):
return template_common.filename_to_path(_simulation_files(data), source_lib)
def get_simulation_frame(run_dir, data, model_data):
frame_index = int(data['frameIndex'])
if data['modelName'] == 'beamAnimation':
args = template_common.parse_animation_args(
data,
{
'1': ['reportType', 'histogramBins', 'startTime'],
'': ['reportType', 'histogramBins', 'plotRangeType', 'horizontalSize', 'horizontalOffset', 'verticalSize', 'verticalOffset', 'isRunning', 'startTime'],
},
)
return extract_beam_report(args, run_dir, frame_index)
elif data['modelName'] == 'beamHistogramAnimation':
args = template_common.parse_animation_args(
data,
{'': ['reportType', 'histogramBins', 'startTime']},
)
return extract_beam_histrogram(args, run_dir, frame_index)
elif data['modelName'] == 'particleAnimation':
args = template_common.parse_animation_args(
data,
{'': ['reportType', 'renderCount', 'startTime']},
)
return extract_particle_report(args, run_dir)
elif data['modelName'] == 'parameterAnimation':
args = template_common.parse_animation_args(
data,
{'': ['reportType', 'startTime']},
)
return extract_parameter_report(args, run_dir)
raise RuntimeError('unknown animation model: {}'.format(data['modelName']))
def models_related_to_report(data):
"""What models are required for this data['report']
Args:
data (dict): simulation
Returns:
list: Named models, model fields or values (dict, list) that affect report
"""
r = data['report']
if r == 'animation':
return []
res = template_common.report_fields(data, r, _REPORT_STYLE_FIELDS) + [
'beam',
'ellipticalDistribution',
'energyPhaseDistribution',
'solenoid',
'sphericalDistribution',
'twissDistribution',
]
for f in template_common.lib_files(data):
res.append(f.mtime())
return res
def python_source_for_model(data, model):
return '''
from rslinac import solver
{}
with open('input.txt', 'w') as f:
f.write(input_file)
with open('defaults.ini', 'w') as f:
f.write(ini_file)
s = solver.BeamSolver('defaults.ini', 'input.txt')
s.solve()
s.save_output('output.txt')
'''.format(_generate_parameters_file(data, is_parallel=len(data.models.beamline)))
def remove_last_frame(run_dir):
pass
def validate_delete_file(data, filename, file_type):
"""Returns True if the filename is in use by the simulation data."""
return filename in _simulation_files(data)
def write_parameters(data, run_dir, is_parallel):
"""Write the parameters file
Args:
data (dict): input
run_dir (py.path): where to write
is_parallel (bool): run in background?
"""
pkio.write_text(
run_dir.join(template_common.PARAMETERS_PYTHON_FILE),
_generate_parameters_file(
data,
run_dir,
is_parallel,
),
)
def _compute_range_across_files(run_dir, data):
res = {}
for v in _SCHEMA.enum.BeamReportType:
x, y = v[0].split('-')
res[x] = []
res[y] = []
dump_file = _dump_file(run_dir)
if not os.path.exists(dump_file):
return res
beam_header = hellweg_dump_reader.beam_header(dump_file)
for frame in xrange(beam_header.NPoints):
beam_info = hellweg_dump_reader.beam_info(dump_file, frame)
for field in res:
values = hellweg_dump_reader.get_points(beam_info, field)
if not len(values):
pass
elif len(res[field]):
res[field][0] = min(min(values), res[field][0])
res[field][1] = max(max(values), res[field][1])
else:
res[field] = [min(values), max(values)]
return res
def _dump_file(run_dir):
return os.path.join(str(run_dir), HELLWEG_DUMP_FILE)
def _enum_text(enum_name, v):
enum_values = _SCHEMA['enum'][enum_name]
for e in enum_values:
if e[0] == v:
return e[1]
raise RuntimeError('invalid enum value: {}, {}'.format(enum_values, v))
def | (models):
# BEAM SPH2D 0.564 -15 5 NORM2D 0.30 0.0000001 90 180
beam_def = models.beam.beamDefinition
if beam_def == 'transverse_longitude':
return 'BEAM {} {}'.format(_generate_transverse_dist(models), _generate_longitude_dist(models))
if beam_def == 'cst_pit':
return 'BEAM CST_PIT {} {}'.format(
template_common.lib_file_name('beam', 'cstFile', models.beam.cstFile),
'COMPRESS' if models.beam.cstCompress else '',
)
if beam_def == 'cst_pid':
return 'BEAM CST_PID {} {}'.format(
template_common.lib_file_name('beam', 'cstFile', models.beam.cstFile),
_generate_energy_phase_distribution(models.energyPhaseDistribution),
)
raise RuntimeError('invalid beam def: {}'.format(beam_def))
def _generate_cell_params(el):
#TODO(pjm): add an option field to select auto-calculate
if el.attenuation == 0 and el.aperture == 0:
return '{} {} {}'.format(el.phaseAdvance, el.phaseVelocity, el.acceleratingInvariant)
return '{} {} {} {} {}'.format(el.phaseAdvance, el.phaseVelocity, el.acceleratingInvariant, el.attenuation, el.aperture)
def _generate_charge(models):
if models.beam.spaceCharge == 'none':
return ''
return 'SPCHARGE {} {}'.format(models.beam.spaceCharge.upper(), models.beam.spaceChargeCore)
def _generate_current(models):
return 'CURRENT {} {}'.format(models.beam.current, models.beam.numberOfParticles)
def _generate_energy_phase_distribution(dist):
return '{} {} {}'.format(
dist.meanPhase,
dist.phaseLength,
dist.phaseDeviation if dist.distributionType == 'gaussian' else '',
)
def _generate_lattice(models):
res = ''
for el in models.beamline:
if el.type == 'powerElement':
res += 'POWER {} {} {}'.format(el.inputPower, el.frequency, el.phaseShift)
elif el.type == 'cellElement':
res += 'CELL {}'.format(_generate_cell_params(el))
has_cell_or_drift = True
elif el.type == 'cellsElement':
res += 'CELLS {} {}'.format(el.repeat, _generate_cell_params(el))
has_cell_or_drift = True
elif el.type == 'driftElement':
res += 'DRIFT {} {} {}'.format(el.length, el.radius, el.meshPoints)
has_cell_or_drift = True
elif el.type == 'saveElement':
#TODO(pjm): implement this
pass
else:
raise RuntimeError('unknown element type: {}'.format(el.type))
res += "\n"
return res
def _generate_longitude_dist(models):
dist_type = models.beam.longitudinalDistribution
if dist_type == 'norm2d':
dist = models.energyPhaseDistribution
if dist.distributionType == 'uniform':
return 'NORM2D {} {} {} {}'.format(
dist.meanEnergy, dist.energySpread, dist.meanPhase, dist.phaseLength)
if dist.distributionType == 'gaussian':
return 'NORM2D {} {} {} {} {} {}'.format(
dist.meanEnergy, dist.energySpread, dist.energyDeviation, dist.meanPhase, dist.phaseLength, dist.phaseDeviation)
raise RuntimeError('unknown longitudinal distribution type: {}'.format(models.longitudinalDistribution.distributionType))
if dist_type == 'file1d':
return 'FILE1D {} {}'.format(
template_common.lib_file_name('beam', 'longitudinalFile1d', models.beam.longitudinalFile1d),
_generate_energy_phase_distribution(models.energyPhaseDistribution),
)
if dist_type == 'file2d':
return 'FILE2D {}'.format(template_common.lib_file_name('beam', 'transversalFile2d', beam.transversalFile2d))
raise RuntimeError('unknown longitudinal distribution: {}'.format(models.beam.longitudinalDistribution))
def _generate_options(models):
if models.simulationSettings.allowBackwardWaves == '1':
return 'OPTIONS REVERSE'
return ''
def _generate_parameters_file(data, run_dir=None, is_parallel=False):
template_common.validate_models(data, _SCHEMA)
v = template_common.flatten_data(data['models'], {})
v['optionsCommand'] = _generate_options(data['models'])
v['solenoidCommand'] = _generate_solenoid(data['models'])
v['beamCommand'] = _generate_beam(data['models'])
v['currentCommand'] = _generate_current(data['models'])
v['chargeCommand'] = _generate_charge(data['models'])
if is_parallel:
v['latticeCommands'] = _generate_lattice(data['models'])
else:
v['latticeCommands'] = _DEFAULT_DRIFT_ELEMENT
return template_common.render_jinja(SIM_TYPE, v)
def _generate_solenoid(models):
solenoid = models.solenoid
if solenoid.sourceDefinition == 'none':
return ''
if solenoid.sourceDefinition == 'values':
#TODO(pjm): latest version also has solenoid.fringeRegion
return 'SOLENOID {} {} {}'.format(
solenoid.fieldStrength, solenoid.length, solenoid.z0)
if solenoid.sourceDefinition == 'file':
return 'SOLENOID {}'.format(
template_common.lib_file_name('solenoid', 'solenoidFile', solenoid.solenoidFile))
raise RuntimeError('unknown solenoidDefinition: {}'.format(solenoid.sourceDefinition))
def _generate_transverse_dist(models):
dist_type = models.beam.transversalDistribution
if dist_type == 'twiss4d':
dist = models.twissDistribution
return 'TWISS4D {} {} {} {} {} {}'.format(
dist.horizontalAlpha, dist.horizontalBeta, dist.horizontalEmittance,
dist.verticalAlpha, dist.verticalBeta, dist.verticalEmittance)
if dist_type == 'sph2d':
dist = models.sphericalDistribution
if dist.curvature == 'flat':
dist.curvatureFactor = 0
return 'SPH2D {} {} {}'.format(dist.radialLimit, dist.curvatureFactor, dist.thermalEmittance)
if dist_type == 'ell2d':
dist = models.ellipticalDistribution
return 'ELL2D {} {} {} {}'.format(dist.aX, dist.bY, dist.rotationAngle, dist.rmsDeviationFactor)
beam = models.beam
if dist_type == 'file2d':
return 'FILE2D {}'.format(template_common.lib_file_name('beam', 'transversalFile2d', beam.transversalFile2d))
if dist_type == 'file4d':
return 'FILE4D {}'.format(template_common.lib_file_name('beam', 'transversalFile4d', beam.transversalFile4d))
raise RuntimeError('unknown transverse distribution: {}'.format(dist_type))
def _parameter_index(name):
return hellweg_dump_reader.parameter_index(name)
def _parse_error_message(run_dir):
path = os.path.join(str(run_dir), _HELLWEG_PARSED_FILE)
if not os.path.exists(path):
return 'No elements generated'
text = pkio.read_text(str(path))
for line in text.split("\n"):
match = re.search('^ERROR:\s(.*)$', line)
if match:
return match.group(1)
return 'No output generated'
def _report_title(report_type, enum_name, beam_info):
return '{}, z={:.4f} cm'.format(
_enum_text(enum_name, report_type),
100 * hellweg_dump_reader.get_parameter(beam_info, 'z'))
def _simulation_files(data):
res = []
solenoid = data.models.solenoid
if solenoid.sourceDefinition == 'file' and solenoid.solenoidFile:
res.append(template_common.lib_file_name('solenoid', 'solenoidFile', solenoid.solenoidFile))
beam = data.models.beam
if beam.beamDefinition == 'cst_pit' or beam.beamDefinition == 'cst_pid':
res.append(template_common.lib_file_name('beam', 'cstFile', beam.cstFile))
if beam.beamDefinition == 'transverse_longitude':
if beam.transversalDistribution == 'file2d':
res.append(template_common.lib_file_name('beam', 'transversalFile2d', beam.transversalFile2d))
elif beam.transversalDistribution == 'file4d':
res.append(template_common.lib_file_name('beam', 'transversalFile4d', beam.transversalFile4d))
if beam.longitudinalDistribution == 'file1d':
res.append(template_common.lib_file_name('beam', 'longitudinalFile1d', beam.longitudinalFile1d))
if beam.longitudinalDistribution == 'file2d':
res.append(template_common.lib_file_name('beam', 'longitudinalFile2d', beam.longitudinalFile2d))
return res
def _summary_text(run_dir):
return pkio.read_text(os.path.join(str(run_dir), HELLWEG_SUMMARY_FILE))
| _generate_beam | identifier_name |
hellweg.py | # -*- coding: utf-8 -*-
u"""Hellweg execution template.
:copyright: Copyright (c) 2017 RadiaSoft LLC. All Rights Reserved.
:license: http://www.apache.org/licenses/LICENSE-2.0.html
"""
from __future__ import absolute_import, division, print_function
from pykern import pkcollections
from pykern import pkio
from pykern.pkdebug import pkdc, pkdp
from rslinac import solver
from sirepo import simulation_db
from sirepo.template import template_common, hellweg_dump_reader
import math
import numpy as np
import os.path
import py.path
import re
HELLWEG_DUMP_FILE = 'all-data.bin'
HELLWEG_SUMMARY_FILE = 'output.txt'
HELLWEG_INI_FILE = 'defaults.ini'
HELLWEG_INPUT_FILE = 'input.txt'
#: Simulation type
SIM_TYPE = 'hellweg'
WANT_BROWSER_FRAME_CACHE = True
# lattice element is required so make it very short and wide drift
_DEFAULT_DRIFT_ELEMENT = 'DRIFT 1e-16 1e+16 2' + "\n"
_HELLWEG_PARSED_FILE = 'PARSED.TXT'
_REPORT_STYLE_FIELDS = ['colorMap', 'notes']
_SCHEMA = simulation_db.get_schema(SIM_TYPE)
def background_percent_complete(report, run_dir, is_running):
if is_running:
return {
'percentComplete': 0,
'frameCount': 0,
}
dump_file = _dump_file(run_dir)
if os.path.exists(dump_file):
beam_header = hellweg_dump_reader.beam_header(dump_file)
last_update_time = int(os.path.getmtime(dump_file))
frame_count = beam_header.NPoints
return {
'lastUpdateTime': last_update_time,
'percentComplete': 100,
'frameCount': frame_count,
'summaryData': _summary_text(run_dir),
}
return {
'percentComplete': 100,
'frameCount': 0,
'error': _parse_error_message(run_dir)
}
def extract_beam_histrogram(report, run_dir, frame):
beam_info = hellweg_dump_reader.beam_info(_dump_file(run_dir), frame)
points = hellweg_dump_reader.get_points(beam_info, report.reportType)
hist, edges = np.histogram(points, template_common.histogram_bins(report.histogramBins))
return {
'title': _report_title(report.reportType, 'BeamHistogramReportType', beam_info),
'x_range': [edges[0], edges[-1]],
'y_label': 'Number of Particles',
'x_label': hellweg_dump_reader.get_label(report.reportType),
'points': hist.T.tolist(),
}
def extract_beam_report(report, run_dir, frame):
data = simulation_db.read_json(run_dir.join(template_common.INPUT_BASE_NAME))
model = data.models.beamAnimation
model.update(report)
beam_info = hellweg_dump_reader.beam_info(_dump_file(run_dir), frame)
x, y = report.reportType.split('-')
values = [
hellweg_dump_reader.get_points(beam_info, x),
hellweg_dump_reader.get_points(beam_info, y),
]
model['x'] = x
model['y'] = y
return template_common.heatmap(values, model, {
'x_label': hellweg_dump_reader.get_label(x),
'y_label': hellweg_dump_reader.get_label(y),
'title': _report_title(report.reportType, 'BeamReportType', beam_info),
'z_label': 'Number of Particles',
'summaryData': _summary_text(run_dir),
})
def extract_parameter_report(report, run_dir):
s = solver.BeamSolver(
os.path.join(str(run_dir), HELLWEG_INI_FILE),
os.path.join(str(run_dir), HELLWEG_INPUT_FILE))
s.load_bin(os.path.join(str(run_dir), HELLWEG_DUMP_FILE))
y1_var, y2_var = report.reportType.split('-')
x_field = 'z'
x = s.get_structure_parameters(_parameter_index(x_field))
y1 = s.get_structure_parameters(_parameter_index(y1_var))
y1_extent = [np.min(y1), np.max(y1)]
y2 = s.get_structure_parameters(_parameter_index(y2_var))
y2_extent = [np.min(y2), np.max(y2)]
return {
'title': _enum_text('ParameterReportType', report.reportType),
'x_range': [x[0], x[-1]],
'y_label': hellweg_dump_reader.get_parameter_label(y1_var),
'x_label': hellweg_dump_reader.get_parameter_label(x_field),
'x_points': x,
'points': [
y1,
y2,
],
'y_range': [min(y1_extent[0], y2_extent[0]), max(y1_extent[1], y2_extent[1])],
'y1_title': hellweg_dump_reader.get_parameter_title(y1_var),
'y2_title': hellweg_dump_reader.get_parameter_title(y2_var),
}
def extract_particle_report(report, run_dir):
x_field = 'z0'
particle_info = hellweg_dump_reader.particle_info(_dump_file(run_dir), report.reportType, int(report.renderCount))
x = particle_info['z_values']
return {
'title': _enum_text('ParticleReportType', report.reportType),
'x_range': [np.min(x), np.max(x)],
'y_label': hellweg_dump_reader.get_label(report.reportType),
'x_label': hellweg_dump_reader.get_label(x_field),
'x_points': x,
'points': particle_info['y_values'],
'y_range': particle_info['y_range'],
}
def fixup_old_data(data):
for m in ('beamAnimation', 'beamHistogramAnimation', 'parameterAnimation', 'particleAnimation'):
if m not in data.models:
data.models[m] = pkcollections.Dict({})
template_common.update_model_defaults(data.models[m], m, _SCHEMA)
if 'solenoidFile' not in data['models']['solenoid']:
data['models']['solenoid']['solenoidFile'] = ''
if 'beamDefinition' not in data['models']['beam']:
beam = data['models']['beam']
beam['beamDefinition'] = 'transverse_longitude'
beam['cstCompress'] = '0'
beam['transversalFile2d'] = ''
beam['transversalFile4d'] = ''
beam['longitudinalFile1d'] = ''
beam['longitudinalFile2d'] = ''
beam['cstFile'] = ''
template_common.organize_example(data)
def get_animation_name(data):
return 'animation'
def get_application_data(data):
|
def lib_files(data, source_lib):
return template_common.filename_to_path(_simulation_files(data), source_lib)
def get_simulation_frame(run_dir, data, model_data):
frame_index = int(data['frameIndex'])
if data['modelName'] == 'beamAnimation':
args = template_common.parse_animation_args(
data,
{
'1': ['reportType', 'histogramBins', 'startTime'],
'': ['reportType', 'histogramBins', 'plotRangeType', 'horizontalSize', 'horizontalOffset', 'verticalSize', 'verticalOffset', 'isRunning', 'startTime'],
},
)
return extract_beam_report(args, run_dir, frame_index)
elif data['modelName'] == 'beamHistogramAnimation':
args = template_common.parse_animation_args(
data,
{'': ['reportType', 'histogramBins', 'startTime']},
)
return extract_beam_histrogram(args, run_dir, frame_index)
elif data['modelName'] == 'particleAnimation':
args = template_common.parse_animation_args(
data,
{'': ['reportType', 'renderCount', 'startTime']},
)
return extract_particle_report(args, run_dir)
elif data['modelName'] == 'parameterAnimation':
args = template_common.parse_animation_args(
data,
{'': ['reportType', 'startTime']},
)
return extract_parameter_report(args, run_dir)
raise RuntimeError('unknown animation model: {}'.format(data['modelName']))
def models_related_to_report(data):
"""What models are required for this data['report']
Args:
data (dict): simulation
Returns:
list: Named models, model fields or values (dict, list) that affect report
"""
r = data['report']
if r == 'animation':
return []
res = template_common.report_fields(data, r, _REPORT_STYLE_FIELDS) + [
'beam',
'ellipticalDistribution',
'energyPhaseDistribution',
'solenoid',
'sphericalDistribution',
'twissDistribution',
]
for f in template_common.lib_files(data):
res.append(f.mtime())
return res
def python_source_for_model(data, model):
return '''
from rslinac import solver
{}
with open('input.txt', 'w') as f:
f.write(input_file)
with open('defaults.ini', 'w') as f:
f.write(ini_file)
s = solver.BeamSolver('defaults.ini', 'input.txt')
s.solve()
s.save_output('output.txt')
'''.format(_generate_parameters_file(data, is_parallel=len(data.models.beamline)))
def remove_last_frame(run_dir):
pass
def validate_delete_file(data, filename, file_type):
"""Returns True if the filename is in use by the simulation data."""
return filename in _simulation_files(data)
def write_parameters(data, run_dir, is_parallel):
"""Write the parameters file
Args:
data (dict): input
run_dir (py.path): where to write
is_parallel (bool): run in background?
"""
pkio.write_text(
run_dir.join(template_common.PARAMETERS_PYTHON_FILE),
_generate_parameters_file(
data,
run_dir,
is_parallel,
),
)
def _compute_range_across_files(run_dir, data):
res = {}
for v in _SCHEMA.enum.BeamReportType:
x, y = v[0].split('-')
res[x] = []
res[y] = []
dump_file = _dump_file(run_dir)
if not os.path.exists(dump_file):
return res
beam_header = hellweg_dump_reader.beam_header(dump_file)
for frame in xrange(beam_header.NPoints):
beam_info = hellweg_dump_reader.beam_info(dump_file, frame)
for field in res:
values = hellweg_dump_reader.get_points(beam_info, field)
if not len(values):
pass
elif len(res[field]):
res[field][0] = min(min(values), res[field][0])
res[field][1] = max(max(values), res[field][1])
else:
res[field] = [min(values), max(values)]
return res
def _dump_file(run_dir):
return os.path.join(str(run_dir), HELLWEG_DUMP_FILE)
def _enum_text(enum_name, v):
enum_values = _SCHEMA['enum'][enum_name]
for e in enum_values:
if e[0] == v:
return e[1]
raise RuntimeError('invalid enum value: {}, {}'.format(enum_values, v))
def _generate_beam(models):
# BEAM SPH2D 0.564 -15 5 NORM2D 0.30 0.0000001 90 180
beam_def = models.beam.beamDefinition
if beam_def == 'transverse_longitude':
return 'BEAM {} {}'.format(_generate_transverse_dist(models), _generate_longitude_dist(models))
if beam_def == 'cst_pit':
return 'BEAM CST_PIT {} {}'.format(
template_common.lib_file_name('beam', 'cstFile', models.beam.cstFile),
'COMPRESS' if models.beam.cstCompress else '',
)
if beam_def == 'cst_pid':
return 'BEAM CST_PID {} {}'.format(
template_common.lib_file_name('beam', 'cstFile', models.beam.cstFile),
_generate_energy_phase_distribution(models.energyPhaseDistribution),
)
raise RuntimeError('invalid beam def: {}'.format(beam_def))
def _generate_cell_params(el):
#TODO(pjm): add an option field to select auto-calculate
if el.attenuation == 0 and el.aperture == 0:
return '{} {} {}'.format(el.phaseAdvance, el.phaseVelocity, el.acceleratingInvariant)
return '{} {} {} {} {}'.format(el.phaseAdvance, el.phaseVelocity, el.acceleratingInvariant, el.attenuation, el.aperture)
def _generate_charge(models):
if models.beam.spaceCharge == 'none':
return ''
return 'SPCHARGE {} {}'.format(models.beam.spaceCharge.upper(), models.beam.spaceChargeCore)
def _generate_current(models):
return 'CURRENT {} {}'.format(models.beam.current, models.beam.numberOfParticles)
def _generate_energy_phase_distribution(dist):
return '{} {} {}'.format(
dist.meanPhase,
dist.phaseLength,
dist.phaseDeviation if dist.distributionType == 'gaussian' else '',
)
def _generate_lattice(models):
res = ''
for el in models.beamline:
if el.type == 'powerElement':
res += 'POWER {} {} {}'.format(el.inputPower, el.frequency, el.phaseShift)
elif el.type == 'cellElement':
res += 'CELL {}'.format(_generate_cell_params(el))
has_cell_or_drift = True
elif el.type == 'cellsElement':
res += 'CELLS {} {}'.format(el.repeat, _generate_cell_params(el))
has_cell_or_drift = True
elif el.type == 'driftElement':
res += 'DRIFT {} {} {}'.format(el.length, el.radius, el.meshPoints)
has_cell_or_drift = True
elif el.type == 'saveElement':
#TODO(pjm): implement this
pass
else:
raise RuntimeError('unknown element type: {}'.format(el.type))
res += "\n"
return res
def _generate_longitude_dist(models):
dist_type = models.beam.longitudinalDistribution
if dist_type == 'norm2d':
dist = models.energyPhaseDistribution
if dist.distributionType == 'uniform':
return 'NORM2D {} {} {} {}'.format(
dist.meanEnergy, dist.energySpread, dist.meanPhase, dist.phaseLength)
if dist.distributionType == 'gaussian':
return 'NORM2D {} {} {} {} {} {}'.format(
dist.meanEnergy, dist.energySpread, dist.energyDeviation, dist.meanPhase, dist.phaseLength, dist.phaseDeviation)
raise RuntimeError('unknown longitudinal distribution type: {}'.format(models.longitudinalDistribution.distributionType))
if dist_type == 'file1d':
return 'FILE1D {} {}'.format(
template_common.lib_file_name('beam', 'longitudinalFile1d', models.beam.longitudinalFile1d),
_generate_energy_phase_distribution(models.energyPhaseDistribution),
)
if dist_type == 'file2d':
return 'FILE2D {}'.format(template_common.lib_file_name('beam', 'transversalFile2d', beam.transversalFile2d))
raise RuntimeError('unknown longitudinal distribution: {}'.format(models.beam.longitudinalDistribution))
def _generate_options(models):
if models.simulationSettings.allowBackwardWaves == '1':
return 'OPTIONS REVERSE'
return ''
def _generate_parameters_file(data, run_dir=None, is_parallel=False):
template_common.validate_models(data, _SCHEMA)
v = template_common.flatten_data(data['models'], {})
v['optionsCommand'] = _generate_options(data['models'])
v['solenoidCommand'] = _generate_solenoid(data['models'])
v['beamCommand'] = _generate_beam(data['models'])
v['currentCommand'] = _generate_current(data['models'])
v['chargeCommand'] = _generate_charge(data['models'])
if is_parallel:
v['latticeCommands'] = _generate_lattice(data['models'])
else:
v['latticeCommands'] = _DEFAULT_DRIFT_ELEMENT
return template_common.render_jinja(SIM_TYPE, v)
def _generate_solenoid(models):
solenoid = models.solenoid
if solenoid.sourceDefinition == 'none':
return ''
if solenoid.sourceDefinition == 'values':
#TODO(pjm): latest version also has solenoid.fringeRegion
return 'SOLENOID {} {} {}'.format(
solenoid.fieldStrength, solenoid.length, solenoid.z0)
if solenoid.sourceDefinition == 'file':
return 'SOLENOID {}'.format(
template_common.lib_file_name('solenoid', 'solenoidFile', solenoid.solenoidFile))
raise RuntimeError('unknown solenoidDefinition: {}'.format(solenoid.sourceDefinition))
def _generate_transverse_dist(models):
dist_type = models.beam.transversalDistribution
if dist_type == 'twiss4d':
dist = models.twissDistribution
return 'TWISS4D {} {} {} {} {} {}'.format(
dist.horizontalAlpha, dist.horizontalBeta, dist.horizontalEmittance,
dist.verticalAlpha, dist.verticalBeta, dist.verticalEmittance)
if dist_type == 'sph2d':
dist = models.sphericalDistribution
if dist.curvature == 'flat':
dist.curvatureFactor = 0
return 'SPH2D {} {} {}'.format(dist.radialLimit, dist.curvatureFactor, dist.thermalEmittance)
if dist_type == 'ell2d':
dist = models.ellipticalDistribution
return 'ELL2D {} {} {} {}'.format(dist.aX, dist.bY, dist.rotationAngle, dist.rmsDeviationFactor)
beam = models.beam
if dist_type == 'file2d':
return 'FILE2D {}'.format(template_common.lib_file_name('beam', 'transversalFile2d', beam.transversalFile2d))
if dist_type == 'file4d':
return 'FILE4D {}'.format(template_common.lib_file_name('beam', 'transversalFile4d', beam.transversalFile4d))
raise RuntimeError('unknown transverse distribution: {}'.format(dist_type))
def _parameter_index(name):
return hellweg_dump_reader.parameter_index(name)
def _parse_error_message(run_dir):
path = os.path.join(str(run_dir), _HELLWEG_PARSED_FILE)
if not os.path.exists(path):
return 'No elements generated'
text = pkio.read_text(str(path))
for line in text.split("\n"):
match = re.search('^ERROR:\s(.*)$', line)
if match:
return match.group(1)
return 'No output generated'
def _report_title(report_type, enum_name, beam_info):
return '{}, z={:.4f} cm'.format(
_enum_text(enum_name, report_type),
100 * hellweg_dump_reader.get_parameter(beam_info, 'z'))
def _simulation_files(data):
res = []
solenoid = data.models.solenoid
if solenoid.sourceDefinition == 'file' and solenoid.solenoidFile:
res.append(template_common.lib_file_name('solenoid', 'solenoidFile', solenoid.solenoidFile))
beam = data.models.beam
if beam.beamDefinition == 'cst_pit' or beam.beamDefinition == 'cst_pid':
res.append(template_common.lib_file_name('beam', 'cstFile', beam.cstFile))
if beam.beamDefinition == 'transverse_longitude':
if beam.transversalDistribution == 'file2d':
res.append(template_common.lib_file_name('beam', 'transversalFile2d', beam.transversalFile2d))
elif beam.transversalDistribution == 'file4d':
res.append(template_common.lib_file_name('beam', 'transversalFile4d', beam.transversalFile4d))
if beam.longitudinalDistribution == 'file1d':
res.append(template_common.lib_file_name('beam', 'longitudinalFile1d', beam.longitudinalFile1d))
if beam.longitudinalDistribution == 'file2d':
res.append(template_common.lib_file_name('beam', 'longitudinalFile2d', beam.longitudinalFile2d))
return res
def _summary_text(run_dir):
return pkio.read_text(os.path.join(str(run_dir), HELLWEG_SUMMARY_FILE))
| if data['method'] == 'compute_particle_ranges':
return template_common.compute_field_range(data, _compute_range_across_files)
assert False, 'unknown application data method: {}'.format(data['method']) | identifier_body |
hellweg.py | # -*- coding: utf-8 -*-
u"""Hellweg execution template.
:copyright: Copyright (c) 2017 RadiaSoft LLC. All Rights Reserved.
:license: http://www.apache.org/licenses/LICENSE-2.0.html
"""
from __future__ import absolute_import, division, print_function
from pykern import pkcollections
from pykern import pkio
from pykern.pkdebug import pkdc, pkdp
from rslinac import solver
from sirepo import simulation_db
from sirepo.template import template_common, hellweg_dump_reader
import math
import numpy as np
import os.path
import py.path
import re
HELLWEG_DUMP_FILE = 'all-data.bin'
HELLWEG_SUMMARY_FILE = 'output.txt'
HELLWEG_INI_FILE = 'defaults.ini'
HELLWEG_INPUT_FILE = 'input.txt'
#: Simulation type
SIM_TYPE = 'hellweg'
WANT_BROWSER_FRAME_CACHE = True
# lattice element is required so make it very short and wide drift
_DEFAULT_DRIFT_ELEMENT = 'DRIFT 1e-16 1e+16 2' + "\n"
_HELLWEG_PARSED_FILE = 'PARSED.TXT'
_REPORT_STYLE_FIELDS = ['colorMap', 'notes']
_SCHEMA = simulation_db.get_schema(SIM_TYPE)
def background_percent_complete(report, run_dir, is_running):
if is_running:
return {
'percentComplete': 0,
'frameCount': 0,
}
dump_file = _dump_file(run_dir)
if os.path.exists(dump_file):
beam_header = hellweg_dump_reader.beam_header(dump_file)
last_update_time = int(os.path.getmtime(dump_file))
frame_count = beam_header.NPoints
return {
'lastUpdateTime': last_update_time,
'percentComplete': 100,
'frameCount': frame_count,
'summaryData': _summary_text(run_dir),
}
return {
'percentComplete': 100,
'frameCount': 0, | def extract_beam_histrogram(report, run_dir, frame):
beam_info = hellweg_dump_reader.beam_info(_dump_file(run_dir), frame)
points = hellweg_dump_reader.get_points(beam_info, report.reportType)
hist, edges = np.histogram(points, template_common.histogram_bins(report.histogramBins))
return {
'title': _report_title(report.reportType, 'BeamHistogramReportType', beam_info),
'x_range': [edges[0], edges[-1]],
'y_label': 'Number of Particles',
'x_label': hellweg_dump_reader.get_label(report.reportType),
'points': hist.T.tolist(),
}
def extract_beam_report(report, run_dir, frame):
data = simulation_db.read_json(run_dir.join(template_common.INPUT_BASE_NAME))
model = data.models.beamAnimation
model.update(report)
beam_info = hellweg_dump_reader.beam_info(_dump_file(run_dir), frame)
x, y = report.reportType.split('-')
values = [
hellweg_dump_reader.get_points(beam_info, x),
hellweg_dump_reader.get_points(beam_info, y),
]
model['x'] = x
model['y'] = y
return template_common.heatmap(values, model, {
'x_label': hellweg_dump_reader.get_label(x),
'y_label': hellweg_dump_reader.get_label(y),
'title': _report_title(report.reportType, 'BeamReportType', beam_info),
'z_label': 'Number of Particles',
'summaryData': _summary_text(run_dir),
})
def extract_parameter_report(report, run_dir):
s = solver.BeamSolver(
os.path.join(str(run_dir), HELLWEG_INI_FILE),
os.path.join(str(run_dir), HELLWEG_INPUT_FILE))
s.load_bin(os.path.join(str(run_dir), HELLWEG_DUMP_FILE))
y1_var, y2_var = report.reportType.split('-')
x_field = 'z'
x = s.get_structure_parameters(_parameter_index(x_field))
y1 = s.get_structure_parameters(_parameter_index(y1_var))
y1_extent = [np.min(y1), np.max(y1)]
y2 = s.get_structure_parameters(_parameter_index(y2_var))
y2_extent = [np.min(y2), np.max(y2)]
return {
'title': _enum_text('ParameterReportType', report.reportType),
'x_range': [x[0], x[-1]],
'y_label': hellweg_dump_reader.get_parameter_label(y1_var),
'x_label': hellweg_dump_reader.get_parameter_label(x_field),
'x_points': x,
'points': [
y1,
y2,
],
'y_range': [min(y1_extent[0], y2_extent[0]), max(y1_extent[1], y2_extent[1])],
'y1_title': hellweg_dump_reader.get_parameter_title(y1_var),
'y2_title': hellweg_dump_reader.get_parameter_title(y2_var),
}
def extract_particle_report(report, run_dir):
x_field = 'z0'
particle_info = hellweg_dump_reader.particle_info(_dump_file(run_dir), report.reportType, int(report.renderCount))
x = particle_info['z_values']
return {
'title': _enum_text('ParticleReportType', report.reportType),
'x_range': [np.min(x), np.max(x)],
'y_label': hellweg_dump_reader.get_label(report.reportType),
'x_label': hellweg_dump_reader.get_label(x_field),
'x_points': x,
'points': particle_info['y_values'],
'y_range': particle_info['y_range'],
}
def fixup_old_data(data):
for m in ('beamAnimation', 'beamHistogramAnimation', 'parameterAnimation', 'particleAnimation'):
if m not in data.models:
data.models[m] = pkcollections.Dict({})
template_common.update_model_defaults(data.models[m], m, _SCHEMA)
if 'solenoidFile' not in data['models']['solenoid']:
data['models']['solenoid']['solenoidFile'] = ''
if 'beamDefinition' not in data['models']['beam']:
beam = data['models']['beam']
beam['beamDefinition'] = 'transverse_longitude'
beam['cstCompress'] = '0'
beam['transversalFile2d'] = ''
beam['transversalFile4d'] = ''
beam['longitudinalFile1d'] = ''
beam['longitudinalFile2d'] = ''
beam['cstFile'] = ''
template_common.organize_example(data)
def get_animation_name(data):
return 'animation'
def get_application_data(data):
if data['method'] == 'compute_particle_ranges':
return template_common.compute_field_range(data, _compute_range_across_files)
assert False, 'unknown application data method: {}'.format(data['method'])
def lib_files(data, source_lib):
return template_common.filename_to_path(_simulation_files(data), source_lib)
def get_simulation_frame(run_dir, data, model_data):
frame_index = int(data['frameIndex'])
if data['modelName'] == 'beamAnimation':
args = template_common.parse_animation_args(
data,
{
'1': ['reportType', 'histogramBins', 'startTime'],
'': ['reportType', 'histogramBins', 'plotRangeType', 'horizontalSize', 'horizontalOffset', 'verticalSize', 'verticalOffset', 'isRunning', 'startTime'],
},
)
return extract_beam_report(args, run_dir, frame_index)
elif data['modelName'] == 'beamHistogramAnimation':
args = template_common.parse_animation_args(
data,
{'': ['reportType', 'histogramBins', 'startTime']},
)
return extract_beam_histrogram(args, run_dir, frame_index)
elif data['modelName'] == 'particleAnimation':
args = template_common.parse_animation_args(
data,
{'': ['reportType', 'renderCount', 'startTime']},
)
return extract_particle_report(args, run_dir)
elif data['modelName'] == 'parameterAnimation':
args = template_common.parse_animation_args(
data,
{'': ['reportType', 'startTime']},
)
return extract_parameter_report(args, run_dir)
raise RuntimeError('unknown animation model: {}'.format(data['modelName']))
def models_related_to_report(data):
"""What models are required for this data['report']
Args:
data (dict): simulation
Returns:
list: Named models, model fields or values (dict, list) that affect report
"""
r = data['report']
if r == 'animation':
return []
res = template_common.report_fields(data, r, _REPORT_STYLE_FIELDS) + [
'beam',
'ellipticalDistribution',
'energyPhaseDistribution',
'solenoid',
'sphericalDistribution',
'twissDistribution',
]
for f in template_common.lib_files(data):
res.append(f.mtime())
return res
def python_source_for_model(data, model):
return '''
from rslinac import solver
{}
with open('input.txt', 'w') as f:
f.write(input_file)
with open('defaults.ini', 'w') as f:
f.write(ini_file)
s = solver.BeamSolver('defaults.ini', 'input.txt')
s.solve()
s.save_output('output.txt')
'''.format(_generate_parameters_file(data, is_parallel=len(data.models.beamline)))
def remove_last_frame(run_dir):
pass
def validate_delete_file(data, filename, file_type):
"""Returns True if the filename is in use by the simulation data."""
return filename in _simulation_files(data)
def write_parameters(data, run_dir, is_parallel):
"""Write the parameters file
Args:
data (dict): input
run_dir (py.path): where to write
is_parallel (bool): run in background?
"""
pkio.write_text(
run_dir.join(template_common.PARAMETERS_PYTHON_FILE),
_generate_parameters_file(
data,
run_dir,
is_parallel,
),
)
def _compute_range_across_files(run_dir, data):
res = {}
for v in _SCHEMA.enum.BeamReportType:
x, y = v[0].split('-')
res[x] = []
res[y] = []
dump_file = _dump_file(run_dir)
if not os.path.exists(dump_file):
return res
beam_header = hellweg_dump_reader.beam_header(dump_file)
for frame in xrange(beam_header.NPoints):
beam_info = hellweg_dump_reader.beam_info(dump_file, frame)
for field in res:
values = hellweg_dump_reader.get_points(beam_info, field)
if not len(values):
pass
elif len(res[field]):
res[field][0] = min(min(values), res[field][0])
res[field][1] = max(max(values), res[field][1])
else:
res[field] = [min(values), max(values)]
return res
def _dump_file(run_dir):
return os.path.join(str(run_dir), HELLWEG_DUMP_FILE)
def _enum_text(enum_name, v):
enum_values = _SCHEMA['enum'][enum_name]
for e in enum_values:
if e[0] == v:
return e[1]
raise RuntimeError('invalid enum value: {}, {}'.format(enum_values, v))
def _generate_beam(models):
# BEAM SPH2D 0.564 -15 5 NORM2D 0.30 0.0000001 90 180
beam_def = models.beam.beamDefinition
if beam_def == 'transverse_longitude':
return 'BEAM {} {}'.format(_generate_transverse_dist(models), _generate_longitude_dist(models))
if beam_def == 'cst_pit':
return 'BEAM CST_PIT {} {}'.format(
template_common.lib_file_name('beam', 'cstFile', models.beam.cstFile),
'COMPRESS' if models.beam.cstCompress else '',
)
if beam_def == 'cst_pid':
return 'BEAM CST_PID {} {}'.format(
template_common.lib_file_name('beam', 'cstFile', models.beam.cstFile),
_generate_energy_phase_distribution(models.energyPhaseDistribution),
)
raise RuntimeError('invalid beam def: {}'.format(beam_def))
def _generate_cell_params(el):
#TODO(pjm): add an option field to select auto-calculate
if el.attenuation == 0 and el.aperture == 0:
return '{} {} {}'.format(el.phaseAdvance, el.phaseVelocity, el.acceleratingInvariant)
return '{} {} {} {} {}'.format(el.phaseAdvance, el.phaseVelocity, el.acceleratingInvariant, el.attenuation, el.aperture)
def _generate_charge(models):
if models.beam.spaceCharge == 'none':
return ''
return 'SPCHARGE {} {}'.format(models.beam.spaceCharge.upper(), models.beam.spaceChargeCore)
def _generate_current(models):
return 'CURRENT {} {}'.format(models.beam.current, models.beam.numberOfParticles)
def _generate_energy_phase_distribution(dist):
return '{} {} {}'.format(
dist.meanPhase,
dist.phaseLength,
dist.phaseDeviation if dist.distributionType == 'gaussian' else '',
)
def _generate_lattice(models):
res = ''
for el in models.beamline:
if el.type == 'powerElement':
res += 'POWER {} {} {}'.format(el.inputPower, el.frequency, el.phaseShift)
elif el.type == 'cellElement':
res += 'CELL {}'.format(_generate_cell_params(el))
has_cell_or_drift = True
elif el.type == 'cellsElement':
res += 'CELLS {} {}'.format(el.repeat, _generate_cell_params(el))
has_cell_or_drift = True
elif el.type == 'driftElement':
res += 'DRIFT {} {} {}'.format(el.length, el.radius, el.meshPoints)
has_cell_or_drift = True
elif el.type == 'saveElement':
#TODO(pjm): implement this
pass
else:
raise RuntimeError('unknown element type: {}'.format(el.type))
res += "\n"
return res
def _generate_longitude_dist(models):
dist_type = models.beam.longitudinalDistribution
if dist_type == 'norm2d':
dist = models.energyPhaseDistribution
if dist.distributionType == 'uniform':
return 'NORM2D {} {} {} {}'.format(
dist.meanEnergy, dist.energySpread, dist.meanPhase, dist.phaseLength)
if dist.distributionType == 'gaussian':
return 'NORM2D {} {} {} {} {} {}'.format(
dist.meanEnergy, dist.energySpread, dist.energyDeviation, dist.meanPhase, dist.phaseLength, dist.phaseDeviation)
raise RuntimeError('unknown longitudinal distribution type: {}'.format(models.longitudinalDistribution.distributionType))
if dist_type == 'file1d':
return 'FILE1D {} {}'.format(
template_common.lib_file_name('beam', 'longitudinalFile1d', models.beam.longitudinalFile1d),
_generate_energy_phase_distribution(models.energyPhaseDistribution),
)
if dist_type == 'file2d':
return 'FILE2D {}'.format(template_common.lib_file_name('beam', 'transversalFile2d', beam.transversalFile2d))
raise RuntimeError('unknown longitudinal distribution: {}'.format(models.beam.longitudinalDistribution))
def _generate_options(models):
if models.simulationSettings.allowBackwardWaves == '1':
return 'OPTIONS REVERSE'
return ''
def _generate_parameters_file(data, run_dir=None, is_parallel=False):
template_common.validate_models(data, _SCHEMA)
v = template_common.flatten_data(data['models'], {})
v['optionsCommand'] = _generate_options(data['models'])
v['solenoidCommand'] = _generate_solenoid(data['models'])
v['beamCommand'] = _generate_beam(data['models'])
v['currentCommand'] = _generate_current(data['models'])
v['chargeCommand'] = _generate_charge(data['models'])
if is_parallel:
v['latticeCommands'] = _generate_lattice(data['models'])
else:
v['latticeCommands'] = _DEFAULT_DRIFT_ELEMENT
return template_common.render_jinja(SIM_TYPE, v)
def _generate_solenoid(models):
solenoid = models.solenoid
if solenoid.sourceDefinition == 'none':
return ''
if solenoid.sourceDefinition == 'values':
#TODO(pjm): latest version also has solenoid.fringeRegion
return 'SOLENOID {} {} {}'.format(
solenoid.fieldStrength, solenoid.length, solenoid.z0)
if solenoid.sourceDefinition == 'file':
return 'SOLENOID {}'.format(
template_common.lib_file_name('solenoid', 'solenoidFile', solenoid.solenoidFile))
raise RuntimeError('unknown solenoidDefinition: {}'.format(solenoid.sourceDefinition))
def _generate_transverse_dist(models):
dist_type = models.beam.transversalDistribution
if dist_type == 'twiss4d':
dist = models.twissDistribution
return 'TWISS4D {} {} {} {} {} {}'.format(
dist.horizontalAlpha, dist.horizontalBeta, dist.horizontalEmittance,
dist.verticalAlpha, dist.verticalBeta, dist.verticalEmittance)
if dist_type == 'sph2d':
dist = models.sphericalDistribution
if dist.curvature == 'flat':
dist.curvatureFactor = 0
return 'SPH2D {} {} {}'.format(dist.radialLimit, dist.curvatureFactor, dist.thermalEmittance)
if dist_type == 'ell2d':
dist = models.ellipticalDistribution
return 'ELL2D {} {} {} {}'.format(dist.aX, dist.bY, dist.rotationAngle, dist.rmsDeviationFactor)
beam = models.beam
if dist_type == 'file2d':
return 'FILE2D {}'.format(template_common.lib_file_name('beam', 'transversalFile2d', beam.transversalFile2d))
if dist_type == 'file4d':
return 'FILE4D {}'.format(template_common.lib_file_name('beam', 'transversalFile4d', beam.transversalFile4d))
raise RuntimeError('unknown transverse distribution: {}'.format(dist_type))
def _parameter_index(name):
return hellweg_dump_reader.parameter_index(name)
def _parse_error_message(run_dir):
path = os.path.join(str(run_dir), _HELLWEG_PARSED_FILE)
if not os.path.exists(path):
return 'No elements generated'
text = pkio.read_text(str(path))
for line in text.split("\n"):
match = re.search('^ERROR:\s(.*)$', line)
if match:
return match.group(1)
return 'No output generated'
def _report_title(report_type, enum_name, beam_info):
return '{}, z={:.4f} cm'.format(
_enum_text(enum_name, report_type),
100 * hellweg_dump_reader.get_parameter(beam_info, 'z'))
def _simulation_files(data):
res = []
solenoid = data.models.solenoid
if solenoid.sourceDefinition == 'file' and solenoid.solenoidFile:
res.append(template_common.lib_file_name('solenoid', 'solenoidFile', solenoid.solenoidFile))
beam = data.models.beam
if beam.beamDefinition == 'cst_pit' or beam.beamDefinition == 'cst_pid':
res.append(template_common.lib_file_name('beam', 'cstFile', beam.cstFile))
if beam.beamDefinition == 'transverse_longitude':
if beam.transversalDistribution == 'file2d':
res.append(template_common.lib_file_name('beam', 'transversalFile2d', beam.transversalFile2d))
elif beam.transversalDistribution == 'file4d':
res.append(template_common.lib_file_name('beam', 'transversalFile4d', beam.transversalFile4d))
if beam.longitudinalDistribution == 'file1d':
res.append(template_common.lib_file_name('beam', 'longitudinalFile1d', beam.longitudinalFile1d))
if beam.longitudinalDistribution == 'file2d':
res.append(template_common.lib_file_name('beam', 'longitudinalFile2d', beam.longitudinalFile2d))
return res
def _summary_text(run_dir):
return pkio.read_text(os.path.join(str(run_dir), HELLWEG_SUMMARY_FILE)) | 'error': _parse_error_message(run_dir)
}
| random_line_split |
hellweg.py | # -*- coding: utf-8 -*-
u"""Hellweg execution template.
:copyright: Copyright (c) 2017 RadiaSoft LLC. All Rights Reserved.
:license: http://www.apache.org/licenses/LICENSE-2.0.html
"""
from __future__ import absolute_import, division, print_function
from pykern import pkcollections
from pykern import pkio
from pykern.pkdebug import pkdc, pkdp
from rslinac import solver
from sirepo import simulation_db
from sirepo.template import template_common, hellweg_dump_reader
import math
import numpy as np
import os.path
import py.path
import re
HELLWEG_DUMP_FILE = 'all-data.bin'
HELLWEG_SUMMARY_FILE = 'output.txt'
HELLWEG_INI_FILE = 'defaults.ini'
HELLWEG_INPUT_FILE = 'input.txt'
#: Simulation type
SIM_TYPE = 'hellweg'
WANT_BROWSER_FRAME_CACHE = True
# lattice element is required so make it very short and wide drift
_DEFAULT_DRIFT_ELEMENT = 'DRIFT 1e-16 1e+16 2' + "\n"
_HELLWEG_PARSED_FILE = 'PARSED.TXT'
_REPORT_STYLE_FIELDS = ['colorMap', 'notes']
_SCHEMA = simulation_db.get_schema(SIM_TYPE)
def background_percent_complete(report, run_dir, is_running):
if is_running:
return {
'percentComplete': 0,
'frameCount': 0,
}
dump_file = _dump_file(run_dir)
if os.path.exists(dump_file):
beam_header = hellweg_dump_reader.beam_header(dump_file)
last_update_time = int(os.path.getmtime(dump_file))
frame_count = beam_header.NPoints
return {
'lastUpdateTime': last_update_time,
'percentComplete': 100,
'frameCount': frame_count,
'summaryData': _summary_text(run_dir),
}
return {
'percentComplete': 100,
'frameCount': 0,
'error': _parse_error_message(run_dir)
}
def extract_beam_histrogram(report, run_dir, frame):
beam_info = hellweg_dump_reader.beam_info(_dump_file(run_dir), frame)
points = hellweg_dump_reader.get_points(beam_info, report.reportType)
hist, edges = np.histogram(points, template_common.histogram_bins(report.histogramBins))
return {
'title': _report_title(report.reportType, 'BeamHistogramReportType', beam_info),
'x_range': [edges[0], edges[-1]],
'y_label': 'Number of Particles',
'x_label': hellweg_dump_reader.get_label(report.reportType),
'points': hist.T.tolist(),
}
def extract_beam_report(report, run_dir, frame):
data = simulation_db.read_json(run_dir.join(template_common.INPUT_BASE_NAME))
model = data.models.beamAnimation
model.update(report)
beam_info = hellweg_dump_reader.beam_info(_dump_file(run_dir), frame)
x, y = report.reportType.split('-')
values = [
hellweg_dump_reader.get_points(beam_info, x),
hellweg_dump_reader.get_points(beam_info, y),
]
model['x'] = x
model['y'] = y
return template_common.heatmap(values, model, {
'x_label': hellweg_dump_reader.get_label(x),
'y_label': hellweg_dump_reader.get_label(y),
'title': _report_title(report.reportType, 'BeamReportType', beam_info),
'z_label': 'Number of Particles',
'summaryData': _summary_text(run_dir),
})
def extract_parameter_report(report, run_dir):
s = solver.BeamSolver(
os.path.join(str(run_dir), HELLWEG_INI_FILE),
os.path.join(str(run_dir), HELLWEG_INPUT_FILE))
s.load_bin(os.path.join(str(run_dir), HELLWEG_DUMP_FILE))
y1_var, y2_var = report.reportType.split('-')
x_field = 'z'
x = s.get_structure_parameters(_parameter_index(x_field))
y1 = s.get_structure_parameters(_parameter_index(y1_var))
y1_extent = [np.min(y1), np.max(y1)]
y2 = s.get_structure_parameters(_parameter_index(y2_var))
y2_extent = [np.min(y2), np.max(y2)]
return {
'title': _enum_text('ParameterReportType', report.reportType),
'x_range': [x[0], x[-1]],
'y_label': hellweg_dump_reader.get_parameter_label(y1_var),
'x_label': hellweg_dump_reader.get_parameter_label(x_field),
'x_points': x,
'points': [
y1,
y2,
],
'y_range': [min(y1_extent[0], y2_extent[0]), max(y1_extent[1], y2_extent[1])],
'y1_title': hellweg_dump_reader.get_parameter_title(y1_var),
'y2_title': hellweg_dump_reader.get_parameter_title(y2_var),
}
def extract_particle_report(report, run_dir):
x_field = 'z0'
particle_info = hellweg_dump_reader.particle_info(_dump_file(run_dir), report.reportType, int(report.renderCount))
x = particle_info['z_values']
return {
'title': _enum_text('ParticleReportType', report.reportType),
'x_range': [np.min(x), np.max(x)],
'y_label': hellweg_dump_reader.get_label(report.reportType),
'x_label': hellweg_dump_reader.get_label(x_field),
'x_points': x,
'points': particle_info['y_values'],
'y_range': particle_info['y_range'],
}
def fixup_old_data(data):
for m in ('beamAnimation', 'beamHistogramAnimation', 'parameterAnimation', 'particleAnimation'):
if m not in data.models:
data.models[m] = pkcollections.Dict({})
template_common.update_model_defaults(data.models[m], m, _SCHEMA)
if 'solenoidFile' not in data['models']['solenoid']:
data['models']['solenoid']['solenoidFile'] = ''
if 'beamDefinition' not in data['models']['beam']:
beam = data['models']['beam']
beam['beamDefinition'] = 'transverse_longitude'
beam['cstCompress'] = '0'
beam['transversalFile2d'] = ''
beam['transversalFile4d'] = ''
beam['longitudinalFile1d'] = ''
beam['longitudinalFile2d'] = ''
beam['cstFile'] = ''
template_common.organize_example(data)
def get_animation_name(data):
return 'animation'
def get_application_data(data):
if data['method'] == 'compute_particle_ranges':
return template_common.compute_field_range(data, _compute_range_across_files)
assert False, 'unknown application data method: {}'.format(data['method'])
def lib_files(data, source_lib):
return template_common.filename_to_path(_simulation_files(data), source_lib)
def get_simulation_frame(run_dir, data, model_data):
frame_index = int(data['frameIndex'])
if data['modelName'] == 'beamAnimation':
args = template_common.parse_animation_args(
data,
{
'1': ['reportType', 'histogramBins', 'startTime'],
'': ['reportType', 'histogramBins', 'plotRangeType', 'horizontalSize', 'horizontalOffset', 'verticalSize', 'verticalOffset', 'isRunning', 'startTime'],
},
)
return extract_beam_report(args, run_dir, frame_index)
elif data['modelName'] == 'beamHistogramAnimation':
args = template_common.parse_animation_args(
data,
{'': ['reportType', 'histogramBins', 'startTime']},
)
return extract_beam_histrogram(args, run_dir, frame_index)
elif data['modelName'] == 'particleAnimation':
args = template_common.parse_animation_args(
data,
{'': ['reportType', 'renderCount', 'startTime']},
)
return extract_particle_report(args, run_dir)
elif data['modelName'] == 'parameterAnimation':
args = template_common.parse_animation_args(
data,
{'': ['reportType', 'startTime']},
)
return extract_parameter_report(args, run_dir)
raise RuntimeError('unknown animation model: {}'.format(data['modelName']))
def models_related_to_report(data):
"""What models are required for this data['report']
Args:
data (dict): simulation
Returns:
list: Named models, model fields or values (dict, list) that affect report
"""
r = data['report']
if r == 'animation':
return []
res = template_common.report_fields(data, r, _REPORT_STYLE_FIELDS) + [
'beam',
'ellipticalDistribution',
'energyPhaseDistribution',
'solenoid',
'sphericalDistribution',
'twissDistribution',
]
for f in template_common.lib_files(data):
res.append(f.mtime())
return res
def python_source_for_model(data, model):
return '''
from rslinac import solver
{}
with open('input.txt', 'w') as f:
f.write(input_file)
with open('defaults.ini', 'w') as f:
f.write(ini_file)
s = solver.BeamSolver('defaults.ini', 'input.txt')
s.solve()
s.save_output('output.txt')
'''.format(_generate_parameters_file(data, is_parallel=len(data.models.beamline)))
def remove_last_frame(run_dir):
pass
def validate_delete_file(data, filename, file_type):
"""Returns True if the filename is in use by the simulation data."""
return filename in _simulation_files(data)
def write_parameters(data, run_dir, is_parallel):
"""Write the parameters file
Args:
data (dict): input
run_dir (py.path): where to write
is_parallel (bool): run in background?
"""
pkio.write_text(
run_dir.join(template_common.PARAMETERS_PYTHON_FILE),
_generate_parameters_file(
data,
run_dir,
is_parallel,
),
)
def _compute_range_across_files(run_dir, data):
res = {}
for v in _SCHEMA.enum.BeamReportType:
x, y = v[0].split('-')
res[x] = []
res[y] = []
dump_file = _dump_file(run_dir)
if not os.path.exists(dump_file):
return res
beam_header = hellweg_dump_reader.beam_header(dump_file)
for frame in xrange(beam_header.NPoints):
beam_info = hellweg_dump_reader.beam_info(dump_file, frame)
for field in res:
values = hellweg_dump_reader.get_points(beam_info, field)
if not len(values):
pass
elif len(res[field]):
res[field][0] = min(min(values), res[field][0])
res[field][1] = max(max(values), res[field][1])
else:
res[field] = [min(values), max(values)]
return res
def _dump_file(run_dir):
return os.path.join(str(run_dir), HELLWEG_DUMP_FILE)
def _enum_text(enum_name, v):
enum_values = _SCHEMA['enum'][enum_name]
for e in enum_values:
if e[0] == v:
return e[1]
raise RuntimeError('invalid enum value: {}, {}'.format(enum_values, v))
def _generate_beam(models):
# BEAM SPH2D 0.564 -15 5 NORM2D 0.30 0.0000001 90 180
beam_def = models.beam.beamDefinition
if beam_def == 'transverse_longitude':
return 'BEAM {} {}'.format(_generate_transverse_dist(models), _generate_longitude_dist(models))
if beam_def == 'cst_pit':
return 'BEAM CST_PIT {} {}'.format(
template_common.lib_file_name('beam', 'cstFile', models.beam.cstFile),
'COMPRESS' if models.beam.cstCompress else '',
)
if beam_def == 'cst_pid':
return 'BEAM CST_PID {} {}'.format(
template_common.lib_file_name('beam', 'cstFile', models.beam.cstFile),
_generate_energy_phase_distribution(models.energyPhaseDistribution),
)
raise RuntimeError('invalid beam def: {}'.format(beam_def))
def _generate_cell_params(el):
#TODO(pjm): add an option field to select auto-calculate
if el.attenuation == 0 and el.aperture == 0:
return '{} {} {}'.format(el.phaseAdvance, el.phaseVelocity, el.acceleratingInvariant)
return '{} {} {} {} {}'.format(el.phaseAdvance, el.phaseVelocity, el.acceleratingInvariant, el.attenuation, el.aperture)
def _generate_charge(models):
if models.beam.spaceCharge == 'none':
return ''
return 'SPCHARGE {} {}'.format(models.beam.spaceCharge.upper(), models.beam.spaceChargeCore)
def _generate_current(models):
return 'CURRENT {} {}'.format(models.beam.current, models.beam.numberOfParticles)
def _generate_energy_phase_distribution(dist):
return '{} {} {}'.format(
dist.meanPhase,
dist.phaseLength,
dist.phaseDeviation if dist.distributionType == 'gaussian' else '',
)
def _generate_lattice(models):
res = ''
for el in models.beamline:
if el.type == 'powerElement':
res += 'POWER {} {} {}'.format(el.inputPower, el.frequency, el.phaseShift)
elif el.type == 'cellElement':
res += 'CELL {}'.format(_generate_cell_params(el))
has_cell_or_drift = True
elif el.type == 'cellsElement':
res += 'CELLS {} {}'.format(el.repeat, _generate_cell_params(el))
has_cell_or_drift = True
elif el.type == 'driftElement':
res += 'DRIFT {} {} {}'.format(el.length, el.radius, el.meshPoints)
has_cell_or_drift = True
elif el.type == 'saveElement':
#TODO(pjm): implement this
pass
else:
raise RuntimeError('unknown element type: {}'.format(el.type))
res += "\n"
return res
def _generate_longitude_dist(models):
dist_type = models.beam.longitudinalDistribution
if dist_type == 'norm2d':
dist = models.energyPhaseDistribution
if dist.distributionType == 'uniform':
return 'NORM2D {} {} {} {}'.format(
dist.meanEnergy, dist.energySpread, dist.meanPhase, dist.phaseLength)
if dist.distributionType == 'gaussian':
return 'NORM2D {} {} {} {} {} {}'.format(
dist.meanEnergy, dist.energySpread, dist.energyDeviation, dist.meanPhase, dist.phaseLength, dist.phaseDeviation)
raise RuntimeError('unknown longitudinal distribution type: {}'.format(models.longitudinalDistribution.distributionType))
if dist_type == 'file1d':
return 'FILE1D {} {}'.format(
template_common.lib_file_name('beam', 'longitudinalFile1d', models.beam.longitudinalFile1d),
_generate_energy_phase_distribution(models.energyPhaseDistribution),
)
if dist_type == 'file2d':
return 'FILE2D {}'.format(template_common.lib_file_name('beam', 'transversalFile2d', beam.transversalFile2d))
raise RuntimeError('unknown longitudinal distribution: {}'.format(models.beam.longitudinalDistribution))
def _generate_options(models):
if models.simulationSettings.allowBackwardWaves == '1':
return 'OPTIONS REVERSE'
return ''
def _generate_parameters_file(data, run_dir=None, is_parallel=False):
template_common.validate_models(data, _SCHEMA)
v = template_common.flatten_data(data['models'], {})
v['optionsCommand'] = _generate_options(data['models'])
v['solenoidCommand'] = _generate_solenoid(data['models'])
v['beamCommand'] = _generate_beam(data['models'])
v['currentCommand'] = _generate_current(data['models'])
v['chargeCommand'] = _generate_charge(data['models'])
if is_parallel:
|
else:
v['latticeCommands'] = _DEFAULT_DRIFT_ELEMENT
return template_common.render_jinja(SIM_TYPE, v)
def _generate_solenoid(models):
solenoid = models.solenoid
if solenoid.sourceDefinition == 'none':
return ''
if solenoid.sourceDefinition == 'values':
#TODO(pjm): latest version also has solenoid.fringeRegion
return 'SOLENOID {} {} {}'.format(
solenoid.fieldStrength, solenoid.length, solenoid.z0)
if solenoid.sourceDefinition == 'file':
return 'SOLENOID {}'.format(
template_common.lib_file_name('solenoid', 'solenoidFile', solenoid.solenoidFile))
raise RuntimeError('unknown solenoidDefinition: {}'.format(solenoid.sourceDefinition))
def _generate_transverse_dist(models):
dist_type = models.beam.transversalDistribution
if dist_type == 'twiss4d':
dist = models.twissDistribution
return 'TWISS4D {} {} {} {} {} {}'.format(
dist.horizontalAlpha, dist.horizontalBeta, dist.horizontalEmittance,
dist.verticalAlpha, dist.verticalBeta, dist.verticalEmittance)
if dist_type == 'sph2d':
dist = models.sphericalDistribution
if dist.curvature == 'flat':
dist.curvatureFactor = 0
return 'SPH2D {} {} {}'.format(dist.radialLimit, dist.curvatureFactor, dist.thermalEmittance)
if dist_type == 'ell2d':
dist = models.ellipticalDistribution
return 'ELL2D {} {} {} {}'.format(dist.aX, dist.bY, dist.rotationAngle, dist.rmsDeviationFactor)
beam = models.beam
if dist_type == 'file2d':
return 'FILE2D {}'.format(template_common.lib_file_name('beam', 'transversalFile2d', beam.transversalFile2d))
if dist_type == 'file4d':
return 'FILE4D {}'.format(template_common.lib_file_name('beam', 'transversalFile4d', beam.transversalFile4d))
raise RuntimeError('unknown transverse distribution: {}'.format(dist_type))
def _parameter_index(name):
return hellweg_dump_reader.parameter_index(name)
def _parse_error_message(run_dir):
path = os.path.join(str(run_dir), _HELLWEG_PARSED_FILE)
if not os.path.exists(path):
return 'No elements generated'
text = pkio.read_text(str(path))
for line in text.split("\n"):
match = re.search('^ERROR:\s(.*)$', line)
if match:
return match.group(1)
return 'No output generated'
def _report_title(report_type, enum_name, beam_info):
return '{}, z={:.4f} cm'.format(
_enum_text(enum_name, report_type),
100 * hellweg_dump_reader.get_parameter(beam_info, 'z'))
def _simulation_files(data):
res = []
solenoid = data.models.solenoid
if solenoid.sourceDefinition == 'file' and solenoid.solenoidFile:
res.append(template_common.lib_file_name('solenoid', 'solenoidFile', solenoid.solenoidFile))
beam = data.models.beam
if beam.beamDefinition == 'cst_pit' or beam.beamDefinition == 'cst_pid':
res.append(template_common.lib_file_name('beam', 'cstFile', beam.cstFile))
if beam.beamDefinition == 'transverse_longitude':
if beam.transversalDistribution == 'file2d':
res.append(template_common.lib_file_name('beam', 'transversalFile2d', beam.transversalFile2d))
elif beam.transversalDistribution == 'file4d':
res.append(template_common.lib_file_name('beam', 'transversalFile4d', beam.transversalFile4d))
if beam.longitudinalDistribution == 'file1d':
res.append(template_common.lib_file_name('beam', 'longitudinalFile1d', beam.longitudinalFile1d))
if beam.longitudinalDistribution == 'file2d':
res.append(template_common.lib_file_name('beam', 'longitudinalFile2d', beam.longitudinalFile2d))
return res
def _summary_text(run_dir):
return pkio.read_text(os.path.join(str(run_dir), HELLWEG_SUMMARY_FILE))
| v['latticeCommands'] = _generate_lattice(data['models']) | conditional_block |
lib.rs | //! # python-config-rs
//!
//! Just like the `python3-config` script that's installed
//! with your Python distribution, `python-config-rs` helps you
//! find information about your Python distribution.
//!
//! ```no_run
//! use python_config::PythonConfig;
//!
//! let cfg = PythonConfig::new(); // Python 3
//!
//! // Print include directories
//! println!("Includes: {}", cfg.includes().unwrap());
//! // Print installation prefix
//! println!("Installation prefix: {}", cfg.prefix().unwrap());
//! ```
//!
//! `python-config` may be most useful in your `build.rs`
//! script, or in any application where you need to find
//!
//! - the location of Python libraries
//! - the include directory for Python headers
//! - any of the things available via `python-config`
//!
//! Essentially, this is a reimplementation of the
//! `python3-config` script with a Rust interface. We work
//! directly with your Python interpreter, just in case
//! a `python-config` script is not on your system.
//!
//! We provide a new binary, `python3-config`, in case (for whatever
//! reason) you'd like to use this version of `python3-config`
//! instead of the distribution's script. We have tests that
//! show our script takes the exact same inputs and returns
//! the exact same outputs. Note that the tests only work if
//! you have a Python 3 distribution that includes a
//! `python3-config` script.
//!
//! ## 3 > 2
//!
//! We make the choice for you: by default, we favor Python 3
//! over Python 2. If you need Python 2 support, use the more
//! explicit interface to create the corresponding `PythonConfig`
//! handle. Note that, while the Python 2 interface should work,
//! it's gone through significantly less testing.
//!
//! The `python3-config` binary in this crate is Python 3 only.
mod cmdr;
#[macro_use]
mod script;
use cmdr::SysCommand;
use semver;
use std::io;
use std::path::{self, PathBuf};
/// Selectable Python version
#[derive(PartialEq, Eq, Debug)]
pub enum Version {
/// Python 3
Three,
/// Python 2
Two,
}
/// Describes a few possible errors from the `PythonConfig` interface
#[derive(Debug)]
pub enum Error {
/// An I/O error occured while interfacing the interpreter
IO(io::Error),
/// This function is for Python 3 only
///
/// This will be the return error for methods returning
/// a [`Py3Only<T>`](type.Py3Only.html) type.
Python3Only,
/// Other, one-off errors, with reasoning provided as a string
Other(&'static str),
}
impl From<io::Error> for Error {
fn from(err: io::Error) -> Self {
Error::IO(err)
}
}
impl From<Error> for io::Error {
fn from(err: Error) -> Self {
match err {
Error::IO(err) => err,
Error::Python3Only => io::Error::new(
io::ErrorKind::Other,
"this function is only available for Python 3",
),
Error::Other(why) => io::Error::new(io::ErrorKind::Other, why),
}
}
}
/// The result type denoting a return `T` or
/// an [`Error`](enum.Error.html).
pub type PyResult<T> = Result<T, Error>;
/// The result type denotes that this function
/// is only available when interfacing a Python 3
/// interpreter.
///
/// It's the same as the normal [`PyResult`](type.PyResult.html)
/// used throughout this module, but it's just a little
/// type hint.
pub type Py3Only<T> = Result<T, Error>;
#[inline]
fn other_err(what: &'static str) -> Error {
Error::Other(what)
}
/// Defines the script with a common prelude of imports
/// and helper functions. Returns a single string that
/// represents the script.
fn build_script(lines: &[&str]) -> String {
let mut script = String::new();
script.push_str("from __future__ import print_function\n");
script.push_str("import sysconfig\n");
script.push_str("pyver = sysconfig.get_config_var('VERSION')\n");
script.push_str("getvar = sysconfig.get_config_var\n");
script.push_str(&lines.join("\n"));
script
}
/// Exposes Python configuration information
pub struct PythonConfig {
/// The commander that provides responses to our commands
cmdr: SysCommand,
/// The version of the Python interpreter we're using
ver: Version,
}
impl Default for PythonConfig {
fn default() -> PythonConfig {
PythonConfig::new()
}
}
impl PythonConfig {
/// Create a new `PythonConfig` that uses the system installed Python 3
/// interpreter to query configuration information.
pub fn new() -> Self {
PythonConfig::version(Version::Three)
}
/// Create a new `PythonConfig` that uses the system installed Python
/// of version `version`.
///
/// # Example
///
/// ```
/// use python_config::{PythonConfig, Version};
///
/// // Use the system-wide Python3 interpreter
/// let cfg = PythonConfig::version(Version::Three);
/// ```
pub fn version(version: Version) -> Self {
match version {
Version::Three => Self::with_commander(version, SysCommand::new("python3")),
Version::Two => Self::with_commander(version, SysCommand::new("python2")),
}
}
fn with_commander(ver: Version, cmdr: SysCommand) -> Self |
fn is_py3(&self) -> Result<(), Error> {
if self.ver != Version::Three {
Err(Error::Python3Only)
} else {
Ok(())
}
}
/// Create a `PythonConfig` that uses the interpreter at the path `interpreter`.
///
/// This fails if the path cannot be represented as a string, or if a query
/// for the Python version fails.
///
/// # Example
///
/// ```no_run
/// use python_config::PythonConfig;
///
/// let cfg = PythonConfig::interpreter("/usr/local/bin/python3");
/// assert!(cfg.is_ok());
/// ```
pub fn interpreter<P: AsRef<path::Path>>(interpreter: P) -> PyResult<Self> {
let cmdr = SysCommand::new(
interpreter
.as_ref()
.to_str()
.ok_or_else(|| other_err("unable to coerce interpreter path to string"))?,
);
// Assume Python 3 unless the semver tells us otherwise
let mut cfg = PythonConfig {
cmdr,
ver: Version::Three,
};
if cfg.semantic_version()?.major == 2 {
cfg.ver = Version::Two;
}
Ok(cfg)
}
/// Returns the Python version string
///
/// This is the raw return of `python --version`. Consider using
/// [`semantic_version`](struct.PythonConfig.html#method.semantic_version)
/// for something more useful.
///
/// # Example
///
/// ```no_run
/// use python_config::PythonConfig;
///
/// let cfg = PythonConfig::new();
/// // Prints something like 'Python 3.7.4'
/// println!("{}", cfg.version_raw().unwrap());
/// ```
pub fn version_raw(&self) -> PyResult<String> {
self.cmdr.commands(&["--version"]).map_err(From::from)
}
/// Returns the Python version as a semver
///
/// # Example
///
/// ```no_run
/// use python_config::PythonConfig;
///
/// let cfg = PythonConfig::new();
/// // Prints semver "3.7.4"
/// println!("{}", cfg.semantic_version().unwrap());
/// ```
pub fn semantic_version(&self) -> PyResult<semver::Version> {
self.version_raw()
.and_then(|resp| {
let mut witer = resp.split_whitespace();
witer.next(); // 'Python'
let ver = witer.next().ok_or_else(|| {
other_err("expected --version to return a string resembling 'Python X.Y.Z'")
})?;
semver::Version::parse(ver).map_err(|_| other_err("unable to parse semver"))
})
.map_err(From::from)
}
fn script(&self, lines: &[&str]) -> PyResult<String> {
self.cmdr
.commands(&["-c", &build_script(lines)])
.map_err(From::from)
}
/// Returns the installation prefix of the Python interpreter as a string.
///
/// The prefix is dependent on the host operating system.
/// On macOS, depending on how Python is installed, it will return
/// a string resembling
/// `/usr/local/opt/python/Frameworks/Python.framework/Versions/3.7`.
///
/// # Example
///
/// ```no_run
/// use python_config::PythonConfig;
///
/// let cfg = PythonConfig::new();
/// println!("{}", cfg.prefix().unwrap());
/// ```
pub fn prefix(&self) -> PyResult<String> {
self.script(&["print(getvar('prefix'))"])
}
/// Like [`prefix`](#method.prefix), but returns
/// the installation prefix as a `PathBuf`.
///
/// # Example
///
/// ```no_run
/// use python_config::PythonConfig;
///
/// let cfg = PythonConfig::new();
/// println!("{}", cfg.prefix_path().unwrap().display());
/// ```
pub fn prefix_path(&self) -> PyResult<PathBuf> {
self.prefix().map(PathBuf::from)
}
/// Returns the executable path prefix for the Python interpreter as a string
///
/// The path is dependent on the host OS and the installation path
/// of the Python interpreter. On macOS, the string may resemble something
/// like `/usr/local/opt/python/Frameworks/Python.framework/Versions/3.7`.
pub fn exec_prefix(&self) -> PyResult<String> {
self.script(&["print(getvar('exec_prefix'))"])
}
/// Like [`exec_prefix`](#method.exec_prefix), but
/// returns the executable prefix as a `PathBuf`.
pub fn exec_prefix_path(&self) -> PyResult<PathBuf> {
self.exec_prefix().map(PathBuf::from)
}
/// Returns a list of paths that represent the include paths
/// for the distribution's headers. This is a space-delimited
/// string of paths prefixed with `-I`.
///
/// The single string may resemble something lke the following
/// (on macOS)
///
/// ```text
/// -I/usr/local/Cellar/python/3.7.4/Frameworks/Python.framework/Versions/3.7/include/python3.7m
/// ```
///
/// Note that the same path may appear more than once.
pub fn includes(&self) -> PyResult<String> {
self.script(&[
"flags = ['-I' + sysconfig.get_path('include'), '-I' + sysconfig.get_path('platinclude')]",
"print(' '.join(flags))",
])
}
/// Returns a list of paths that represent the include paths
/// for the distribution's headers. Unlike [`includes`](#method.includes),
/// this is simply a collection of paths. Note that the same
/// path may appear more than once.
pub fn include_paths(&self) -> PyResult<Vec<PathBuf>> {
self.script(&[
"print(sysconfig.get_path('include'))",
"print(sysconfig.get_path('platinclude'))",
])
.map(|resp| resp.lines().map(PathBuf::from).collect())
}
/// All the flags useful for C compilation. This includes the include
/// paths (see [`includes`](#method.includes)) as well as other compiler
/// flags for this target. The return is a string with spaces separating
/// the flags.
pub fn cflags(&self) -> PyResult<String> {
self.script(&[
"flags = ['-I' + sysconfig.get_path('include'), '-I' + sysconfig.get_path('platinclude')]",
linux_line!("flags.extend(getvar('BASECFLAGS').split())"),
linux_line!("flags.extend(getvar('CONFIGURE_CFLAGS').split())"),
macos_line!("flags.extend(getvar('CFLAGS').split())"),
"print(' '.join(flags))",
])
}
/// Returns linker flags required for linking this Python
/// distribution. All libraries / frameworks have the appropriate `-l`
/// or `-framework` prefixes.
///
/// On macOS, the single string may resemble something like
///
/// ```text
/// -lpython3.7m -ldl -framework CoreFoundation
/// ```
pub fn libs(&self) -> PyResult<String> {
self.script(&[
"import sys",
"libs = ['-lpython' + pyver + sys.abiflags]",
"libs += getvar('LIBS').split()",
"libs += getvar('SYSLIBS').split()",
"print(' '.join(libs))",
])
}
/// Returns linker flags required for creating
/// a shared library for this Python distribution. All libraries / frameworks
/// have the appropriate `-L`, `-l`, or `-framework` prefixes.
///
/// On macOS, the single string may resemble something like
///
/// ```text
/// -L/usr/local/opt/python/Frameworks/Python.framework/Versions/3.7/lib/python3.7/config-3.7m-darwin -lpython3.7m -ldl -framework CoreFoundation
/// ```
pub fn ldflags(&self) -> PyResult<String> {
self.script(&[
"import sys",
"libs = ['-lpython' + pyver + sys.abiflags]",
linux_line!["libs.insert(0, '-L' + getvar('exec_prefix') + '/lib')"],
"libs += getvar('LIBS').split()",
"libs += getvar('SYSLIBS').split()",
"if not getvar('Py_ENABLED_SHARED'):",
tab!("libs.insert(0, '-L' + getvar('LIBPL'))"),
"if not getvar('PYTHONFRAMEWORK'):",
tab!("libs.extend(getvar('LINKFORSHARED').split())"),
"print(' '.join(libs))",
])
}
/// Returns a string that represents the file extension for this distribution's library
///
/// This is only available when your interpreter is a Python 3 interpreter! This is for
/// feature parity with the `python3-config` script.
///
/// On macOS, the string may resemble something like `.cpython-37m-darwin.so`.
pub fn extension_suffix(&self) -> Py3Only<String> {
self.is_py3()?;
let resp = self.script(&["print(getvar('EXT_SUFFIX'))"])?;
Ok(resp)
}
/// The ABI flags specified when building this Python distribution
///
/// This is only available when your interpreter is a Python 3 interpreter! This is for
/// feature parity with the `python3-config` script.
pub fn abi_flags(&self) -> Py3Only<String> {
self.is_py3()?;
let resp = self.script(&["import sys", "print(sys.abiflags)"])?;
Ok(resp)
}
/// The location of the distribution's actual `python3-config` script
///
/// This is only available when your interpreter is a Python 3 interpreter! This is for
/// feature parity with the `python3-config` script.
pub fn config_dir(&self) -> Py3Only<String> {
self.is_py3()?;
let resp = self.script(&["print(getvar('LIBPL'))"])?;
Ok(resp)
}
/// Like [`config_dir`](#method.config_dir), but returns the path to
/// the distribution's `python-config` script as a `PathBuf`.
///
/// This is only available when your interpreter is a Python 3 interpreter! This is for
/// feature parity with the `python3-config` script.
pub fn config_dir_path(&self) -> Py3Only<PathBuf> {
self.config_dir().map(PathBuf::from)
}
}
#[cfg(test)]
mod tests {
//! The tests only show that, under normal circumstances, there
//! are no errors returned from the public API.
use super::PythonConfig;
use std::path::PathBuf;
macro_rules! pycfgtest {
($ident:ident) => {
#[test]
fn $ident() {
assert!(PythonConfig::new().$ident().is_ok());
}
};
}
pycfgtest!(version_raw);
pycfgtest!(semantic_version);
pycfgtest!(prefix);
pycfgtest!(prefix_path);
pycfgtest!(exec_prefix);
pycfgtest!(exec_prefix_path);
pycfgtest!(includes);
pycfgtest!(include_paths);
pycfgtest!(cflags);
pycfgtest!(libs);
pycfgtest!(ldflags);
pycfgtest!(extension_suffix);
pycfgtest!(abi_flags);
pycfgtest!(config_dir);
pycfgtest!(config_dir_path);
// Shows that includes and include_paths return the same things
// just in different types.
#[test]
fn include_paths_same() {
let cfg = PythonConfig::new();
let include_str = cfg.includes().unwrap();
assert!(!include_str.is_empty());
let paths: Vec<PathBuf> = include_str
.split(" ")
.map(|include| {
// Drop the '-I' characters before each path
PathBuf::from(&include[2..])
})
.collect();
let actual = cfg.include_paths().unwrap();
assert_eq!(actual, paths);
}
}
| {
PythonConfig { cmdr, ver }
} | identifier_body |
lib.rs | //! # python-config-rs
//!
//! Just like the `python3-config` script that's installed
//! with your Python distribution, `python-config-rs` helps you
//! find information about your Python distribution.
//!
//! ```no_run
//! use python_config::PythonConfig;
//!
//! let cfg = PythonConfig::new(); // Python 3
//!
//! // Print include directories
//! println!("Includes: {}", cfg.includes().unwrap());
//! // Print installation prefix
//! println!("Installation prefix: {}", cfg.prefix().unwrap());
//! ```
//!
//! `python-config` may be most useful in your `build.rs`
//! script, or in any application where you need to find
//!
//! - the location of Python libraries
//! - the include directory for Python headers
//! - any of the things available via `python-config`
//!
//! Essentially, this is a reimplementation of the
//! `python3-config` script with a Rust interface. We work
//! directly with your Python interpreter, just in case
//! a `python-config` script is not on your system.
//!
//! We provide a new binary, `python3-config`, in case (for whatever
//! reason) you'd like to use this version of `python3-config`
//! instead of the distribution's script. We have tests that
//! show our script takes the exact same inputs and returns
//! the exact same outputs. Note that the tests only work if
//! you have a Python 3 distribution that includes a
//! `python3-config` script.
//!
//! ## 3 > 2
//!
//! We make the choice for you: by default, we favor Python 3
//! over Python 2. If you need Python 2 support, use the more
//! explicit interface to create the corresponding `PythonConfig`
//! handle. Note that, while the Python 2 interface should work,
//! it's gone through significantly less testing.
//!
//! The `python3-config` binary in this crate is Python 3 only.
mod cmdr;
#[macro_use]
mod script;
use cmdr::SysCommand;
use semver;
use std::io;
use std::path::{self, PathBuf};
/// Selectable Python version
#[derive(PartialEq, Eq, Debug)]
pub enum Version {
/// Python 3
Three,
/// Python 2
Two,
}
/// Describes a few possible errors from the `PythonConfig` interface
#[derive(Debug)]
pub enum Error {
/// An I/O error occured while interfacing the interpreter
IO(io::Error),
/// This function is for Python 3 only
///
/// This will be the return error for methods returning
/// a [`Py3Only<T>`](type.Py3Only.html) type.
Python3Only,
/// Other, one-off errors, with reasoning provided as a string
Other(&'static str),
}
impl From<io::Error> for Error {
fn from(err: io::Error) -> Self {
Error::IO(err)
}
}
impl From<Error> for io::Error {
fn from(err: Error) -> Self {
match err {
Error::IO(err) => err,
Error::Python3Only => io::Error::new(
io::ErrorKind::Other,
"this function is only available for Python 3",
),
Error::Other(why) => io::Error::new(io::ErrorKind::Other, why),
}
}
}
/// The result type denoting a return `T` or
/// an [`Error`](enum.Error.html).
pub type PyResult<T> = Result<T, Error>;
/// The result type denotes that this function
/// is only available when interfacing a Python 3
/// interpreter.
///
/// It's the same as the normal [`PyResult`](type.PyResult.html)
/// used throughout this module, but it's just a little
/// type hint.
pub type Py3Only<T> = Result<T, Error>;
#[inline]
fn other_err(what: &'static str) -> Error {
Error::Other(what)
}
/// Defines the script with a common prelude of imports
/// and helper functions. Returns a single string that
/// represents the script.
fn build_script(lines: &[&str]) -> String {
let mut script = String::new();
script.push_str("from __future__ import print_function\n");
script.push_str("import sysconfig\n");
script.push_str("pyver = sysconfig.get_config_var('VERSION')\n");
script.push_str("getvar = sysconfig.get_config_var\n");
script.push_str(&lines.join("\n"));
script
}
/// Exposes Python configuration information
pub struct PythonConfig {
/// The commander that provides responses to our commands
cmdr: SysCommand,
/// The version of the Python interpreter we're using
ver: Version,
}
impl Default for PythonConfig {
fn default() -> PythonConfig {
PythonConfig::new()
}
}
impl PythonConfig {
/// Create a new `PythonConfig` that uses the system installed Python 3
/// interpreter to query configuration information.
pub fn new() -> Self {
PythonConfig::version(Version::Three)
}
/// Create a new `PythonConfig` that uses the system installed Python
/// of version `version`.
///
/// # Example
///
/// ```
/// use python_config::{PythonConfig, Version};
///
/// // Use the system-wide Python3 interpreter
/// let cfg = PythonConfig::version(Version::Three);
/// ```
pub fn version(version: Version) -> Self {
match version {
Version::Three => Self::with_commander(version, SysCommand::new("python3")),
Version::Two => Self::with_commander(version, SysCommand::new("python2")),
}
}
fn with_commander(ver: Version, cmdr: SysCommand) -> Self {
PythonConfig { cmdr, ver }
}
fn is_py3(&self) -> Result<(), Error> {
if self.ver != Version::Three {
Err(Error::Python3Only)
} else {
Ok(())
}
}
/// Create a `PythonConfig` that uses the interpreter at the path `interpreter`.
///
/// This fails if the path cannot be represented as a string, or if a query
/// for the Python version fails.
///
/// # Example
///
/// ```no_run
/// use python_config::PythonConfig;
///
/// let cfg = PythonConfig::interpreter("/usr/local/bin/python3");
/// assert!(cfg.is_ok());
/// ```
pub fn interpreter<P: AsRef<path::Path>>(interpreter: P) -> PyResult<Self> {
let cmdr = SysCommand::new(
interpreter
.as_ref()
.to_str()
.ok_or_else(|| other_err("unable to coerce interpreter path to string"))?,
);
// Assume Python 3 unless the semver tells us otherwise
let mut cfg = PythonConfig {
cmdr,
ver: Version::Three,
};
if cfg.semantic_version()?.major == 2 {
cfg.ver = Version::Two;
}
Ok(cfg)
}
/// Returns the Python version string
///
/// This is the raw return of `python --version`. Consider using
/// [`semantic_version`](struct.PythonConfig.html#method.semantic_version)
/// for something more useful.
///
/// # Example
///
/// ```no_run
/// use python_config::PythonConfig;
///
/// let cfg = PythonConfig::new();
/// // Prints something like 'Python 3.7.4'
/// println!("{}", cfg.version_raw().unwrap());
/// ```
pub fn version_raw(&self) -> PyResult<String> {
self.cmdr.commands(&["--version"]).map_err(From::from)
}
/// Returns the Python version as a semver
///
/// # Example
///
/// ```no_run
/// use python_config::PythonConfig;
///
/// let cfg = PythonConfig::new();
/// // Prints semver "3.7.4"
/// println!("{}", cfg.semantic_version().unwrap());
/// ```
pub fn semantic_version(&self) -> PyResult<semver::Version> {
self.version_raw()
.and_then(|resp| {
let mut witer = resp.split_whitespace();
witer.next(); // 'Python'
let ver = witer.next().ok_or_else(|| {
other_err("expected --version to return a string resembling 'Python X.Y.Z'")
})?;
semver::Version::parse(ver).map_err(|_| other_err("unable to parse semver"))
})
.map_err(From::from)
}
fn script(&self, lines: &[&str]) -> PyResult<String> {
self.cmdr
.commands(&["-c", &build_script(lines)])
.map_err(From::from)
}
/// Returns the installation prefix of the Python interpreter as a string.
///
/// The prefix is dependent on the host operating system.
/// On macOS, depending on how Python is installed, it will return
/// a string resembling
/// `/usr/local/opt/python/Frameworks/Python.framework/Versions/3.7`.
///
/// # Example
///
/// ```no_run
/// use python_config::PythonConfig;
///
/// let cfg = PythonConfig::new();
/// println!("{}", cfg.prefix().unwrap());
/// ```
pub fn prefix(&self) -> PyResult<String> {
self.script(&["print(getvar('prefix'))"])
}
/// Like [`prefix`](#method.prefix), but returns
/// the installation prefix as a `PathBuf`.
///
/// # Example
///
/// ```no_run
/// use python_config::PythonConfig;
///
/// let cfg = PythonConfig::new();
/// println!("{}", cfg.prefix_path().unwrap().display());
/// ```
pub fn prefix_path(&self) -> PyResult<PathBuf> {
self.prefix().map(PathBuf::from)
}
/// Returns the executable path prefix for the Python interpreter as a string
///
/// The path is dependent on the host OS and the installation path
/// of the Python interpreter. On macOS, the string may resemble something
/// like `/usr/local/opt/python/Frameworks/Python.framework/Versions/3.7`.
pub fn exec_prefix(&self) -> PyResult<String> {
self.script(&["print(getvar('exec_prefix'))"])
}
/// Like [`exec_prefix`](#method.exec_prefix), but
/// returns the executable prefix as a `PathBuf`.
pub fn exec_prefix_path(&self) -> PyResult<PathBuf> {
self.exec_prefix().map(PathBuf::from)
}
/// Returns a list of paths that represent the include paths
/// for the distribution's headers. This is a space-delimited
/// string of paths prefixed with `-I`.
///
/// The single string may resemble something lke the following
/// (on macOS)
///
/// ```text
/// -I/usr/local/Cellar/python/3.7.4/Frameworks/Python.framework/Versions/3.7/include/python3.7m
/// ```
///
/// Note that the same path may appear more than once.
pub fn includes(&self) -> PyResult<String> {
self.script(&[
"flags = ['-I' + sysconfig.get_path('include'), '-I' + sysconfig.get_path('platinclude')]",
"print(' '.join(flags))",
])
}
/// Returns a list of paths that represent the include paths
/// for the distribution's headers. Unlike [`includes`](#method.includes),
/// this is simply a collection of paths. Note that the same
/// path may appear more than once.
pub fn include_paths(&self) -> PyResult<Vec<PathBuf>> {
self.script(&[
"print(sysconfig.get_path('include'))",
"print(sysconfig.get_path('platinclude'))",
])
.map(|resp| resp.lines().map(PathBuf::from).collect())
}
/// All the flags useful for C compilation. This includes the include
/// paths (see [`includes`](#method.includes)) as well as other compiler
/// flags for this target. The return is a string with spaces separating
/// the flags.
pub fn cflags(&self) -> PyResult<String> {
self.script(&[
"flags = ['-I' + sysconfig.get_path('include'), '-I' + sysconfig.get_path('platinclude')]",
linux_line!("flags.extend(getvar('BASECFLAGS').split())"),
linux_line!("flags.extend(getvar('CONFIGURE_CFLAGS').split())"),
macos_line!("flags.extend(getvar('CFLAGS').split())"),
"print(' '.join(flags))",
])
}
/// Returns linker flags required for linking this Python
/// distribution. All libraries / frameworks have the appropriate `-l`
/// or `-framework` prefixes.
///
/// On macOS, the single string may resemble something like
///
/// ```text
/// -lpython3.7m -ldl -framework CoreFoundation
/// ```
pub fn libs(&self) -> PyResult<String> {
self.script(&[
"import sys",
"libs = ['-lpython' + pyver + sys.abiflags]",
"libs += getvar('LIBS').split()",
"libs += getvar('SYSLIBS').split()",
"print(' '.join(libs))",
])
}
/// Returns linker flags required for creating
/// a shared library for this Python distribution. All libraries / frameworks
/// have the appropriate `-L`, `-l`, or `-framework` prefixes.
///
/// On macOS, the single string may resemble something like
///
/// ```text
/// -L/usr/local/opt/python/Frameworks/Python.framework/Versions/3.7/lib/python3.7/config-3.7m-darwin -lpython3.7m -ldl -framework CoreFoundation
/// ```
pub fn ldflags(&self) -> PyResult<String> {
self.script(&[
"import sys",
"libs = ['-lpython' + pyver + sys.abiflags]",
linux_line!["libs.insert(0, '-L' + getvar('exec_prefix') + '/lib')"],
"libs += getvar('LIBS').split()",
"libs += getvar('SYSLIBS').split()",
"if not getvar('Py_ENABLED_SHARED'):",
tab!("libs.insert(0, '-L' + getvar('LIBPL'))"),
"if not getvar('PYTHONFRAMEWORK'):",
tab!("libs.extend(getvar('LINKFORSHARED').split())"),
"print(' '.join(libs))",
])
}
/// Returns a string that represents the file extension for this distribution's library
///
/// This is only available when your interpreter is a Python 3 interpreter! This is for
/// feature parity with the `python3-config` script.
///
/// On macOS, the string may resemble something like `.cpython-37m-darwin.so`.
pub fn extension_suffix(&self) -> Py3Only<String> {
self.is_py3()?;
let resp = self.script(&["print(getvar('EXT_SUFFIX'))"])?;
Ok(resp)
}
/// The ABI flags specified when building this Python distribution
///
/// This is only available when your interpreter is a Python 3 interpreter! This is for
/// feature parity with the `python3-config` script.
pub fn abi_flags(&self) -> Py3Only<String> {
self.is_py3()?;
let resp = self.script(&["import sys", "print(sys.abiflags)"])?;
Ok(resp)
}
/// The location of the distribution's actual `python3-config` script
///
/// This is only available when your interpreter is a Python 3 interpreter! This is for
/// feature parity with the `python3-config` script.
pub fn config_dir(&self) -> Py3Only<String> {
self.is_py3()?;
let resp = self.script(&["print(getvar('LIBPL'))"])?;
Ok(resp)
}
/// Like [`config_dir`](#method.config_dir), but returns the path to
/// the distribution's `python-config` script as a `PathBuf`.
///
/// This is only available when your interpreter is a Python 3 interpreter! This is for
/// feature parity with the `python3-config` script.
pub fn config_dir_path(&self) -> Py3Only<PathBuf> {
self.config_dir().map(PathBuf::from)
}
}
#[cfg(test)]
mod tests {
//! The tests only show that, under normal circumstances, there
//! are no errors returned from the public API.
use super::PythonConfig;
use std::path::PathBuf;
macro_rules! pycfgtest {
($ident:ident) => {
#[test]
fn $ident() {
assert!(PythonConfig::new().$ident().is_ok());
}
};
}
pycfgtest!(version_raw);
pycfgtest!(semantic_version);
pycfgtest!(prefix);
pycfgtest!(prefix_path);
pycfgtest!(exec_prefix);
pycfgtest!(exec_prefix_path);
pycfgtest!(includes);
pycfgtest!(include_paths);
pycfgtest!(cflags);
pycfgtest!(libs);
pycfgtest!(ldflags);
pycfgtest!(extension_suffix);
pycfgtest!(abi_flags);
pycfgtest!(config_dir);
pycfgtest!(config_dir_path);
// Shows that includes and include_paths return the same things
// just in different types.
#[test]
fn | () {
let cfg = PythonConfig::new();
let include_str = cfg.includes().unwrap();
assert!(!include_str.is_empty());
let paths: Vec<PathBuf> = include_str
.split(" ")
.map(|include| {
// Drop the '-I' characters before each path
PathBuf::from(&include[2..])
})
.collect();
let actual = cfg.include_paths().unwrap();
assert_eq!(actual, paths);
}
}
| include_paths_same | identifier_name |
lib.rs | //! # python-config-rs
//!
//! Just like the `python3-config` script that's installed
//! with your Python distribution, `python-config-rs` helps you
//! find information about your Python distribution.
//!
//! ```no_run
//! use python_config::PythonConfig;
//!
//! let cfg = PythonConfig::new(); // Python 3
//!
//! // Print include directories
//! println!("Includes: {}", cfg.includes().unwrap());
//! // Print installation prefix
//! println!("Installation prefix: {}", cfg.prefix().unwrap());
//! ```
//!
//! `python-config` may be most useful in your `build.rs`
//! script, or in any application where you need to find
//!
//! - the location of Python libraries
//! - the include directory for Python headers
//! - any of the things available via `python-config`
//!
//! Essentially, this is a reimplementation of the
//! `python3-config` script with a Rust interface. We work
//! directly with your Python interpreter, just in case
//! a `python-config` script is not on your system.
//!
//! We provide a new binary, `python3-config`, in case (for whatever
//! reason) you'd like to use this version of `python3-config`
//! instead of the distribution's script. We have tests that
//! show our script takes the exact same inputs and returns
//! the exact same outputs. Note that the tests only work if
//! you have a Python 3 distribution that includes a
//! `python3-config` script.
//!
//! ## 3 > 2
//!
//! We make the choice for you: by default, we favor Python 3
//! over Python 2. If you need Python 2 support, use the more
//! explicit interface to create the corresponding `PythonConfig`
//! handle. Note that, while the Python 2 interface should work,
//! it's gone through significantly less testing.
//!
//! The `python3-config` binary in this crate is Python 3 only.
mod cmdr;
#[macro_use]
mod script;
use cmdr::SysCommand;
use semver;
use std::io;
use std::path::{self, PathBuf};
/// Selectable Python version
///
/// Chooses which system interpreter binary (`python3` or `python2`)
/// a [`PythonConfig`](struct.PythonConfig.html) shells out to.
#[derive(PartialEq, Eq, Debug)]
pub enum Version {
    /// Python 3
    Three,
    /// Python 2
    Two,
}
/// Describes a few possible errors from the `PythonConfig` interface
#[derive(Debug)]
pub enum Error {
    /// An I/O error occurred while interfacing the interpreter
    IO(io::Error),
    /// This function is for Python 3 only
    ///
    /// This will be the return error for methods returning
    /// a [`Py3Only<T>`](type.Py3Only.html) type.
    Python3Only,
    /// Other, one-off errors, with reasoning provided as a string
    Other(&'static str),
}
impl From<io::Error> for Error {
fn from(err: io::Error) -> Self {
Error::IO(err)
}
}
impl From<Error> for io::Error {
fn from(err: Error) -> Self {
match err {
Error::IO(err) => err,
Error::Python3Only => io::Error::new(
io::ErrorKind::Other,
"this function is only available for Python 3",
),
Error::Other(why) => io::Error::new(io::ErrorKind::Other, why),
}
}
}
/// The result type denoting a return `T` or | /// The result type denotes that this function
/// is only available when interfacing a Python 3
/// interpreter.
///
/// It's the same as the normal [`PyResult`](type.PyResult.html)
/// used throughout this module, but it's just a little
/// type hint.
pub type Py3Only<T> = Result<T, Error>;
// Convenience constructor for one-off `Error::Other` messages.
#[inline]
fn other_err(what: &'static str) -> Error {
    Error::Other(what)
}
/// Assembles a Python script from a fixed prelude plus the
/// caller-provided `lines`, returned as a single string.
///
/// The prelude imports `sysconfig` and binds the `pyver` / `getvar`
/// helpers that the per-query snippets rely on.
fn build_script(lines: &[&str]) -> String {
    let prelude = concat!(
        "from __future__ import print_function\n",
        "import sysconfig\n",
        "pyver = sysconfig.get_config_var('VERSION')\n",
        "getvar = sysconfig.get_config_var\n",
    );
    let mut script = String::from(prelude);
    script.push_str(&lines.join("\n"));
    script
}
/// Exposes Python configuration information
///
/// Wraps a system Python interpreter and answers the same queries
/// as the distribution's `python-config` script.
pub struct PythonConfig {
    /// The commander that provides responses to our commands
    cmdr: SysCommand,
    /// The version of the Python interpreter we're using
    ver: Version,
}
impl Default for PythonConfig {
fn default() -> PythonConfig {
PythonConfig::new()
}
}
impl PythonConfig {
    /// Create a new `PythonConfig` that uses the system installed Python 3
    /// interpreter to query configuration information.
    pub fn new() -> Self {
        PythonConfig::version(Version::Three)
    }
    /// Create a new `PythonConfig` that uses the system installed Python
    /// of version `version`.
    ///
    /// # Example
    ///
    /// ```
    /// use python_config::{PythonConfig, Version};
    ///
    /// // Use the system-wide Python3 interpreter
    /// let cfg = PythonConfig::version(Version::Three);
    /// ```
    pub fn version(version: Version) -> Self {
        match version {
            Version::Three => Self::with_commander(version, SysCommand::new("python3")),
            Version::Two => Self::with_commander(version, SysCommand::new("python2")),
        }
    }
    // Internal constructor: pairs the version tag with the commander
    // that actually shells out to that interpreter.
    fn with_commander(ver: Version, cmdr: SysCommand) -> Self {
        PythonConfig { cmdr, ver }
    }
    // Guard used by the Python-3-only queries: Ok(()) when this handle
    // targets Python 3, otherwise Error::Python3Only.
    fn is_py3(&self) -> Result<(), Error> {
        if self.ver != Version::Three {
            Err(Error::Python3Only)
        } else {
            Ok(())
        }
    }
    /// Create a `PythonConfig` that uses the interpreter at the path `interpreter`.
    ///
    /// This fails if the path cannot be represented as a string, or if a query
    /// for the Python version fails.
    ///
    /// # Example
    ///
    /// ```no_run
    /// use python_config::PythonConfig;
    ///
    /// let cfg = PythonConfig::interpreter("/usr/local/bin/python3");
    /// assert!(cfg.is_ok());
    /// ```
    pub fn interpreter<P: AsRef<path::Path>>(interpreter: P) -> PyResult<Self> {
        let cmdr = SysCommand::new(
            interpreter
                .as_ref()
                .to_str()
                .ok_or_else(|| other_err("unable to coerce interpreter path to string"))?,
        );
        // Assume Python 3 unless the semver tells us otherwise
        let mut cfg = PythonConfig {
            cmdr,
            ver: Version::Three,
        };
        if cfg.semantic_version()?.major == 2 {
            cfg.ver = Version::Two;
        }
        Ok(cfg)
    }
    /// Returns the Python version string
    ///
    /// This is the raw return of `python --version`. Consider using
    /// [`semantic_version`](struct.PythonConfig.html#method.semantic_version)
    /// for something more useful.
    ///
    /// # Example
    ///
    /// ```no_run
    /// use python_config::PythonConfig;
    ///
    /// let cfg = PythonConfig::new();
    /// // Prints something like 'Python 3.7.4'
    /// println!("{}", cfg.version_raw().unwrap());
    /// ```
    pub fn version_raw(&self) -> PyResult<String> {
        self.cmdr.commands(&["--version"]).map_err(From::from)
    }
    /// Returns the Python version as a semver
    ///
    /// # Example
    ///
    /// ```no_run
    /// use python_config::PythonConfig;
    ///
    /// let cfg = PythonConfig::new();
    /// // Prints semver "3.7.4"
    /// println!("{}", cfg.semantic_version().unwrap());
    /// ```
    pub fn semantic_version(&self) -> PyResult<semver::Version> {
        self.version_raw()
            .and_then(|resp| {
                // Expect output shaped like "Python X.Y.Z"; skip the word,
                // then parse the remainder as a semver.
                let mut witer = resp.split_whitespace();
                witer.next(); // 'Python'
                let ver = witer.next().ok_or_else(|| {
                    other_err("expected --version to return a string resembling 'Python X.Y.Z'")
                })?;
                semver::Version::parse(ver).map_err(|_| other_err("unable to parse semver"))
            })
            .map_err(From::from)
    }
    // Wraps `lines` in the common prelude (see build_script) and runs the
    // result via `python -c`, returning the interpreter's stdout.
    fn script(&self, lines: &[&str]) -> PyResult<String> {
        self.cmdr
            .commands(&["-c", &build_script(lines)])
            .map_err(From::from)
    }
    /// Returns the installation prefix of the Python interpreter as a string.
    ///
    /// The prefix is dependent on the host operating system.
    /// On macOS, depending on how Python is installed, it will return
    /// a string resembling
    /// `/usr/local/opt/python/Frameworks/Python.framework/Versions/3.7`.
    ///
    /// # Example
    ///
    /// ```no_run
    /// use python_config::PythonConfig;
    ///
    /// let cfg = PythonConfig::new();
    /// println!("{}", cfg.prefix().unwrap());
    /// ```
    pub fn prefix(&self) -> PyResult<String> {
        self.script(&["print(getvar('prefix'))"])
    }
    /// Like [`prefix`](#method.prefix), but returns
    /// the installation prefix as a `PathBuf`.
    ///
    /// # Example
    ///
    /// ```no_run
    /// use python_config::PythonConfig;
    ///
    /// let cfg = PythonConfig::new();
    /// println!("{}", cfg.prefix_path().unwrap().display());
    /// ```
    pub fn prefix_path(&self) -> PyResult<PathBuf> {
        self.prefix().map(PathBuf::from)
    }
    /// Returns the executable path prefix for the Python interpreter as a string
    ///
    /// The path is dependent on the host OS and the installation path
    /// of the Python interpreter. On macOS, the string may resemble something
    /// like `/usr/local/opt/python/Frameworks/Python.framework/Versions/3.7`.
    pub fn exec_prefix(&self) -> PyResult<String> {
        self.script(&["print(getvar('exec_prefix'))"])
    }
    /// Like [`exec_prefix`](#method.exec_prefix), but
    /// returns the executable prefix as a `PathBuf`.
    pub fn exec_prefix_path(&self) -> PyResult<PathBuf> {
        self.exec_prefix().map(PathBuf::from)
    }
    /// Returns a list of paths that represent the include paths
    /// for the distribution's headers. This is a space-delimited
    /// string of paths prefixed with `-I`.
    ///
    /// The single string may resemble something lke the following
    /// (on macOS)
    ///
    /// ```text
    /// -I/usr/local/Cellar/python/3.7.4/Frameworks/Python.framework/Versions/3.7/include/python3.7m
    /// ```
    ///
    /// Note that the same path may appear more than once.
    pub fn includes(&self) -> PyResult<String> {
        self.script(&[
            "flags = ['-I' + sysconfig.get_path('include'), '-I' + sysconfig.get_path('platinclude')]",
            "print(' '.join(flags))",
        ])
    }
    /// Returns a list of paths that represent the include paths
    /// for the distribution's headers. Unlike [`includes`](#method.includes),
    /// this is simply a collection of paths. Note that the same
    /// path may appear more than once.
    pub fn include_paths(&self) -> PyResult<Vec<PathBuf>> {
        // One path per printed line; split on lines rather than spaces so
        // paths containing spaces survive.
        self.script(&[
            "print(sysconfig.get_path('include'))",
            "print(sysconfig.get_path('platinclude'))",
        ])
        .map(|resp| resp.lines().map(PathBuf::from).collect())
    }
    /// All the flags useful for C compilation. This includes the include
    /// paths (see [`includes`](#method.includes)) as well as other compiler
    /// flags for this target. The return is a string with spaces separating
    /// the flags.
    pub fn cflags(&self) -> PyResult<String> {
        // linux_line!/macos_line! (from the script module) emit the line
        // only when compiling for that target OS.
        self.script(&[
            "flags = ['-I' + sysconfig.get_path('include'), '-I' + sysconfig.get_path('platinclude')]",
            linux_line!("flags.extend(getvar('BASECFLAGS').split())"),
            linux_line!("flags.extend(getvar('CONFIGURE_CFLAGS').split())"),
            macos_line!("flags.extend(getvar('CFLAGS').split())"),
            "print(' '.join(flags))",
        ])
    }
    /// Returns linker flags required for linking this Python
    /// distribution. All libraries / frameworks have the appropriate `-l`
    /// or `-framework` prefixes.
    ///
    /// On macOS, the single string may resemble something like
    ///
    /// ```text
    /// -lpython3.7m -ldl -framework CoreFoundation
    /// ```
    pub fn libs(&self) -> PyResult<String> {
        self.script(&[
            "import sys",
            "libs = ['-lpython' + pyver + sys.abiflags]",
            "libs += getvar('LIBS').split()",
            "libs += getvar('SYSLIBS').split()",
            "print(' '.join(libs))",
        ])
    }
    /// Returns linker flags required for creating
    /// a shared library for this Python distribution. All libraries / frameworks
    /// have the appropriate `-L`, `-l`, or `-framework` prefixes.
    ///
    /// On macOS, the single string may resemble something like
    ///
    /// ```text
    /// -L/usr/local/opt/python/Frameworks/Python.framework/Versions/3.7/lib/python3.7/config-3.7m-darwin -lpython3.7m -ldl -framework CoreFoundation
    /// ```
    pub fn ldflags(&self) -> PyResult<String> {
        // tab! indents a line so it lands inside the preceding Python `if`.
        self.script(&[
            "import sys",
            "libs = ['-lpython' + pyver + sys.abiflags]",
            linux_line!["libs.insert(0, '-L' + getvar('exec_prefix') + '/lib')"],
            "libs += getvar('LIBS').split()",
            "libs += getvar('SYSLIBS').split()",
            "if not getvar('Py_ENABLED_SHARED'):",
            tab!("libs.insert(0, '-L' + getvar('LIBPL'))"),
            "if not getvar('PYTHONFRAMEWORK'):",
            tab!("libs.extend(getvar('LINKFORSHARED').split())"),
            "print(' '.join(libs))",
        ])
    }
    /// Returns a string that represents the file extension for this distribution's library
    ///
    /// This is only available when your interpreter is a Python 3 interpreter! This is for
    /// feature parity with the `python3-config` script.
    ///
    /// On macOS, the string may resemble something like `.cpython-37m-darwin.so`.
    pub fn extension_suffix(&self) -> Py3Only<String> {
        self.is_py3()?;
        let resp = self.script(&["print(getvar('EXT_SUFFIX'))"])?;
        Ok(resp)
    }
    /// The ABI flags specified when building this Python distribution
    ///
    /// This is only available when your interpreter is a Python 3 interpreter! This is for
    /// feature parity with the `python3-config` script.
    pub fn abi_flags(&self) -> Py3Only<String> {
        self.is_py3()?;
        let resp = self.script(&["import sys", "print(sys.abiflags)"])?;
        Ok(resp)
    }
    /// The location of the distribution's actual `python3-config` script
    ///
    /// This is only available when your interpreter is a Python 3 interpreter! This is for
    /// feature parity with the `python3-config` script.
    pub fn config_dir(&self) -> Py3Only<String> {
        self.is_py3()?;
        let resp = self.script(&["print(getvar('LIBPL'))"])?;
        Ok(resp)
    }
    /// Like [`config_dir`](#method.config_dir), but returns the path to
    /// the distribution's `python-config` script as a `PathBuf`.
    ///
    /// This is only available when your interpreter is a Python 3 interpreter! This is for
    /// feature parity with the `python3-config` script.
    pub fn config_dir_path(&self) -> Py3Only<PathBuf> {
        self.config_dir().map(PathBuf::from)
    }
}
#[cfg(test)]
mod tests {
//! The tests only show that, under normal circumstances, there
//! are no errors returned from the public API.
use super::PythonConfig;
use std::path::PathBuf;
macro_rules! pycfgtest {
($ident:ident) => {
#[test]
fn $ident() {
assert!(PythonConfig::new().$ident().is_ok());
}
};
}
pycfgtest!(version_raw);
pycfgtest!(semantic_version);
pycfgtest!(prefix);
pycfgtest!(prefix_path);
pycfgtest!(exec_prefix);
pycfgtest!(exec_prefix_path);
pycfgtest!(includes);
pycfgtest!(include_paths);
pycfgtest!(cflags);
pycfgtest!(libs);
pycfgtest!(ldflags);
pycfgtest!(extension_suffix);
pycfgtest!(abi_flags);
pycfgtest!(config_dir);
pycfgtest!(config_dir_path);
// Shows that includes and include_paths return the same things
// just in different types.
#[test]
fn include_paths_same() {
let cfg = PythonConfig::new();
let include_str = cfg.includes().unwrap();
assert!(!include_str.is_empty());
let paths: Vec<PathBuf> = include_str
.split(" ")
.map(|include| {
// Drop the '-I' characters before each path
PathBuf::from(&include[2..])
})
.collect();
let actual = cfg.include_paths().unwrap();
assert_eq!(actual, paths);
}
} | /// an [`Error`](enum.Error.html).
pub type PyResult<T> = Result<T, Error>;
| random_line_split |
forward.go | package forward
import (
"fmt"
"io/ioutil"
"strconv"
"strings"
"time"
"github.com/coreos/go-iptables/iptables"
"github.com/lxc/lxd"
"github.com/lxc/lxd/shared" | type PortMappings struct {
// Name of the container - May be left empty in YAML config file
Name string `yaml:"name,omitempty"`
// Protocol should be "tcp" or "udp"
Protocol string `yaml:"protocol"`
// Ports is a mapping of host ports as keys to container ports as values
Ports map[string]int `yaml:",inline"`
}
// NewPortMappings initializes and returns an empty PortMappings struct
func NewPortMappings() PortMappings {
p := PortMappings{}
p.Ports = map[string]int{}
return p
}
// Config represents the Config File format that can be stored in YAML format
type Config struct {
Forwards map[string][]PortMappings `yaml:",inline"`
}
// NewConfig creates and returns initialized config
func NewConfig() Config {
c := Config{}
c.Forwards = map[string][]PortMappings{}
return c
}
// LoadYAMLConfig loads a YAML Port Forwarding config file and builds the appropriate config
func LoadYAMLConfig(path string) (config Config, err error) {
yml, err := ioutil.ReadFile(path)
if err != nil {
return config, err
}
err = yaml.Unmarshal(yml, &config)
return config, err
}
// Validate checks a config for correctness. Currently provides the following checks:
// * For each container, makes sure an equal number of Host and Container Ports are provided
// * Makes sure no Host port is used more than once.
func (c Config) Validate() (bool, error) {
// First do some sanity checks
hostPorts := map[string]interface{}{}
for container, portForwards := range c.Forwards {
for _, portForward := range portForwards {
// Make sure that port lists were actually provided
if len(portForward.Ports) == 0 {
return false, fmt.Errorf("No ports provided for container %s", container)
}
for hPort := range portForward.Ports {
_, err := strconv.Atoi(hPort)
if err != nil {
return false, fmt.Errorf("Invalid port %s provided for container %s", hPort, container)
}
// Can only forward a port from the host to one container, check to ensure no duplicate host ports
fullPort := portForward.Protocol + ":" + hPort
_, ok := hostPorts[fullPort]
if ok {
return false, fmt.Errorf("Port %s has already been mapped", fullPort)
}
hostPorts[fullPort] = nil
portForward.Name = container
}
}
}
return true, nil
}
// Forwarder represents a port forwarding client that can setup and teardown port forwarding for LXD containers
type Forwarder struct {
Config
*lxd.Client
}
const (
// ContainerStarted matches the text used in monitoring for a Container Starting up
ContainerStarted = "ContainerStart"
// ContainerStopped matches the text used in monitoring for a Container shutting down or being stopped
ContainerStopped = "ContainerStop"
// IPTable is the table that all IPTable rules should be added to
IPTable = "nat"
)
// NewForwarder validates the provided config then creates and returns port forward client
func NewForwarder(config Config) (*Forwarder, error) {
_, err := config.Validate()
if err != nil {
return nil, err
}
c := Forwarder{}
c.Client, err = lxd.NewClient(&lxd.DefaultConfig, "local")
if err != nil {
return nil, err
}
c.Config = config
return &c, nil
}
// Forward enables forwarding for all containers and port mappings provided in the client config
func (f Forwarder) Forward() error {
errs := []string{}
for container := range f.Config.Forwards {
err := f.ForwardContainer(container)
if err != nil {
errs = append(errs, container)
}
}
var err error
if len(errs) > 0 {
err = fmt.Errorf("Unable to forward ports for containers %s", strings.Join(errs, ", "))
}
return err
}
// Reverse disables forwarding for all containers and port mappings provided in the client config
func (f Forwarder) Reverse() error {
errs := []string{}
for container := range f.Config.Forwards {
err := f.ReverseContainer(container)
if err != nil {
errs = append(errs, container)
}
}
var err error
if len(errs) > 0 {
err = fmt.Errorf("Unable to remove forwarding of ports for containers %s", strings.Join(errs, ", "))
}
return err
}
// ForwardContainer turns on port forwarding for the provided container
// Uses iptables to place ipv4 and ipv6 port forwarding rules
func (f Forwarder) ForwardContainer(container string) error {
_, ok := f.Config.Forwards[container]
if !ok {
return fmt.Errorf("No port rules provided for %s", container)
}
state, err := f.ContainerState(container)
if err != nil {
return fmt.Errorf("unable to get container state for container %s: %s", container, err)
}
if state.StatusCode != shared.Running {
return fmt.Errorf("Container %s is not currently running", container)
}
// Get list of IP addresses on the container to forward to
ip4Addresses := []string{}
ip6Addresses := []string{}
for name, network := range state.Network {
if strings.Contains(name, "eth") || strings.Contains(name, "enp") {
// TODO: Can map interface in container to bridge being used, find standard way to find which interfaces on host bridge is tied to
for _, address := range network.Addresses {
switch address.Family {
case "inet":
ip4Addresses = append(ip4Addresses, address.Address)
case "inet6":
ip6Addresses = append(ip6Addresses, address.Address)
}
}
}
}
iptable, err := iptables.New()
if err != nil {
return err
}
ip6table, err := iptables.NewWithProtocol(iptables.ProtocolIPv6)
if err != nil {
return err
}
// Create a new custom chain for the IPTable rules for just this container
customDstChain := getChain(container, Dst)
customSrcChain := getChain(container, Src)
err = iptable.NewChain(IPTable, customDstChain)
if err != nil {
return err
}
err = iptable.NewChain(IPTable, customSrcChain)
if err != nil {
return err
}
err = ip6table.NewChain(IPTable, customDstChain)
if err != nil {
return err
}
err = ip6table.NewChain(IPTable, customSrcChain)
if err != nil {
return err
}
// Tell IPTables when to use our custom chain
err = iptable.Insert(IPTable, "PREROUTING", 1, getChainForwardRule(container, IPv4, Dst)...)
if err != nil {
return err
}
err = ip6table.Insert(IPTable, "PREROUTING", 1, getChainForwardRule(container, IPv6, Dst)...)
if err != nil {
return err
}
err = iptable.Insert(IPTable, "OUTPUT", 1, getChainForwardRule(container, IPv4, Dst)...)
if err != nil {
return err
}
err = ip6table.Insert(IPTable, "OUTPUT", 1, getChainForwardRule(container, IPv6, Dst)...)
if err != nil {
return err
}
err = iptable.Insert(IPTable, "POSTROUTING", 1, getChainForwardRule(container, IPv4, Src)...)
if err != nil {
return err
}
err = ip6table.Insert(IPTable, "POSTROUTING", 1, getChainForwardRule(container, IPv6, Src)...)
if err != nil {
return err
}
// Set up rules within the custom chain of the actual port forwardings
for _, portForwards := range f.Config.Forwards[container] {
protocol := portForwards.Protocol
for hostPort, containerPort := range portForwards.Ports {
for _, address := range ip4Addresses {
iptable.Append(IPTable, customDstChain, getPortForwardRule(protocol, address, strconv.Itoa(containerPort), hostPort, IPv4, Dst)...)
iptable.Append(IPTable, customSrcChain, getPortForwardRule(protocol, address, strconv.Itoa(containerPort), hostPort, IPv4, Src)...)
}
for _, address := range ip6Addresses {
ip6table.Append(IPTable, customDstChain, getPortForwardRule(protocol, address, strconv.Itoa(containerPort), hostPort, IPv6, Dst)...)
ip6table.Append(IPTable, customSrcChain, getPortForwardRule(protocol, address, strconv.Itoa(containerPort), hostPort, IPv6, Src)...)
}
}
}
return nil
}
// ReverseContainer removes port forwarding for the provided container
func (f Forwarder) ReverseContainer(container string) error {
customDstChain := getChain(container, Dst)
customSrcChain := getChain(container, Src)
iptable, err := iptables.New()
if err != nil {
return err
}
ip6table, err := iptables.NewWithProtocol(iptables.ProtocolIPv6)
if err != nil {
return err
}
iptable.Delete(IPTable, "PREROUTING", getChainForwardRule(container, IPv4, Dst)...)
ip6table.Delete(IPTable, "PREROUTING", getChainForwardRule(container, IPv6, Dst)...)
iptable.Delete(IPTable, "OUTPUT", getChainForwardRule(container, IPv4, Dst)...)
ip6table.Delete(IPTable, "OUTPUT", getChainForwardRule(container, IPv6, Dst)...)
iptable.Delete(IPTable, "POSTROUTING", getChainForwardRule(container, IPv4, Src)...)
ip6table.Delete(IPTable, "POSTROUTING", getChainForwardRule(container, IPv6, Src)...)
iptable.ClearChain(IPTable, customDstChain)
iptable.DeleteChain(IPTable, customDstChain)
iptable.ClearChain(IPTable, customSrcChain)
iptable.DeleteChain(IPTable, customSrcChain)
ip6table.ClearChain(IPTable, customDstChain)
ip6table.DeleteChain(IPTable, customDstChain)
ip6table.ClearChain(IPTable, customSrcChain)
ip6table.DeleteChain(IPTable, customSrcChain)
return nil
}
// Watch monitors LXD events and identifies when containers named in the config are stopped or started,
// and disables or enables port forwarding respecitvely
func (f Forwarder) Watch() {
handler := func(i interface{}) {
var container string
var message string
var context map[string]interface{}
data := i.(map[string]interface{})
metadata := data["metadata"].(map[string]interface{})
tmp, ok := metadata["context"]
if ok {
context = tmp.(map[string]interface{})
}
tmp, ok = context["container"]
if ok {
container = tmp.(string)
}
_, ok = f.Forwards[container]
if ok {
tmp, ok := metadata["message"]
if ok {
message = tmp.(string)
}
switch message {
case ContainerStarted:
go func() {
// Wait a few seconds for the newly running container to get an IP address
time.Sleep(2 * time.Second)
f.ForwardContainer(container)
}()
case ContainerStopped:
f.ReverseContainer(container)
}
}
}
f.Monitor([]string{}, handler)
} | "gopkg.in/yaml.v2"
)
// PortMappings contains information for mapping ports from a host to a container | random_line_split |
forward.go | package forward
import (
"fmt"
"io/ioutil"
"strconv"
"strings"
"time"
"github.com/coreos/go-iptables/iptables"
"github.com/lxc/lxd"
"github.com/lxc/lxd/shared"
"gopkg.in/yaml.v2"
)
// PortMappings contains information for mapping ports from a host to a container
type PortMappings struct {
	// Name of the container - May be left empty in YAML config file
	Name string `yaml:"name,omitempty"`
	// Protocol should be "tcp" or "udp"
	Protocol string `yaml:"protocol"`
	// Ports is a mapping of host ports as keys to container ports as values.
	// Host ports are strings (YAML map keys) and are validated as numeric
	// by Config.Validate.
	Ports map[string]int `yaml:",inline"`
}
// NewPortMappings initializes and returns an empty PortMappings struct
// whose Ports map is non-nil and ready to be populated.
func NewPortMappings() PortMappings {
	return PortMappings{Ports: map[string]int{}}
}
// Config represents the Config File format that can be stored in YAML format
type Config struct {
	// Forwards maps a container name to the port mappings requested for it.
	Forwards map[string][]PortMappings `yaml:",inline"`
}
// NewConfig creates and returns initialized config
// whose Forwards map is non-nil and ready to be populated.
func NewConfig() Config {
	return Config{Forwards: map[string][]PortMappings{}}
}
// LoadYAMLConfig loads a YAML Port Forwarding config file and builds the appropriate config
func LoadYAMLConfig(path string) (Config, error) {
	var config Config
	raw, err := ioutil.ReadFile(path)
	if err != nil {
		return config, err
	}
	return config, yaml.Unmarshal(raw, &config)
}
// Validate checks a config for correctness. Currently provides the following checks:
// * For each container, makes sure at least one port mapping is provided
// * Makes sure every host port is numeric
// * Makes sure no Host port is used more than once per protocol.
//
// As a side effect, each PortMappings entry has its Name field set to the
// container key it belongs to.
func (c Config) Validate() (bool, error) {
	// Tracks "protocol:port" pairs already claimed on the host.
	hostPorts := map[string]struct{}{}
	for container, portForwards := range c.Forwards {
		for i := range portForwards {
			// Index into the slice rather than ranging over values: a range
			// value is a copy, so assigning Name to it (as the previous
			// version did) never reached the config. Writing through the
			// slice element persists because slices share their backing array.
			portForward := &portForwards[i]
			// Make sure that port lists were actually provided
			if len(portForward.Ports) == 0 {
				return false, fmt.Errorf("No ports provided for container %s", container)
			}
			for hPort := range portForward.Ports {
				if _, err := strconv.Atoi(hPort); err != nil {
					return false, fmt.Errorf("Invalid port %s provided for container %s", hPort, container)
				}
				// Can only forward a port from the host to one container, check to ensure no duplicate host ports
				fullPort := portForward.Protocol + ":" + hPort
				if _, seen := hostPorts[fullPort]; seen {
					return false, fmt.Errorf("Port %s has already been mapped", fullPort)
				}
				hostPorts[fullPort] = struct{}{}
			}
			// Record the owning container once per mapping (previously this ran
			// once per host port, on a discarded copy).
			portForward.Name = container
		}
	}
	return true, nil
}
// Forwarder represents a port forwarding client that can setup and teardown port forwarding for LXD containers
//
// Both fields are embedded, so Config fields and lxd.Client methods
// (e.g. ContainerState, Monitor) are promoted onto Forwarder.
type Forwarder struct {
	Config
	*lxd.Client
}
const (
	// ContainerStarted matches the text used in monitoring for a Container Starting up
	ContainerStarted = "ContainerStart"
	// ContainerStopped matches the text used in monitoring for a Container shutting down or being stopped
	ContainerStopped = "ContainerStop"
	// IPTable is the table that all IPTable rules should be added to
	// ("nat" — the table where DNAT/SNAT rules live).
	IPTable = "nat"
)
// NewForwarder validates the provided config then creates and returns port forward client
// connected to the local LXD daemon.
func NewForwarder(config Config) (*Forwarder, error) {
	if _, err := config.Validate(); err != nil {
		return nil, err
	}
	client, err := lxd.NewClient(&lxd.DefaultConfig, "local")
	if err != nil {
		return nil, err
	}
	return &Forwarder{Config: config, Client: client}, nil
}
// Forward enables forwarding for all containers and port mappings provided in the client config.
// Containers that fail are collected and reported in a single error.
func (f Forwarder) Forward() error {
	failed := []string{}
	for container := range f.Config.Forwards {
		if err := f.ForwardContainer(container); err != nil {
			failed = append(failed, container)
		}
	}
	if len(failed) == 0 {
		return nil
	}
	return fmt.Errorf("Unable to forward ports for containers %s", strings.Join(failed, ", "))
}
// Reverse disables forwarding for all containers and port mappings provided in the client config.
// Containers that fail are collected and reported in a single error.
func (f Forwarder) Reverse() error {
	failed := []string{}
	for container := range f.Config.Forwards {
		if err := f.ReverseContainer(container); err != nil {
			failed = append(failed, container)
		}
	}
	if len(failed) == 0 {
		return nil
	}
	return fmt.Errorf("Unable to remove forwarding of ports for containers %s", strings.Join(failed, ", "))
}
// ForwardContainer turns on port forwarding for the provided container
// Uses iptables to place ipv4 and ipv6 port forwarding rules
//
// The container must exist in the config and be running. getChain,
// getChainForwardRule, getPortForwardRule, Dst/Src and IPv4/IPv6 are
// helpers defined elsewhere in this package.
func (f Forwarder) ForwardContainer(container string) error {
	_, ok := f.Config.Forwards[container]
	if !ok {
		return fmt.Errorf("No port rules provided for %s", container)
	}
	state, err := f.ContainerState(container)
	if err != nil {
		return fmt.Errorf("unable to get container state for container %s: %s", container, err)
	}
	if state.StatusCode != shared.Running {
		return fmt.Errorf("Container %s is not currently running", container)
	}
	// Get list of IP addresses on the container to forward to
	ip4Addresses := []string{}
	ip6Addresses := []string{}
	for name, network := range state.Network {
		if strings.Contains(name, "eth") || strings.Contains(name, "enp") {
			// TODO: Can map interface in container to bridge being used, find standard way to find which interfaces on host bridge is tied to
			for _, address := range network.Addresses {
				switch address.Family {
				case "inet":
					ip4Addresses = append(ip4Addresses, address.Address)
				case "inet6":
					ip6Addresses = append(ip6Addresses, address.Address)
				}
			}
		}
	}
	iptable, err := iptables.New()
	if err != nil {
		return err
	}
	ip6table, err := iptables.NewWithProtocol(iptables.ProtocolIPv6)
	if err != nil {
		return err
	}
	// Create a new custom chain for the IPTable rules for just this container
	customDstChain := getChain(container, Dst)
	customSrcChain := getChain(container, Src)
	err = iptable.NewChain(IPTable, customDstChain)
	if err != nil {
		return err
	}
	err = iptable.NewChain(IPTable, customSrcChain)
	if err != nil {
		return err
	}
	err = ip6table.NewChain(IPTable, customDstChain)
	if err != nil {
		return err
	}
	err = ip6table.NewChain(IPTable, customSrcChain)
	if err != nil {
		return err
	}
	// Tell IPTables when to use our custom chain.
	// Each rule is inserted at position 1 of the built-in chain so the
	// custom chains are consulted before any pre-existing rules.
	err = iptable.Insert(IPTable, "PREROUTING", 1, getChainForwardRule(container, IPv4, Dst)...)
	if err != nil {
		return err
	}
	err = ip6table.Insert(IPTable, "PREROUTING", 1, getChainForwardRule(container, IPv6, Dst)...)
	if err != nil {
		return err
	}
	err = iptable.Insert(IPTable, "OUTPUT", 1, getChainForwardRule(container, IPv4, Dst)...)
	if err != nil {
		return err
	}
	err = ip6table.Insert(IPTable, "OUTPUT", 1, getChainForwardRule(container, IPv6, Dst)...)
	if err != nil {
		return err
	}
	err = iptable.Insert(IPTable, "POSTROUTING", 1, getChainForwardRule(container, IPv4, Src)...)
	if err != nil {
		return err
	}
	err = ip6table.Insert(IPTable, "POSTROUTING", 1, getChainForwardRule(container, IPv6, Src)...)
	if err != nil {
		return err
	}
	// Set up rules within the custom chain of the actual port forwardings.
	// NOTE(review): the Append errors below are silently discarded — a
	// failed rule leaves the container partially forwarded with no signal
	// to the caller; consider collecting these errors.
	for _, portForwards := range f.Config.Forwards[container] {
		protocol := portForwards.Protocol
		for hostPort, containerPort := range portForwards.Ports {
			for _, address := range ip4Addresses {
				iptable.Append(IPTable, customDstChain, getPortForwardRule(protocol, address, strconv.Itoa(containerPort), hostPort, IPv4, Dst)...)
				iptable.Append(IPTable, customSrcChain, getPortForwardRule(protocol, address, strconv.Itoa(containerPort), hostPort, IPv4, Src)...)
			}
			for _, address := range ip6Addresses {
				ip6table.Append(IPTable, customDstChain, getPortForwardRule(protocol, address, strconv.Itoa(containerPort), hostPort, IPv6, Dst)...)
				ip6table.Append(IPTable, customSrcChain, getPortForwardRule(protocol, address, strconv.Itoa(containerPort), hostPort, IPv6, Src)...)
			}
		}
	}
	return nil
}
// ReverseContainer removes port forwarding for the provided container
//
// It unhooks the per-container custom chains from PREROUTING/OUTPUT/
// POSTROUTING, then flushes and deletes the chains for both IPv4 and IPv6.
// All Delete/ClearChain/DeleteChain errors are ignored — presumably a
// deliberate best-effort teardown so missing rules don't abort cleanup;
// NOTE(review): confirm this is intended, as real failures are also hidden.
func (f Forwarder) ReverseContainer(container string) error {
	customDstChain := getChain(container, Dst)
	customSrcChain := getChain(container, Src)
	iptable, err := iptables.New()
	if err != nil {
		return err
	}
	ip6table, err := iptables.NewWithProtocol(iptables.ProtocolIPv6)
	if err != nil {
		return err
	}
	iptable.Delete(IPTable, "PREROUTING", getChainForwardRule(container, IPv4, Dst)...)
	ip6table.Delete(IPTable, "PREROUTING", getChainForwardRule(container, IPv6, Dst)...)
	iptable.Delete(IPTable, "OUTPUT", getChainForwardRule(container, IPv4, Dst)...)
	ip6table.Delete(IPTable, "OUTPUT", getChainForwardRule(container, IPv6, Dst)...)
	iptable.Delete(IPTable, "POSTROUTING", getChainForwardRule(container, IPv4, Src)...)
	ip6table.Delete(IPTable, "POSTROUTING", getChainForwardRule(container, IPv6, Src)...)
	iptable.ClearChain(IPTable, customDstChain)
	iptable.DeleteChain(IPTable, customDstChain)
	iptable.ClearChain(IPTable, customSrcChain)
	iptable.DeleteChain(IPTable, customSrcChain)
	ip6table.ClearChain(IPTable, customDstChain)
	ip6table.DeleteChain(IPTable, customDstChain)
	ip6table.ClearChain(IPTable, customSrcChain)
	ip6table.DeleteChain(IPTable, customSrcChain)
	return nil
}
// Watch monitors LXD events and identifies when containers named in the config are stopped or started,
// and disables or enables port forwarding respecitvely
func (f Forwarder) | () {
handler := func(i interface{}) {
var container string
var message string
var context map[string]interface{}
data := i.(map[string]interface{})
metadata := data["metadata"].(map[string]interface{})
tmp, ok := metadata["context"]
if ok {
context = tmp.(map[string]interface{})
}
tmp, ok = context["container"]
if ok {
container = tmp.(string)
}
_, ok = f.Forwards[container]
if ok {
tmp, ok := metadata["message"]
if ok {
message = tmp.(string)
}
switch message {
case ContainerStarted:
go func() {
// Wait a few seconds for the newly running container to get an IP address
time.Sleep(2 * time.Second)
f.ForwardContainer(container)
}()
case ContainerStopped:
f.ReverseContainer(container)
}
}
}
f.Monitor([]string{}, handler)
}
| Watch | identifier_name |
forward.go | package forward
import (
"fmt"
"io/ioutil"
"strconv"
"strings"
"time"
"github.com/coreos/go-iptables/iptables"
"github.com/lxc/lxd"
"github.com/lxc/lxd/shared"
"gopkg.in/yaml.v2"
)
// PortMappings contains information for mapping ports from a host to a container
type PortMappings struct {
// Name of the container - May be left empty in YAML config file
Name string `yaml:"name,omitempty"`
// Protocol should be "tcp" or "udp"
Protocol string `yaml:"protocol"`
// Ports is a mapping of host ports as keys to container ports as values
Ports map[string]int `yaml:",inline"`
}
// NewPortMappings initializes and returns an empty PortMappings struct
func NewPortMappings() PortMappings {
p := PortMappings{}
p.Ports = map[string]int{}
return p
}
// Config represents the Config File format that can be stored in YAML format
type Config struct {
Forwards map[string][]PortMappings `yaml:",inline"`
}
// NewConfig creates and returns initialized config
func NewConfig() Config {
c := Config{}
c.Forwards = map[string][]PortMappings{}
return c
}
// LoadYAMLConfig loads a YAML Port Forwarding config file and builds the appropriate config
func LoadYAMLConfig(path string) (config Config, err error) {
yml, err := ioutil.ReadFile(path)
if err != nil {
return config, err
}
err = yaml.Unmarshal(yml, &config)
return config, err
}
// Validate checks a config for correctness. Currently provides the following checks:
// * For each container, makes sure an equal number of Host and Container Ports are provided
// * Makes sure no Host port is used more than once.
func (c Config) Validate() (bool, error) {
// First do some sanity checks
hostPorts := map[string]interface{}{}
for container, portForwards := range c.Forwards {
for _, portForward := range portForwards {
// Make sure that port lists were actually provided
if len(portForward.Ports) == 0 {
return false, fmt.Errorf("No ports provided for container %s", container)
}
for hPort := range portForward.Ports {
_, err := strconv.Atoi(hPort)
if err != nil {
return false, fmt.Errorf("Invalid port %s provided for container %s", hPort, container)
}
// Can only forward a port from the host to one container, check to ensure no duplicate host ports
fullPort := portForward.Protocol + ":" + hPort
_, ok := hostPorts[fullPort]
if ok {
return false, fmt.Errorf("Port %s has already been mapped", fullPort)
}
hostPorts[fullPort] = nil
portForward.Name = container
}
}
}
return true, nil
}
// Forwarder represents a port forwarding client that can setup and teardown port forwarding for LXD containers
type Forwarder struct {
Config
*lxd.Client
}
const (
// ContainerStarted matches the text used in monitoring for a Container Starting up
ContainerStarted = "ContainerStart"
// ContainerStopped matches the text used in monitoring for a Container shutting down or being stopped
ContainerStopped = "ContainerStop"
// IPTable is the table that all IPTable rules should be added to
IPTable = "nat"
)
// NewForwarder validates the provided config then creates and returns port forward client
func NewForwarder(config Config) (*Forwarder, error) {
_, err := config.Validate()
if err != nil {
return nil, err
}
c := Forwarder{}
c.Client, err = lxd.NewClient(&lxd.DefaultConfig, "local")
if err != nil {
return nil, err
}
c.Config = config
return &c, nil
}
// Forward enables forwarding for all containers and port mappings provided in the client config
func (f Forwarder) Forward() error {
errs := []string{}
for container := range f.Config.Forwards {
err := f.ForwardContainer(container)
if err != nil {
errs = append(errs, container)
}
}
var err error
if len(errs) > 0 {
err = fmt.Errorf("Unable to forward ports for containers %s", strings.Join(errs, ", "))
}
return err
}
// Reverse disables forwarding for all containers and port mappings provided in the client config
func (f Forwarder) Reverse() error {
errs := []string{}
for container := range f.Config.Forwards {
err := f.ReverseContainer(container)
if err != nil {
errs = append(errs, container)
}
}
var err error
if len(errs) > 0 {
err = fmt.Errorf("Unable to remove forwarding of ports for containers %s", strings.Join(errs, ", "))
}
return err
}
// ForwardContainer turns on port forwarding for the provided container
// Uses iptables to place ipv4 and ipv6 port forwarding rules
func (f Forwarder) ForwardContainer(container string) error {
_, ok := f.Config.Forwards[container]
if !ok {
return fmt.Errorf("No port rules provided for %s", container)
}
state, err := f.ContainerState(container)
if err != nil {
return fmt.Errorf("unable to get container state for container %s: %s", container, err)
}
if state.StatusCode != shared.Running {
return fmt.Errorf("Container %s is not currently running", container)
}
// Get list of IP addresses on the container to forward to
ip4Addresses := []string{}
ip6Addresses := []string{}
for name, network := range state.Network {
if strings.Contains(name, "eth") || strings.Contains(name, "enp") {
// TODO: Can map interface in container to bridge being used, find standard way to find which interfaces on host bridge is tied to
for _, address := range network.Addresses {
switch address.Family {
case "inet":
ip4Addresses = append(ip4Addresses, address.Address)
case "inet6":
ip6Addresses = append(ip6Addresses, address.Address)
}
}
}
}
iptable, err := iptables.New()
if err != nil {
return err
}
ip6table, err := iptables.NewWithProtocol(iptables.ProtocolIPv6)
if err != nil {
return err
}
// Create a new custom chain for the IPTable rules for just this container
customDstChain := getChain(container, Dst)
customSrcChain := getChain(container, Src)
err = iptable.NewChain(IPTable, customDstChain)
if err != nil {
return err
}
err = iptable.NewChain(IPTable, customSrcChain)
if err != nil {
return err
}
err = ip6table.NewChain(IPTable, customDstChain)
if err != nil {
return err
}
err = ip6table.NewChain(IPTable, customSrcChain)
if err != nil {
return err
}
// Tell IPTables when to use our custom chain
err = iptable.Insert(IPTable, "PREROUTING", 1, getChainForwardRule(container, IPv4, Dst)...)
if err != nil {
return err
}
err = ip6table.Insert(IPTable, "PREROUTING", 1, getChainForwardRule(container, IPv6, Dst)...)
if err != nil {
return err
}
err = iptable.Insert(IPTable, "OUTPUT", 1, getChainForwardRule(container, IPv4, Dst)...)
if err != nil {
return err
}
err = ip6table.Insert(IPTable, "OUTPUT", 1, getChainForwardRule(container, IPv6, Dst)...)
if err != nil {
return err
}
err = iptable.Insert(IPTable, "POSTROUTING", 1, getChainForwardRule(container, IPv4, Src)...)
if err != nil {
return err
}
err = ip6table.Insert(IPTable, "POSTROUTING", 1, getChainForwardRule(container, IPv6, Src)...)
if err != nil {
return err
}
// Set up rules within the custom chain of the actual port forwardings
for _, portForwards := range f.Config.Forwards[container] {
protocol := portForwards.Protocol
for hostPort, containerPort := range portForwards.Ports {
for _, address := range ip4Addresses {
iptable.Append(IPTable, customDstChain, getPortForwardRule(protocol, address, strconv.Itoa(containerPort), hostPort, IPv4, Dst)...)
iptable.Append(IPTable, customSrcChain, getPortForwardRule(protocol, address, strconv.Itoa(containerPort), hostPort, IPv4, Src)...)
}
for _, address := range ip6Addresses {
ip6table.Append(IPTable, customDstChain, getPortForwardRule(protocol, address, strconv.Itoa(containerPort), hostPort, IPv6, Dst)...)
ip6table.Append(IPTable, customSrcChain, getPortForwardRule(protocol, address, strconv.Itoa(containerPort), hostPort, IPv6, Src)...)
}
}
}
return nil
}
// ReverseContainer removes port forwarding for the provided container
func (f Forwarder) ReverseContainer(container string) error {
customDstChain := getChain(container, Dst)
customSrcChain := getChain(container, Src)
iptable, err := iptables.New()
if err != nil {
return err
}
ip6table, err := iptables.NewWithProtocol(iptables.ProtocolIPv6)
if err != nil {
return err
}
iptable.Delete(IPTable, "PREROUTING", getChainForwardRule(container, IPv4, Dst)...)
ip6table.Delete(IPTable, "PREROUTING", getChainForwardRule(container, IPv6, Dst)...)
iptable.Delete(IPTable, "OUTPUT", getChainForwardRule(container, IPv4, Dst)...)
ip6table.Delete(IPTable, "OUTPUT", getChainForwardRule(container, IPv6, Dst)...)
iptable.Delete(IPTable, "POSTROUTING", getChainForwardRule(container, IPv4, Src)...)
ip6table.Delete(IPTable, "POSTROUTING", getChainForwardRule(container, IPv6, Src)...)
iptable.ClearChain(IPTable, customDstChain)
iptable.DeleteChain(IPTable, customDstChain)
iptable.ClearChain(IPTable, customSrcChain)
iptable.DeleteChain(IPTable, customSrcChain)
ip6table.ClearChain(IPTable, customDstChain)
ip6table.DeleteChain(IPTable, customDstChain)
ip6table.ClearChain(IPTable, customSrcChain)
ip6table.DeleteChain(IPTable, customSrcChain)
return nil
}
// Watch monitors LXD events and identifies when containers named in the config are stopped or started,
// and disables or enables port forwarding respecitvely
func (f Forwarder) Watch() | {
handler := func(i interface{}) {
var container string
var message string
var context map[string]interface{}
data := i.(map[string]interface{})
metadata := data["metadata"].(map[string]interface{})
tmp, ok := metadata["context"]
if ok {
context = tmp.(map[string]interface{})
}
tmp, ok = context["container"]
if ok {
container = tmp.(string)
}
_, ok = f.Forwards[container]
if ok {
tmp, ok := metadata["message"]
if ok {
message = tmp.(string)
}
switch message {
case ContainerStarted:
go func() {
// Wait a few seconds for the newly running container to get an IP address
time.Sleep(2 * time.Second)
f.ForwardContainer(container)
}()
case ContainerStopped:
f.ReverseContainer(container)
}
}
}
f.Monitor([]string{}, handler)
} | identifier_body | |
forward.go | package forward
import (
"fmt"
"io/ioutil"
"strconv"
"strings"
"time"
"github.com/coreos/go-iptables/iptables"
"github.com/lxc/lxd"
"github.com/lxc/lxd/shared"
"gopkg.in/yaml.v2"
)
// PortMappings contains information for mapping ports from a host to a container
type PortMappings struct {
// Name of the container - May be left empty in YAML config file
Name string `yaml:"name,omitempty"`
// Protocol should be "tcp" or "udp"
Protocol string `yaml:"protocol"`
// Ports is a mapping of host ports as keys to container ports as values
Ports map[string]int `yaml:",inline"`
}
// NewPortMappings initializes and returns an empty PortMappings struct
func NewPortMappings() PortMappings {
p := PortMappings{}
p.Ports = map[string]int{}
return p
}
// Config represents the Config File format that can be stored in YAML format
type Config struct {
Forwards map[string][]PortMappings `yaml:",inline"`
}
// NewConfig creates and returns initialized config
func NewConfig() Config {
c := Config{}
c.Forwards = map[string][]PortMappings{}
return c
}
// LoadYAMLConfig loads a YAML Port Forwarding config file and builds the appropriate config
func LoadYAMLConfig(path string) (config Config, err error) {
yml, err := ioutil.ReadFile(path)
if err != nil |
err = yaml.Unmarshal(yml, &config)
return config, err
}
// Validate checks a config for correctness. Currently provides the following checks:
// * For each container, makes sure an equal number of Host and Container Ports are provided
// * Makes sure no Host port is used more than once.
func (c Config) Validate() (bool, error) {
// First do some sanity checks
hostPorts := map[string]interface{}{}
for container, portForwards := range c.Forwards {
for _, portForward := range portForwards {
// Make sure that port lists were actually provided
if len(portForward.Ports) == 0 {
return false, fmt.Errorf("No ports provided for container %s", container)
}
for hPort := range portForward.Ports {
_, err := strconv.Atoi(hPort)
if err != nil {
return false, fmt.Errorf("Invalid port %s provided for container %s", hPort, container)
}
// Can only forward a port from the host to one container, check to ensure no duplicate host ports
fullPort := portForward.Protocol + ":" + hPort
_, ok := hostPorts[fullPort]
if ok {
return false, fmt.Errorf("Port %s has already been mapped", fullPort)
}
hostPorts[fullPort] = nil
portForward.Name = container
}
}
}
return true, nil
}
// Forwarder represents a port forwarding client that can setup and teardown port forwarding for LXD containers
type Forwarder struct {
Config
*lxd.Client
}
const (
// ContainerStarted matches the text used in monitoring for a Container Starting up
ContainerStarted = "ContainerStart"
// ContainerStopped matches the text used in monitoring for a Container shutting down or being stopped
ContainerStopped = "ContainerStop"
// IPTable is the table that all IPTable rules should be added to
IPTable = "nat"
)
// NewForwarder validates the provided config then creates and returns port forward client
func NewForwarder(config Config) (*Forwarder, error) {
_, err := config.Validate()
if err != nil {
return nil, err
}
c := Forwarder{}
c.Client, err = lxd.NewClient(&lxd.DefaultConfig, "local")
if err != nil {
return nil, err
}
c.Config = config
return &c, nil
}
// Forward enables forwarding for all containers and port mappings provided in the client config
func (f Forwarder) Forward() error {
errs := []string{}
for container := range f.Config.Forwards {
err := f.ForwardContainer(container)
if err != nil {
errs = append(errs, container)
}
}
var err error
if len(errs) > 0 {
err = fmt.Errorf("Unable to forward ports for containers %s", strings.Join(errs, ", "))
}
return err
}
// Reverse disables forwarding for all containers and port mappings provided in the client config
func (f Forwarder) Reverse() error {
errs := []string{}
for container := range f.Config.Forwards {
err := f.ReverseContainer(container)
if err != nil {
errs = append(errs, container)
}
}
var err error
if len(errs) > 0 {
err = fmt.Errorf("Unable to remove forwarding of ports for containers %s", strings.Join(errs, ", "))
}
return err
}
// ForwardContainer turns on port forwarding for the provided container
// Uses iptables to place ipv4 and ipv6 port forwarding rules
func (f Forwarder) ForwardContainer(container string) error {
_, ok := f.Config.Forwards[container]
if !ok {
return fmt.Errorf("No port rules provided for %s", container)
}
state, err := f.ContainerState(container)
if err != nil {
return fmt.Errorf("unable to get container state for container %s: %s", container, err)
}
if state.StatusCode != shared.Running {
return fmt.Errorf("Container %s is not currently running", container)
}
// Get list of IP addresses on the container to forward to
ip4Addresses := []string{}
ip6Addresses := []string{}
for name, network := range state.Network {
if strings.Contains(name, "eth") || strings.Contains(name, "enp") {
// TODO: Can map interface in container to bridge being used, find standard way to find which interfaces on host bridge is tied to
for _, address := range network.Addresses {
switch address.Family {
case "inet":
ip4Addresses = append(ip4Addresses, address.Address)
case "inet6":
ip6Addresses = append(ip6Addresses, address.Address)
}
}
}
}
iptable, err := iptables.New()
if err != nil {
return err
}
ip6table, err := iptables.NewWithProtocol(iptables.ProtocolIPv6)
if err != nil {
return err
}
// Create a new custom chain for the IPTable rules for just this container
customDstChain := getChain(container, Dst)
customSrcChain := getChain(container, Src)
err = iptable.NewChain(IPTable, customDstChain)
if err != nil {
return err
}
err = iptable.NewChain(IPTable, customSrcChain)
if err != nil {
return err
}
err = ip6table.NewChain(IPTable, customDstChain)
if err != nil {
return err
}
err = ip6table.NewChain(IPTable, customSrcChain)
if err != nil {
return err
}
// Tell IPTables when to use our custom chain
err = iptable.Insert(IPTable, "PREROUTING", 1, getChainForwardRule(container, IPv4, Dst)...)
if err != nil {
return err
}
err = ip6table.Insert(IPTable, "PREROUTING", 1, getChainForwardRule(container, IPv6, Dst)...)
if err != nil {
return err
}
err = iptable.Insert(IPTable, "OUTPUT", 1, getChainForwardRule(container, IPv4, Dst)...)
if err != nil {
return err
}
err = ip6table.Insert(IPTable, "OUTPUT", 1, getChainForwardRule(container, IPv6, Dst)...)
if err != nil {
return err
}
err = iptable.Insert(IPTable, "POSTROUTING", 1, getChainForwardRule(container, IPv4, Src)...)
if err != nil {
return err
}
err = ip6table.Insert(IPTable, "POSTROUTING", 1, getChainForwardRule(container, IPv6, Src)...)
if err != nil {
return err
}
// Set up rules within the custom chain of the actual port forwardings
for _, portForwards := range f.Config.Forwards[container] {
protocol := portForwards.Protocol
for hostPort, containerPort := range portForwards.Ports {
for _, address := range ip4Addresses {
iptable.Append(IPTable, customDstChain, getPortForwardRule(protocol, address, strconv.Itoa(containerPort), hostPort, IPv4, Dst)...)
iptable.Append(IPTable, customSrcChain, getPortForwardRule(protocol, address, strconv.Itoa(containerPort), hostPort, IPv4, Src)...)
}
for _, address := range ip6Addresses {
ip6table.Append(IPTable, customDstChain, getPortForwardRule(protocol, address, strconv.Itoa(containerPort), hostPort, IPv6, Dst)...)
ip6table.Append(IPTable, customSrcChain, getPortForwardRule(protocol, address, strconv.Itoa(containerPort), hostPort, IPv6, Src)...)
}
}
}
return nil
}
// ReverseContainer removes port forwarding for the provided container
func (f Forwarder) ReverseContainer(container string) error {
customDstChain := getChain(container, Dst)
customSrcChain := getChain(container, Src)
iptable, err := iptables.New()
if err != nil {
return err
}
ip6table, err := iptables.NewWithProtocol(iptables.ProtocolIPv6)
if err != nil {
return err
}
iptable.Delete(IPTable, "PREROUTING", getChainForwardRule(container, IPv4, Dst)...)
ip6table.Delete(IPTable, "PREROUTING", getChainForwardRule(container, IPv6, Dst)...)
iptable.Delete(IPTable, "OUTPUT", getChainForwardRule(container, IPv4, Dst)...)
ip6table.Delete(IPTable, "OUTPUT", getChainForwardRule(container, IPv6, Dst)...)
iptable.Delete(IPTable, "POSTROUTING", getChainForwardRule(container, IPv4, Src)...)
ip6table.Delete(IPTable, "POSTROUTING", getChainForwardRule(container, IPv6, Src)...)
iptable.ClearChain(IPTable, customDstChain)
iptable.DeleteChain(IPTable, customDstChain)
iptable.ClearChain(IPTable, customSrcChain)
iptable.DeleteChain(IPTable, customSrcChain)
ip6table.ClearChain(IPTable, customDstChain)
ip6table.DeleteChain(IPTable, customDstChain)
ip6table.ClearChain(IPTable, customSrcChain)
ip6table.DeleteChain(IPTable, customSrcChain)
return nil
}
// Watch monitors LXD events and identifies when containers named in the config are stopped or started,
// and disables or enables port forwarding respecitvely
func (f Forwarder) Watch() {
handler := func(i interface{}) {
var container string
var message string
var context map[string]interface{}
data := i.(map[string]interface{})
metadata := data["metadata"].(map[string]interface{})
tmp, ok := metadata["context"]
if ok {
context = tmp.(map[string]interface{})
}
tmp, ok = context["container"]
if ok {
container = tmp.(string)
}
_, ok = f.Forwards[container]
if ok {
tmp, ok := metadata["message"]
if ok {
message = tmp.(string)
}
switch message {
case ContainerStarted:
go func() {
// Wait a few seconds for the newly running container to get an IP address
time.Sleep(2 * time.Second)
f.ForwardContainer(container)
}()
case ContainerStopped:
f.ReverseContainer(container)
}
}
}
f.Monitor([]string{}, handler)
}
| {
return config, err
} | conditional_block |
login.py | """A module for handling the project login related tasks."""
import base64
import binascii
import re
import time
import typing
# aiohttp
import aiohttp.web
import aiohttp_session
from multidict import MultiDictProxy
from oidcrp.exception import OidcServiceError
from swift_browser_ui.ui._convenience import (
disable_cache,
get_availability_from_token,
)
from swift_browser_ui.ui.settings import setd
HAKA_ENDPOINT = (
"{endpoint}/auth/OS-FEDERATION/identity_providers"
"/haka/protocols/saml2/websso?origin={origin}"
).format
HAKA_OIDC_ENDPOINT = (
"{endpoint}/auth/OS-FEDERATION/identity_providers"
"/{oidc}/protocols/openid/websso?origin={origin}"
).format
async def oidc_start(request: aiohttp.web.Request) -> aiohttp.web.Response:
"""Redirect to OpenID Connect provider."""
try:
oidc = request.app["oidc_client"].begin("oidc")
except Exception as e:
# This can be caused if config is improperly configured, and
# oidcrp is unable to fetch oidc configuration from the given URL
request.app["Log"].error(f"OIDC authorization request failed: {e}")
raise aiohttp.web.HTTPInternalServerError(
reason="OIDC authorization request failed."
)
response = aiohttp.web.Response(status=302, reason="Redirection to login")
response.headers["Location"] = oidc["url"]
return response
async def oidc_end(request: aiohttp.web.Request) -> aiohttp.web.Response:
"""Finalize OIDC login and create a new session with the data from the OIDC provicer."""
# Response from AAI must have the query params `state` and `code`
if "state" in request.query and "code" in request.query:
request.app["Log"].debug("AAI response contained the correct params.")
params = {"state": request.query["state"], "code": request.query["code"]}
else:
reason = f"AAI response is missing mandatory params, received: {request.query}"
raise aiohttp.web.HTTPBadRequest(reason=reason)
# Verify oidc_state and retrieve auth session
try:
oidc_session = request.app["oidc_client"].get_session_information(params["state"])
except KeyError as e:
# This exception is raised if the RPHandler doesn't have the supplied "state"
request.app["Log"].error(f"OIDC not initialised: {e}")
raise aiohttp.web.HTTPForbidden(reason="Bad OIDC session.")
oidc_session["auth_request"]["code"] = params["code"]
# finalize requests id_token and access_token with code, validates them and requests userinfo data
try:
oidc_result = request.app["oidc_client"].finalize(
oidc_session["iss"], oidc_session["auth_request"]
)
except KeyError as e:
request.app["Log"].error(f"Issuer {oidc_session['iss']} not found: {e}.")
raise aiohttp.web.HTTPBadRequest(reason="Token issuer not found.")
except OidcServiceError as e:
# This exception is raised if RPHandler encounters an error due to:
# 1. "code" is wrong, so token request failed
# 2. token validation failed
# 3. userinfo request failed
request.app["Log"].error(f"OIDC Callback failed with: {e}")
raise aiohttp.web.HTTPBadRequest(reason="Invalid OIDC callback.")
session = await aiohttp_session.new_session(request)
session["at"] = time.time()
session["referer"] = request.url.host
session["oidc"] = {
"userinfo": oidc_result["userinfo"].to_dict(),
"state": oidc_result["state"],
"access_token": oidc_result["token"],
}
csc_projects: typing.List[typing.Any] | None = _get_projects_from_userinfo(
session["oidc"]["userinfo"]
)
# add entry to session only if the OIDC provider has csc-projects in userinfo
if csc_projects is not None:
session["csc-projects"] = csc_projects
request.app["Log"].debug(session["oidc"])
response = aiohttp.web.Response(
status=302, headers={"Location": "/login"}, reason="Redirection to login"
)
if session["oidc"]["userinfo"].get("homeFederation", "") == "Haka":
response.headers["Location"] = HAKA_OIDC_ENDPOINT(
endpoint=str(setd["auth_endpoint_url"]),
oidc=str(setd["keystone_oidc_provider"]),
origin=str(setd["set_origin_address"]),
)
return response
async def handle_login(
request: aiohttp.web.Request,
) -> typing.Union[aiohttp.web.Response, aiohttp.web.FileResponse]:
"""Create new session cookie for the user."""
response: typing.Union[aiohttp.web.Response, aiohttp.web.FileResponse]
response = aiohttp.web.Response(status=302, reason="Redirection to login")
# Add a cookie for navigating
if "navto" in request.query.keys():
response.set_cookie("NAV_TO", request.query["navto"], expires=str(3600))
if setd["oidc_enabled"]:
session = await aiohttp_session.get_session(request)
if "oidc" in session:
response = aiohttp.web.FileResponse(
str(setd["static_directory"]) + "/login2step.html"
)
else:
response.headers["Location"] = "/"
else:
response.headers["Location"] = "/login/front"
return response
async def sso_query_begin(
request: typing.Union[aiohttp.web.Request, None]
) -> typing.Union[aiohttp.web.Response, aiohttp.web.FileResponse]:
"""Display login page and initiate federated keystone authentication."""
# Return the form based login page if the service isn't trusted
response: typing.Union[aiohttp.web.Response, aiohttp.web.FileResponse]
if request and setd["oidc_enabled"]:
session = await aiohttp_session.get_session(request)
if "oidc" not in session:
return aiohttp.web.Response(status=302, headers={"Location": "/"})
if not setd["has_trust"]:
response = aiohttp.web.FileResponse(str(setd["static_directory"]) + "/login.html")
return disable_cache(response)
response = aiohttp.web.Response(
status=302,
)
response.headers["Location"] = HAKA_ENDPOINT(
endpoint=str(setd["auth_endpoint_url"]), origin=str(setd["set_origin_address"])
)
return response
async def sso_query_begin_oidc(
request: typing.Union[aiohttp.web.Request, None]
) -> typing.Union[aiohttp.web.Response, aiohttp.web.FileResponse]:
"""Initiate a federated Keystone authentication with OIDC."""
response: typing.Union[aiohttp.web.Response, aiohttp.web.FileResponse]
if request and setd["oidc_enabled"]:
session = await aiohttp_session.get_session(request)
if "oidc" not in session:
return aiohttp.web.Response(status=302, headers={"Location": "/"})
if not setd["has_trust"]:
response = aiohttp.web.FileResponse(str(setd["static_directory"]) + "/login.html")
return disable_cache(response)
return aiohttp.web.Response(
status=302,
headers={
"Location": HAKA_OIDC_ENDPOINT(
endpoint=str(setd["auth_endpoint_url"]),
oidc=str(setd["keystone_oidc_provider"]),
origin=str(setd["set_origin_address"]),
),
},
)
def test_token(
formdata: MultiDictProxy[typing.Union[str, bytes, aiohttp.web.FileField]],
request: aiohttp.web.Request,
) -> str:
"""Validate unscoped token."""
unscoped: typing.Union[str, None] = None
log = request.app["Log"]
if "token" in formdata:
unscoped = str(formdata["token"])
log.debug(
f"Got OS token in formdata from address {request.remote} :: {time.ctime()}"
)
# Try getting the token id from form
if "token" in request.query and unscoped is None:
unscoped = request.query["token"]
log.debug(
"Got OS token in query string "
f"from address {request.remote} :: {time.ctime()}"
)
# Try getting the token id from headers
if "X-Auth-Token" in request.headers and unscoped is None:
unscoped = request.headers["X-Auth-Token"]
log.debug(
"Got OS token in http header "
f"from address {request.remote} :: {time.ctime()}"
)
if unscoped is None:
raise aiohttp.web.HTTPBadRequest(reason="Token missing from query")
if not (re.match("[a-f0-9]{32}", unscoped) and len(unscoped) == 32):
try:
# Check the magic byte matches a fernet token
if not base64.urlsafe_b64decode(unscoped.encode("utf-8"))[:1] == b"\x80":
raise aiohttp.web.HTTPBadRequest(reason="Token is malformed")
# Handle failures in base64decode
except (binascii.Error, UnicodeDecodeError):
raise aiohttp.web.HTTPBadRequest(reason="Token is malformed")
log.info("Got OS token in login return")
return unscoped
async def credentials_login_end(
request: aiohttp.web.Request,
) -> typing.Union[aiohttp.web.Response, aiohttp.web.FileResponse]:
"""Handle the login procedure with classic POST."""
log = request.app["Log"]
client = request.app["api_client"]
log.info("Got login request with username, password")
form = await request.post()
try:
username = str(form["username"])
password = str(form["password"])
except KeyError:
raise aiohttp.web.HTTPBadRequest(reason="Username or password not provided")
# Get an unscoped token with credentials
async with client.post(
f"{setd['auth_endpoint_url']}/auth/tokens",
json={
"auth": {
"identity": {
"methods": [
"password",
],
"password": {
"user": {
"name": username,
"domain": {
"name": "Default",
},
"password": password,
},
},
},
"scope": "unscoped",
},
},
) as resp:
if resp.status == 400:
text = await resp.text()
request.app["Log"].debug(text)
raise aiohttp.web.HTTPBadRequest(reason="No username or password provided.")
if resp.status == 401:
text = await resp.text()
request.app["Log"].debug(text)
raise aiohttp.web.HTTPUnauthorized(
reason="Wrong username or password, or no access to the service."
)
if resp.status != 201:
text = await resp.text()
request.app["Log"].debug(text)
raise aiohttp.web.HTTPUnauthorized
unscoped = resp.headers["X-Subject-Token"]
log.debug("Got token in password auth")
return await login_with_token(request, unscoped)
async def sso_query_end(
request: aiohttp.web.Request,
) -> typing.Union[aiohttp.web.Response, aiohttp.web.FileResponse]:
"""Handle the login procedure return from SSO or user from POST."""
formdata = await request.post()
# Declare the unscoped token
unscoped = test_token(formdata, request)
return await login_with_token(request, unscoped)
async def login_with_token(
request: aiohttp.web.Request,
token: str,
) -> typing.Union[aiohttp.web.Response, aiohttp.web.FileResponse]:
"""Log in a session with token."""
# Establish connection and begin user session
response: typing.Union[aiohttp.web.Response, aiohttp.web.FileResponse]
response = aiohttp.web.Response(
status=303,
body=None,
)
client = request.app["api_client"]
session = (
await aiohttp_session.get_session(request)
if setd["oidc_enabled"]
else await aiohttp_session.new_session(request)
)
session["at"] = time.time()
session["referer"] = request.url.host
uname = ""
taint = True if setd["force_restricted_mode"] else False
# Check token availability
avail = await get_availability_from_token(token, client)
csc_projects = session.get("csc-projects", None)
session["projects"] = {}
# Scope a token for all accessible projects
for project in avail["projects"]:
# Filter out projects without a declared access if the OIDC provider supports it
project_without_prefix = project["name"].removeprefix("project_")
if isinstance(csc_projects, list) and project_without_prefix not in csc_projects:
request.app["Log"].debug(
"Project %r is not enabled for sd-connect, skipping",
project["name"],
)
continue
async with client.post(
f"{setd['auth_endpoint_url']}/auth/tokens",
json={
"auth": {
"identity": {
"methods": [
"token",
],
"token": {
"id": token,
},
},
"scope": {"project": {"id": project["id"]}},
}
},
) as resp:
if resp.status == 401:
raise aiohttp.web.HTTPUnauthorized(reason="Token is not valid")
if resp.status == 403:
raise aiohttp.web.HTTPForbidden(reason="No access to service with token.")
ret = await resp.json()
request.app["Log"].debug(f"token output: {ret}")
obj_role = False
request.app["Log"].debug(f'roles: {ret["token"]["roles"]}')
for role in ret["token"]["roles"]:
if role["name"] in str(setd["os_accepted_roles"]).split(";"):
obj_role = True
if not obj_role:
continue
scoped = resp.headers["X-Subject-Token"]
# Use the first available public endpoint
endpoint = [
list(filter(lambda i: i["interface"] == "public", i["endpoints"]))[0]
for i in filter(
lambda i: i["type"] == "object-store", ret["token"]["catalog"]
)
][0]
request.app["Log"].debug(endpoint)
if not uname:
uname = ret["token"]["user"]["name"]
session["projects"][project["id"]] = {
"id": project["id"],
"name": project["name"],
"endpoint": endpoint["url"],
"token": scoped,
"tainted": True if setd["force_restricted_mode"] else False,
}
session["token"] = token
session["uname"] = uname
# the intersection of sdConnectProjects and Allas projects is empty
# in practice this might happen if there are sd connect projects that
# don't have Allas enabled
if not session["projects"]:
request.app["Log"].debug("possible sdConnectProjects and Allas projects mismatch")
raise aiohttp.web.HTTPForbidden(
reason="There are no projects available for this user."
)
session["taint"] = True if taint else False
session.changed()
if taint:
response.headers["Location"] = "/select"
return response
# Redirect to the browse page
if "NAV_TO" in request.cookies.keys():
response.headers["Location"] = request.cookies["NAV_TO"]
response.del_cookie("NAV_TO")
else:
response.headers["Location"] = "/browse"
return response
async def handle_project_lock(request: aiohttp.web.Request) -> aiohttp.web.Response:
"""Lock down to a specific project."""
log = request.app["Log"]
log.info("Call for locking down the project.")
session = await aiohttp_session.get_session(request)
project = request.match_info["project"]
# Ditch all projects that aren't the one specified if project is defined
if project in session["projects"]:
session["projects"] = dict(
filter(
lambda val: val[0] == project,
session["projects"].items(),
)
)
# If the project doesn't exist, allow all untainted projects
else:
session["projects"] = dict(
filter(lambda val: not val[1]["tainted"], session["projects"].items())
)
if not session["projects"]:
session.invalidate()
raise aiohttp.web.HTTPForbidden(reason="No untainted projects available.") | # The session is no longer tainted if it's been locked
session["taint"] = False
session.changed()
return aiohttp.web.Response(
status=303,
body=None,
headers={
"Location": "/browse",
},
)
async def handle_logout(request: aiohttp.web.Request) -> aiohttp.web.Response:
"""Properly kill the session for the user."""
log = request.app["Log"]
client = request.app["api_client"]
if not setd["set_session_devmode"]:
try:
session = await aiohttp_session.get_session(request)
log.info(f"Killing session {session.identity}")
for project in session["projects"]:
async with client.delete(
f"{setd['auth_endpoint_url']}/auth/tokens",
headers={
"X-Auth-Token": session["token"],
"X-Subject-Token": session["projects"][project]["token"],
},
):
pass
session.invalidate()
except aiohttp.web.HTTPUnauthorized:
log.info("Trying to log our an invalidated session")
raise aiohttp.web.HTTPUnauthorized
response = aiohttp.web.Response(status=303)
response.headers["Location"] = "/"
return response
def _get_projects_from_userinfo(
userinfo: typing.Dict[str, typing.Any],
) -> typing.List[typing.Any] | None:
"""Parse projects from userinfo.
:param userinfo: dict from userinfo containing user profile
:returns: None if userinfo doesn't contain csc-projects, or a list with "project_name"s
:raises HTTPUnauthorized in case no projects are available
"""
if "sdConnectProjects" in userinfo:
# Remove the possibly existing "project_" prefix
projects = [
p.removeprefix("project_") for p in userinfo["sdConnectProjects"].split(" ")
]
# we add this check in case the claim `sdConnectProjects does not exist`
# and we want to enforce this at deployment
elif setd["sdconnect_enabled"] and "sdConnectProjects" not in userinfo:
projects = []
else:
return None
if len(projects) == 0:
# No project group information received, aborting
raise aiohttp.web.HTTPUnauthorized(reason="User is not a member of any project.")
return projects | random_line_split | |
login.py | """A module for handling the project login related tasks."""
import base64
import binascii
import re
import time
import typing
# aiohttp
import aiohttp.web
import aiohttp_session
from multidict import MultiDictProxy
from oidcrp.exception import OidcServiceError
from swift_browser_ui.ui._convenience import (
disable_cache,
get_availability_from_token,
)
from swift_browser_ui.ui.settings import setd
HAKA_ENDPOINT = (
"{endpoint}/auth/OS-FEDERATION/identity_providers"
"/haka/protocols/saml2/websso?origin={origin}"
).format
HAKA_OIDC_ENDPOINT = (
"{endpoint}/auth/OS-FEDERATION/identity_providers"
"/{oidc}/protocols/openid/websso?origin={origin}"
).format
async def oidc_start(request: aiohttp.web.Request) -> aiohttp.web.Response:
"""Redirect to OpenID Connect provider."""
try:
oidc = request.app["oidc_client"].begin("oidc")
except Exception as e:
# This can be caused if config is improperly configured, and
# oidcrp is unable to fetch oidc configuration from the given URL
request.app["Log"].error(f"OIDC authorization request failed: {e}")
raise aiohttp.web.HTTPInternalServerError(
reason="OIDC authorization request failed."
)
response = aiohttp.web.Response(status=302, reason="Redirection to login")
response.headers["Location"] = oidc["url"]
return response
async def oidc_end(request: aiohttp.web.Request) -> aiohttp.web.Response:
"""Finalize OIDC login and create a new session with the data from the OIDC provicer."""
# Response from AAI must have the query params `state` and `code`
if "state" in request.query and "code" in request.query:
request.app["Log"].debug("AAI response contained the correct params.")
params = {"state": request.query["state"], "code": request.query["code"]}
else:
reason = f"AAI response is missing mandatory params, received: {request.query}"
raise aiohttp.web.HTTPBadRequest(reason=reason)
# Verify oidc_state and retrieve auth session
try:
oidc_session = request.app["oidc_client"].get_session_information(params["state"])
except KeyError as e:
# This exception is raised if the RPHandler doesn't have the supplied "state"
request.app["Log"].error(f"OIDC not initialised: {e}")
raise aiohttp.web.HTTPForbidden(reason="Bad OIDC session.")
oidc_session["auth_request"]["code"] = params["code"]
# finalize requests id_token and access_token with code, validates them and requests userinfo data
try:
oidc_result = request.app["oidc_client"].finalize(
oidc_session["iss"], oidc_session["auth_request"]
)
except KeyError as e:
request.app["Log"].error(f"Issuer {oidc_session['iss']} not found: {e}.")
raise aiohttp.web.HTTPBadRequest(reason="Token issuer not found.")
except OidcServiceError as e:
# This exception is raised if RPHandler encounters an error due to:
# 1. "code" is wrong, so token request failed
# 2. token validation failed
# 3. userinfo request failed
request.app["Log"].error(f"OIDC Callback failed with: {e}")
raise aiohttp.web.HTTPBadRequest(reason="Invalid OIDC callback.")
session = await aiohttp_session.new_session(request)
session["at"] = time.time()
session["referer"] = request.url.host
session["oidc"] = {
"userinfo": oidc_result["userinfo"].to_dict(),
"state": oidc_result["state"],
"access_token": oidc_result["token"],
}
csc_projects: typing.List[typing.Any] | None = _get_projects_from_userinfo(
session["oidc"]["userinfo"]
)
# add entry to session only if the OIDC provider has csc-projects in userinfo
if csc_projects is not None:
session["csc-projects"] = csc_projects
request.app["Log"].debug(session["oidc"])
response = aiohttp.web.Response(
status=302, headers={"Location": "/login"}, reason="Redirection to login"
)
if session["oidc"]["userinfo"].get("homeFederation", "") == "Haka":
response.headers["Location"] = HAKA_OIDC_ENDPOINT(
endpoint=str(setd["auth_endpoint_url"]),
oidc=str(setd["keystone_oidc_provider"]),
origin=str(setd["set_origin_address"]),
)
return response
async def handle_login(
request: aiohttp.web.Request,
) -> typing.Union[aiohttp.web.Response, aiohttp.web.FileResponse]:
"""Create new session cookie for the user."""
response: typing.Union[aiohttp.web.Response, aiohttp.web.FileResponse]
response = aiohttp.web.Response(status=302, reason="Redirection to login")
# Add a cookie for navigating
if "navto" in request.query.keys():
response.set_cookie("NAV_TO", request.query["navto"], expires=str(3600))
if setd["oidc_enabled"]:
session = await aiohttp_session.get_session(request)
if "oidc" in session:
response = aiohttp.web.FileResponse(
str(setd["static_directory"]) + "/login2step.html"
)
else:
response.headers["Location"] = "/"
else:
response.headers["Location"] = "/login/front"
return response
async def sso_query_begin(
request: typing.Union[aiohttp.web.Request, None]
) -> typing.Union[aiohttp.web.Response, aiohttp.web.FileResponse]:
"""Display login page and initiate federated keystone authentication."""
# Return the form based login page if the service isn't trusted
response: typing.Union[aiohttp.web.Response, aiohttp.web.FileResponse]
if request and setd["oidc_enabled"]:
session = await aiohttp_session.get_session(request)
if "oidc" not in session:
return aiohttp.web.Response(status=302, headers={"Location": "/"})
if not setd["has_trust"]:
response = aiohttp.web.FileResponse(str(setd["static_directory"]) + "/login.html")
return disable_cache(response)
response = aiohttp.web.Response(
status=302,
)
response.headers["Location"] = HAKA_ENDPOINT(
endpoint=str(setd["auth_endpoint_url"]), origin=str(setd["set_origin_address"])
)
return response
async def sso_query_begin_oidc(
request: typing.Union[aiohttp.web.Request, None]
) -> typing.Union[aiohttp.web.Response, aiohttp.web.FileResponse]:
"""Initiate a federated Keystone authentication with OIDC."""
response: typing.Union[aiohttp.web.Response, aiohttp.web.FileResponse]
if request and setd["oidc_enabled"]:
session = await aiohttp_session.get_session(request)
if "oidc" not in session:
return aiohttp.web.Response(status=302, headers={"Location": "/"})
if not setd["has_trust"]:
response = aiohttp.web.FileResponse(str(setd["static_directory"]) + "/login.html")
return disable_cache(response)
return aiohttp.web.Response(
status=302,
headers={
"Location": HAKA_OIDC_ENDPOINT(
endpoint=str(setd["auth_endpoint_url"]),
oidc=str(setd["keystone_oidc_provider"]),
origin=str(setd["set_origin_address"]),
),
},
)
def test_token(
formdata: MultiDictProxy[typing.Union[str, bytes, aiohttp.web.FileField]],
request: aiohttp.web.Request,
) -> str:
"""Validate unscoped token."""
unscoped: typing.Union[str, None] = None
log = request.app["Log"]
if "token" in formdata:
unscoped = str(formdata["token"])
log.debug(
f"Got OS token in formdata from address {request.remote} :: {time.ctime()}"
)
# Try getting the token id from form
if "token" in request.query and unscoped is None:
unscoped = request.query["token"]
log.debug(
"Got OS token in query string "
f"from address {request.remote} :: {time.ctime()}"
)
# Try getting the token id from headers
if "X-Auth-Token" in request.headers and unscoped is None:
unscoped = request.headers["X-Auth-Token"]
log.debug(
"Got OS token in http header "
f"from address {request.remote} :: {time.ctime()}"
)
if unscoped is None:
raise aiohttp.web.HTTPBadRequest(reason="Token missing from query")
if not (re.match("[a-f0-9]{32}", unscoped) and len(unscoped) == 32):
try:
# Check the magic byte matches a fernet token
if not base64.urlsafe_b64decode(unscoped.encode("utf-8"))[:1] == b"\x80":
raise aiohttp.web.HTTPBadRequest(reason="Token is malformed")
# Handle failures in base64decode
except (binascii.Error, UnicodeDecodeError):
raise aiohttp.web.HTTPBadRequest(reason="Token is malformed")
log.info("Got OS token in login return")
return unscoped
async def credentials_login_end(
request: aiohttp.web.Request,
) -> typing.Union[aiohttp.web.Response, aiohttp.web.FileResponse]:
"""Handle the login procedure with classic POST."""
log = request.app["Log"]
client = request.app["api_client"]
log.info("Got login request with username, password")
form = await request.post()
try:
username = str(form["username"])
password = str(form["password"])
except KeyError:
raise aiohttp.web.HTTPBadRequest(reason="Username or password not provided")
# Get an unscoped token with credentials
async with client.post(
f"{setd['auth_endpoint_url']}/auth/tokens",
json={
"auth": {
"identity": {
"methods": [
"password",
],
"password": {
"user": {
"name": username,
"domain": {
"name": "Default",
},
"password": password,
},
},
},
"scope": "unscoped",
},
},
) as resp:
if resp.status == 400:
text = await resp.text()
request.app["Log"].debug(text)
raise aiohttp.web.HTTPBadRequest(reason="No username or password provided.")
if resp.status == 401:
text = await resp.text()
request.app["Log"].debug(text)
raise aiohttp.web.HTTPUnauthorized(
reason="Wrong username or password, or no access to the service."
)
if resp.status != 201:
text = await resp.text()
request.app["Log"].debug(text)
raise aiohttp.web.HTTPUnauthorized
unscoped = resp.headers["X-Subject-Token"]
log.debug("Got token in password auth")
return await login_with_token(request, unscoped)
async def sso_query_end(
request: aiohttp.web.Request,
) -> typing.Union[aiohttp.web.Response, aiohttp.web.FileResponse]:
"""Handle the login procedure return from SSO or user from POST."""
formdata = await request.post()
# Declare the unscoped token
unscoped = test_token(formdata, request)
return await login_with_token(request, unscoped)
async def login_with_token(
request: aiohttp.web.Request,
token: str,
) -> typing.Union[aiohttp.web.Response, aiohttp.web.FileResponse]:
"""Log in a session with token."""
# Establish connection and begin user session
response: typing.Union[aiohttp.web.Response, aiohttp.web.FileResponse]
response = aiohttp.web.Response(
status=303,
body=None,
)
client = request.app["api_client"]
session = (
await aiohttp_session.get_session(request)
if setd["oidc_enabled"]
else await aiohttp_session.new_session(request)
)
session["at"] = time.time()
session["referer"] = request.url.host
uname = ""
taint = True if setd["force_restricted_mode"] else False
# Check token availability
avail = await get_availability_from_token(token, client)
csc_projects = session.get("csc-projects", None)
session["projects"] = {}
# Scope a token for all accessible projects
for project in avail["projects"]:
# Filter out projects without a declared access if the OIDC provider supports it
project_without_prefix = project["name"].removeprefix("project_")
if isinstance(csc_projects, list) and project_without_prefix not in csc_projects:
request.app["Log"].debug(
"Project %r is not enabled for sd-connect, skipping",
project["name"],
)
continue
async with client.post(
f"{setd['auth_endpoint_url']}/auth/tokens",
json={
"auth": {
"identity": {
"methods": [
"token",
],
"token": {
"id": token,
},
},
"scope": {"project": {"id": project["id"]}},
}
},
) as resp:
if resp.status == 401:
raise aiohttp.web.HTTPUnauthorized(reason="Token is not valid")
if resp.status == 403:
raise aiohttp.web.HTTPForbidden(reason="No access to service with token.")
ret = await resp.json()
request.app["Log"].debug(f"token output: {ret}")
obj_role = False
request.app["Log"].debug(f'roles: {ret["token"]["roles"]}')
for role in ret["token"]["roles"]:
if role["name"] in str(setd["os_accepted_roles"]).split(";"):
obj_role = True
if not obj_role:
continue
scoped = resp.headers["X-Subject-Token"]
# Use the first available public endpoint
endpoint = [
list(filter(lambda i: i["interface"] == "public", i["endpoints"]))[0]
for i in filter(
lambda i: i["type"] == "object-store", ret["token"]["catalog"]
)
][0]
request.app["Log"].debug(endpoint)
if not uname:
uname = ret["token"]["user"]["name"]
session["projects"][project["id"]] = {
"id": project["id"],
"name": project["name"],
"endpoint": endpoint["url"],
"token": scoped,
"tainted": True if setd["force_restricted_mode"] else False,
}
session["token"] = token
session["uname"] = uname
# the intersection of sdConnectProjects and Allas projects is empty
# in practice this might happen if there are sd connect projects that
# don't have Allas enabled
if not session["projects"]:
request.app["Log"].debug("possible sdConnectProjects and Allas projects mismatch")
raise aiohttp.web.HTTPForbidden(
reason="There are no projects available for this user."
)
session["taint"] = True if taint else False
session.changed()
if taint:
response.headers["Location"] = "/select"
return response
# Redirect to the browse page
if "NAV_TO" in request.cookies.keys():
response.headers["Location"] = request.cookies["NAV_TO"]
response.del_cookie("NAV_TO")
else:
response.headers["Location"] = "/browse"
return response
async def handle_project_lock(request: aiohttp.web.Request) -> aiohttp.web.Response:
"""Lock down to a specific project."""
log = request.app["Log"]
log.info("Call for locking down the project.")
session = await aiohttp_session.get_session(request)
project = request.match_info["project"]
# Ditch all projects that aren't the one specified if project is defined
if project in session["projects"]:
session["projects"] = dict(
filter(
lambda val: val[0] == project,
session["projects"].items(),
)
)
# If the project doesn't exist, allow all untainted projects
else:
|
if not session["projects"]:
session.invalidate()
raise aiohttp.web.HTTPForbidden(reason="No untainted projects available.")
# The session is no longer tainted if it's been locked
session["taint"] = False
session.changed()
return aiohttp.web.Response(
status=303,
body=None,
headers={
"Location": "/browse",
},
)
async def handle_logout(request: aiohttp.web.Request) -> aiohttp.web.Response:
"""Properly kill the session for the user."""
log = request.app["Log"]
client = request.app["api_client"]
if not setd["set_session_devmode"]:
try:
session = await aiohttp_session.get_session(request)
log.info(f"Killing session {session.identity}")
for project in session["projects"]:
async with client.delete(
f"{setd['auth_endpoint_url']}/auth/tokens",
headers={
"X-Auth-Token": session["token"],
"X-Subject-Token": session["projects"][project]["token"],
},
):
pass
session.invalidate()
except aiohttp.web.HTTPUnauthorized:
log.info("Trying to log our an invalidated session")
raise aiohttp.web.HTTPUnauthorized
response = aiohttp.web.Response(status=303)
response.headers["Location"] = "/"
return response
def _get_projects_from_userinfo(
userinfo: typing.Dict[str, typing.Any],
) -> typing.List[typing.Any] | None:
"""Parse projects from userinfo.
:param userinfo: dict from userinfo containing user profile
:returns: None if userinfo doesn't contain csc-projects, or a list with "project_name"s
:raises HTTPUnauthorized in case no projects are available
"""
if "sdConnectProjects" in userinfo:
# Remove the possibly existing "project_" prefix
projects = [
p.removeprefix("project_") for p in userinfo["sdConnectProjects"].split(" ")
]
# we add this check in case the claim `sdConnectProjects does not exist`
# and we want to enforce this at deployment
elif setd["sdconnect_enabled"] and "sdConnectProjects" not in userinfo:
projects = []
else:
return None
if len(projects) == 0:
# No project group information received, aborting
raise aiohttp.web.HTTPUnauthorized(reason="User is not a member of any project.")
return projects
| session["projects"] = dict(
filter(lambda val: not val[1]["tainted"], session["projects"].items())
) | conditional_block |
login.py | """A module for handling the project login related tasks."""
import base64
import binascii
import re
import time
import typing
# aiohttp
import aiohttp.web
import aiohttp_session
from multidict import MultiDictProxy
from oidcrp.exception import OidcServiceError
from swift_browser_ui.ui._convenience import (
disable_cache,
get_availability_from_token,
)
from swift_browser_ui.ui.settings import setd
HAKA_ENDPOINT = (
"{endpoint}/auth/OS-FEDERATION/identity_providers"
"/haka/protocols/saml2/websso?origin={origin}"
).format
HAKA_OIDC_ENDPOINT = (
"{endpoint}/auth/OS-FEDERATION/identity_providers"
"/{oidc}/protocols/openid/websso?origin={origin}"
).format
async def oidc_start(request: aiohttp.web.Request) -> aiohttp.web.Response:
"""Redirect to OpenID Connect provider."""
try:
oidc = request.app["oidc_client"].begin("oidc")
except Exception as e:
# This can be caused if config is improperly configured, and
# oidcrp is unable to fetch oidc configuration from the given URL
request.app["Log"].error(f"OIDC authorization request failed: {e}")
raise aiohttp.web.HTTPInternalServerError(
reason="OIDC authorization request failed."
)
response = aiohttp.web.Response(status=302, reason="Redirection to login")
response.headers["Location"] = oidc["url"]
return response
async def oidc_end(request: aiohttp.web.Request) -> aiohttp.web.Response:
"""Finalize OIDC login and create a new session with the data from the OIDC provicer."""
# Response from AAI must have the query params `state` and `code`
if "state" in request.query and "code" in request.query:
request.app["Log"].debug("AAI response contained the correct params.")
params = {"state": request.query["state"], "code": request.query["code"]}
else:
reason = f"AAI response is missing mandatory params, received: {request.query}"
raise aiohttp.web.HTTPBadRequest(reason=reason)
# Verify oidc_state and retrieve auth session
try:
oidc_session = request.app["oidc_client"].get_session_information(params["state"])
except KeyError as e:
# This exception is raised if the RPHandler doesn't have the supplied "state"
request.app["Log"].error(f"OIDC not initialised: {e}")
raise aiohttp.web.HTTPForbidden(reason="Bad OIDC session.")
oidc_session["auth_request"]["code"] = params["code"]
# finalize requests id_token and access_token with code, validates them and requests userinfo data
try:
oidc_result = request.app["oidc_client"].finalize(
oidc_session["iss"], oidc_session["auth_request"]
)
except KeyError as e:
request.app["Log"].error(f"Issuer {oidc_session['iss']} not found: {e}.")
raise aiohttp.web.HTTPBadRequest(reason="Token issuer not found.")
except OidcServiceError as e:
# This exception is raised if RPHandler encounters an error due to:
# 1. "code" is wrong, so token request failed
# 2. token validation failed
# 3. userinfo request failed
request.app["Log"].error(f"OIDC Callback failed with: {e}")
raise aiohttp.web.HTTPBadRequest(reason="Invalid OIDC callback.")
session = await aiohttp_session.new_session(request)
session["at"] = time.time()
session["referer"] = request.url.host
session["oidc"] = {
"userinfo": oidc_result["userinfo"].to_dict(),
"state": oidc_result["state"],
"access_token": oidc_result["token"],
}
csc_projects: typing.List[typing.Any] | None = _get_projects_from_userinfo(
session["oidc"]["userinfo"]
)
# add entry to session only if the OIDC provider has csc-projects in userinfo
if csc_projects is not None:
session["csc-projects"] = csc_projects
request.app["Log"].debug(session["oidc"])
response = aiohttp.web.Response(
status=302, headers={"Location": "/login"}, reason="Redirection to login"
)
if session["oidc"]["userinfo"].get("homeFederation", "") == "Haka":
response.headers["Location"] = HAKA_OIDC_ENDPOINT(
endpoint=str(setd["auth_endpoint_url"]),
oidc=str(setd["keystone_oidc_provider"]),
origin=str(setd["set_origin_address"]),
)
return response
async def handle_login(
request: aiohttp.web.Request,
) -> typing.Union[aiohttp.web.Response, aiohttp.web.FileResponse]:
"""Create new session cookie for the user."""
response: typing.Union[aiohttp.web.Response, aiohttp.web.FileResponse]
response = aiohttp.web.Response(status=302, reason="Redirection to login")
# Add a cookie for navigating
if "navto" in request.query.keys():
response.set_cookie("NAV_TO", request.query["navto"], expires=str(3600))
if setd["oidc_enabled"]:
session = await aiohttp_session.get_session(request)
if "oidc" in session:
response = aiohttp.web.FileResponse(
str(setd["static_directory"]) + "/login2step.html"
)
else:
response.headers["Location"] = "/"
else:
response.headers["Location"] = "/login/front"
return response
async def sso_query_begin(
request: typing.Union[aiohttp.web.Request, None]
) -> typing.Union[aiohttp.web.Response, aiohttp.web.FileResponse]:
"""Display login page and initiate federated keystone authentication."""
# Return the form based login page if the service isn't trusted
response: typing.Union[aiohttp.web.Response, aiohttp.web.FileResponse]
if request and setd["oidc_enabled"]:
session = await aiohttp_session.get_session(request)
if "oidc" not in session:
return aiohttp.web.Response(status=302, headers={"Location": "/"})
if not setd["has_trust"]:
response = aiohttp.web.FileResponse(str(setd["static_directory"]) + "/login.html")
return disable_cache(response)
response = aiohttp.web.Response(
status=302,
)
response.headers["Location"] = HAKA_ENDPOINT(
endpoint=str(setd["auth_endpoint_url"]), origin=str(setd["set_origin_address"])
)
return response
async def sso_query_begin_oidc(
request: typing.Union[aiohttp.web.Request, None]
) -> typing.Union[aiohttp.web.Response, aiohttp.web.FileResponse]:
"""Initiate a federated Keystone authentication with OIDC."""
response: typing.Union[aiohttp.web.Response, aiohttp.web.FileResponse]
if request and setd["oidc_enabled"]:
session = await aiohttp_session.get_session(request)
if "oidc" not in session:
return aiohttp.web.Response(status=302, headers={"Location": "/"})
if not setd["has_trust"]:
response = aiohttp.web.FileResponse(str(setd["static_directory"]) + "/login.html")
return disable_cache(response)
return aiohttp.web.Response(
status=302,
headers={
"Location": HAKA_OIDC_ENDPOINT(
endpoint=str(setd["auth_endpoint_url"]),
oidc=str(setd["keystone_oidc_provider"]),
origin=str(setd["set_origin_address"]),
),
},
)
def test_token(
formdata: MultiDictProxy[typing.Union[str, bytes, aiohttp.web.FileField]],
request: aiohttp.web.Request,
) -> str:
"""Validate unscoped token."""
unscoped: typing.Union[str, None] = None
log = request.app["Log"]
if "token" in formdata:
unscoped = str(formdata["token"])
log.debug(
f"Got OS token in formdata from address {request.remote} :: {time.ctime()}"
)
# Try getting the token id from form
if "token" in request.query and unscoped is None:
unscoped = request.query["token"]
log.debug(
"Got OS token in query string "
f"from address {request.remote} :: {time.ctime()}"
)
# Try getting the token id from headers
if "X-Auth-Token" in request.headers and unscoped is None:
unscoped = request.headers["X-Auth-Token"]
log.debug(
"Got OS token in http header "
f"from address {request.remote} :: {time.ctime()}"
)
if unscoped is None:
raise aiohttp.web.HTTPBadRequest(reason="Token missing from query")
if not (re.match("[a-f0-9]{32}", unscoped) and len(unscoped) == 32):
try:
# Check the magic byte matches a fernet token
if not base64.urlsafe_b64decode(unscoped.encode("utf-8"))[:1] == b"\x80":
raise aiohttp.web.HTTPBadRequest(reason="Token is malformed")
# Handle failures in base64decode
except (binascii.Error, UnicodeDecodeError):
raise aiohttp.web.HTTPBadRequest(reason="Token is malformed")
log.info("Got OS token in login return")
return unscoped
async def credentials_login_end(
request: aiohttp.web.Request,
) -> typing.Union[aiohttp.web.Response, aiohttp.web.FileResponse]:
"""Handle the login procedure with classic POST."""
log = request.app["Log"]
client = request.app["api_client"]
log.info("Got login request with username, password")
form = await request.post()
try:
username = str(form["username"])
password = str(form["password"])
except KeyError:
raise aiohttp.web.HTTPBadRequest(reason="Username or password not provided")
# Get an unscoped token with credentials
async with client.post(
f"{setd['auth_endpoint_url']}/auth/tokens",
json={
"auth": {
"identity": {
"methods": [
"password",
],
"password": {
"user": {
"name": username,
"domain": {
"name": "Default",
},
"password": password,
},
},
},
"scope": "unscoped",
},
},
) as resp:
if resp.status == 400:
text = await resp.text()
request.app["Log"].debug(text)
raise aiohttp.web.HTTPBadRequest(reason="No username or password provided.")
if resp.status == 401:
text = await resp.text()
request.app["Log"].debug(text)
raise aiohttp.web.HTTPUnauthorized(
reason="Wrong username or password, or no access to the service."
)
if resp.status != 201:
text = await resp.text()
request.app["Log"].debug(text)
raise aiohttp.web.HTTPUnauthorized
unscoped = resp.headers["X-Subject-Token"]
log.debug("Got token in password auth")
return await login_with_token(request, unscoped)
async def sso_query_end(
request: aiohttp.web.Request,
) -> typing.Union[aiohttp.web.Response, aiohttp.web.FileResponse]:
"""Handle the login procedure return from SSO or user from POST."""
formdata = await request.post()
# Declare the unscoped token
unscoped = test_token(formdata, request)
return await login_with_token(request, unscoped)
async def login_with_token(
request: aiohttp.web.Request,
token: str,
) -> typing.Union[aiohttp.web.Response, aiohttp.web.FileResponse]:
"""Log in a session with token."""
# Establish connection and begin user session
response: typing.Union[aiohttp.web.Response, aiohttp.web.FileResponse]
response = aiohttp.web.Response(
status=303,
body=None,
)
client = request.app["api_client"]
session = (
await aiohttp_session.get_session(request)
if setd["oidc_enabled"]
else await aiohttp_session.new_session(request)
)
session["at"] = time.time()
session["referer"] = request.url.host
uname = ""
taint = True if setd["force_restricted_mode"] else False
# Check token availability
avail = await get_availability_from_token(token, client)
csc_projects = session.get("csc-projects", None)
session["projects"] = {}
# Scope a token for all accessible projects
for project in avail["projects"]:
# Filter out projects without a declared access if the OIDC provider supports it
project_without_prefix = project["name"].removeprefix("project_")
if isinstance(csc_projects, list) and project_without_prefix not in csc_projects:
request.app["Log"].debug(
"Project %r is not enabled for sd-connect, skipping",
project["name"],
)
continue
async with client.post(
f"{setd['auth_endpoint_url']}/auth/tokens",
json={
"auth": {
"identity": {
"methods": [
"token",
],
"token": {
"id": token,
},
},
"scope": {"project": {"id": project["id"]}},
}
},
) as resp:
if resp.status == 401:
raise aiohttp.web.HTTPUnauthorized(reason="Token is not valid")
if resp.status == 403:
raise aiohttp.web.HTTPForbidden(reason="No access to service with token.")
ret = await resp.json()
request.app["Log"].debug(f"token output: {ret}")
obj_role = False
request.app["Log"].debug(f'roles: {ret["token"]["roles"]}')
for role in ret["token"]["roles"]:
if role["name"] in str(setd["os_accepted_roles"]).split(";"):
obj_role = True
if not obj_role:
continue
scoped = resp.headers["X-Subject-Token"]
# Use the first available public endpoint
endpoint = [
list(filter(lambda i: i["interface"] == "public", i["endpoints"]))[0]
for i in filter(
lambda i: i["type"] == "object-store", ret["token"]["catalog"]
)
][0]
request.app["Log"].debug(endpoint)
if not uname:
uname = ret["token"]["user"]["name"]
session["projects"][project["id"]] = {
"id": project["id"],
"name": project["name"],
"endpoint": endpoint["url"],
"token": scoped,
"tainted": True if setd["force_restricted_mode"] else False,
}
session["token"] = token
session["uname"] = uname
# the intersection of sdConnectProjects and Allas projects is empty
# in practice this might happen if there are sd connect projects that
# don't have Allas enabled
if not session["projects"]:
request.app["Log"].debug("possible sdConnectProjects and Allas projects mismatch")
raise aiohttp.web.HTTPForbidden(
reason="There are no projects available for this user."
)
session["taint"] = True if taint else False
session.changed()
if taint:
response.headers["Location"] = "/select"
return response
# Redirect to the browse page
if "NAV_TO" in request.cookies.keys():
response.headers["Location"] = request.cookies["NAV_TO"]
response.del_cookie("NAV_TO")
else:
response.headers["Location"] = "/browse"
return response
async def handle_project_lock(request: aiohttp.web.Request) -> aiohttp.web.Response:
"""Lock down to a specific project."""
log = request.app["Log"]
log.info("Call for locking down the project.")
session = await aiohttp_session.get_session(request)
project = request.match_info["project"]
# Ditch all projects that aren't the one specified if project is defined
if project in session["projects"]:
session["projects"] = dict(
filter(
lambda val: val[0] == project,
session["projects"].items(),
)
)
# If the project doesn't exist, allow all untainted projects
else:
session["projects"] = dict(
filter(lambda val: not val[1]["tainted"], session["projects"].items())
)
if not session["projects"]:
session.invalidate()
raise aiohttp.web.HTTPForbidden(reason="No untainted projects available.")
# The session is no longer tainted if it's been locked
session["taint"] = False
session.changed()
return aiohttp.web.Response(
status=303,
body=None,
headers={
"Location": "/browse",
},
)
async def handle_logout(request: aiohttp.web.Request) -> aiohttp.web.Response:
|
def _get_projects_from_userinfo(
userinfo: typing.Dict[str, typing.Any],
) -> typing.List[typing.Any] | None:
"""Parse projects from userinfo.
:param userinfo: dict from userinfo containing user profile
:returns: None if userinfo doesn't contain csc-projects, or a list with "project_name"s
:raises HTTPUnauthorized in case no projects are available
"""
if "sdConnectProjects" in userinfo:
# Remove the possibly existing "project_" prefix
projects = [
p.removeprefix("project_") for p in userinfo["sdConnectProjects"].split(" ")
]
# we add this check in case the claim `sdConnectProjects does not exist`
# and we want to enforce this at deployment
elif setd["sdconnect_enabled"] and "sdConnectProjects" not in userinfo:
projects = []
else:
return None
if len(projects) == 0:
# No project group information received, aborting
raise aiohttp.web.HTTPUnauthorized(reason="User is not a member of any project.")
return projects
| """Properly kill the session for the user."""
log = request.app["Log"]
client = request.app["api_client"]
if not setd["set_session_devmode"]:
try:
session = await aiohttp_session.get_session(request)
log.info(f"Killing session {session.identity}")
for project in session["projects"]:
async with client.delete(
f"{setd['auth_endpoint_url']}/auth/tokens",
headers={
"X-Auth-Token": session["token"],
"X-Subject-Token": session["projects"][project]["token"],
},
):
pass
session.invalidate()
except aiohttp.web.HTTPUnauthorized:
log.info("Trying to log our an invalidated session")
raise aiohttp.web.HTTPUnauthorized
response = aiohttp.web.Response(status=303)
response.headers["Location"] = "/"
return response | identifier_body |
login.py | """A module for handling the project login related tasks."""
import base64
import binascii
import re
import time
import typing
# aiohttp
import aiohttp.web
import aiohttp_session
from multidict import MultiDictProxy
from oidcrp.exception import OidcServiceError
from swift_browser_ui.ui._convenience import (
disable_cache,
get_availability_from_token,
)
from swift_browser_ui.ui.settings import setd
HAKA_ENDPOINT = (
"{endpoint}/auth/OS-FEDERATION/identity_providers"
"/haka/protocols/saml2/websso?origin={origin}"
).format
HAKA_OIDC_ENDPOINT = (
"{endpoint}/auth/OS-FEDERATION/identity_providers"
"/{oidc}/protocols/openid/websso?origin={origin}"
).format
async def oidc_start(request: aiohttp.web.Request) -> aiohttp.web.Response:
"""Redirect to OpenID Connect provider."""
try:
oidc = request.app["oidc_client"].begin("oidc")
except Exception as e:
# This can be caused if config is improperly configured, and
# oidcrp is unable to fetch oidc configuration from the given URL
request.app["Log"].error(f"OIDC authorization request failed: {e}")
raise aiohttp.web.HTTPInternalServerError(
reason="OIDC authorization request failed."
)
response = aiohttp.web.Response(status=302, reason="Redirection to login")
response.headers["Location"] = oidc["url"]
return response
async def oidc_end(request: aiohttp.web.Request) -> aiohttp.web.Response:
"""Finalize OIDC login and create a new session with the data from the OIDC provicer."""
# Response from AAI must have the query params `state` and `code`
if "state" in request.query and "code" in request.query:
request.app["Log"].debug("AAI response contained the correct params.")
params = {"state": request.query["state"], "code": request.query["code"]}
else:
reason = f"AAI response is missing mandatory params, received: {request.query}"
raise aiohttp.web.HTTPBadRequest(reason=reason)
# Verify oidc_state and retrieve auth session
try:
oidc_session = request.app["oidc_client"].get_session_information(params["state"])
except KeyError as e:
# This exception is raised if the RPHandler doesn't have the supplied "state"
request.app["Log"].error(f"OIDC not initialised: {e}")
raise aiohttp.web.HTTPForbidden(reason="Bad OIDC session.")
oidc_session["auth_request"]["code"] = params["code"]
# finalize requests id_token and access_token with code, validates them and requests userinfo data
try:
oidc_result = request.app["oidc_client"].finalize(
oidc_session["iss"], oidc_session["auth_request"]
)
except KeyError as e:
request.app["Log"].error(f"Issuer {oidc_session['iss']} not found: {e}.")
raise aiohttp.web.HTTPBadRequest(reason="Token issuer not found.")
except OidcServiceError as e:
# This exception is raised if RPHandler encounters an error due to:
# 1. "code" is wrong, so token request failed
# 2. token validation failed
# 3. userinfo request failed
request.app["Log"].error(f"OIDC Callback failed with: {e}")
raise aiohttp.web.HTTPBadRequest(reason="Invalid OIDC callback.")
session = await aiohttp_session.new_session(request)
session["at"] = time.time()
session["referer"] = request.url.host
session["oidc"] = {
"userinfo": oidc_result["userinfo"].to_dict(),
"state": oidc_result["state"],
"access_token": oidc_result["token"],
}
csc_projects: typing.List[typing.Any] | None = _get_projects_from_userinfo(
session["oidc"]["userinfo"]
)
# add entry to session only if the OIDC provider has csc-projects in userinfo
if csc_projects is not None:
session["csc-projects"] = csc_projects
request.app["Log"].debug(session["oidc"])
response = aiohttp.web.Response(
status=302, headers={"Location": "/login"}, reason="Redirection to login"
)
if session["oidc"]["userinfo"].get("homeFederation", "") == "Haka":
response.headers["Location"] = HAKA_OIDC_ENDPOINT(
endpoint=str(setd["auth_endpoint_url"]),
oidc=str(setd["keystone_oidc_provider"]),
origin=str(setd["set_origin_address"]),
)
return response
async def | (
request: aiohttp.web.Request,
) -> typing.Union[aiohttp.web.Response, aiohttp.web.FileResponse]:
"""Create new session cookie for the user."""
response: typing.Union[aiohttp.web.Response, aiohttp.web.FileResponse]
response = aiohttp.web.Response(status=302, reason="Redirection to login")
# Add a cookie for navigating
if "navto" in request.query.keys():
response.set_cookie("NAV_TO", request.query["navto"], expires=str(3600))
if setd["oidc_enabled"]:
session = await aiohttp_session.get_session(request)
if "oidc" in session:
response = aiohttp.web.FileResponse(
str(setd["static_directory"]) + "/login2step.html"
)
else:
response.headers["Location"] = "/"
else:
response.headers["Location"] = "/login/front"
return response
async def sso_query_begin(
request: typing.Union[aiohttp.web.Request, None]
) -> typing.Union[aiohttp.web.Response, aiohttp.web.FileResponse]:
"""Display login page and initiate federated keystone authentication."""
# Return the form based login page if the service isn't trusted
response: typing.Union[aiohttp.web.Response, aiohttp.web.FileResponse]
if request and setd["oidc_enabled"]:
session = await aiohttp_session.get_session(request)
if "oidc" not in session:
return aiohttp.web.Response(status=302, headers={"Location": "/"})
if not setd["has_trust"]:
response = aiohttp.web.FileResponse(str(setd["static_directory"]) + "/login.html")
return disable_cache(response)
response = aiohttp.web.Response(
status=302,
)
response.headers["Location"] = HAKA_ENDPOINT(
endpoint=str(setd["auth_endpoint_url"]), origin=str(setd["set_origin_address"])
)
return response
async def sso_query_begin_oidc(
request: typing.Union[aiohttp.web.Request, None]
) -> typing.Union[aiohttp.web.Response, aiohttp.web.FileResponse]:
"""Initiate a federated Keystone authentication with OIDC."""
response: typing.Union[aiohttp.web.Response, aiohttp.web.FileResponse]
if request and setd["oidc_enabled"]:
session = await aiohttp_session.get_session(request)
if "oidc" not in session:
return aiohttp.web.Response(status=302, headers={"Location": "/"})
if not setd["has_trust"]:
response = aiohttp.web.FileResponse(str(setd["static_directory"]) + "/login.html")
return disable_cache(response)
return aiohttp.web.Response(
status=302,
headers={
"Location": HAKA_OIDC_ENDPOINT(
endpoint=str(setd["auth_endpoint_url"]),
oidc=str(setd["keystone_oidc_provider"]),
origin=str(setd["set_origin_address"]),
),
},
)
def test_token(
formdata: MultiDictProxy[typing.Union[str, bytes, aiohttp.web.FileField]],
request: aiohttp.web.Request,
) -> str:
"""Validate unscoped token."""
unscoped: typing.Union[str, None] = None
log = request.app["Log"]
if "token" in formdata:
unscoped = str(formdata["token"])
log.debug(
f"Got OS token in formdata from address {request.remote} :: {time.ctime()}"
)
# Try getting the token id from form
if "token" in request.query and unscoped is None:
unscoped = request.query["token"]
log.debug(
"Got OS token in query string "
f"from address {request.remote} :: {time.ctime()}"
)
# Try getting the token id from headers
if "X-Auth-Token" in request.headers and unscoped is None:
unscoped = request.headers["X-Auth-Token"]
log.debug(
"Got OS token in http header "
f"from address {request.remote} :: {time.ctime()}"
)
if unscoped is None:
raise aiohttp.web.HTTPBadRequest(reason="Token missing from query")
if not (re.match("[a-f0-9]{32}", unscoped) and len(unscoped) == 32):
try:
# Check the magic byte matches a fernet token
if not base64.urlsafe_b64decode(unscoped.encode("utf-8"))[:1] == b"\x80":
raise aiohttp.web.HTTPBadRequest(reason="Token is malformed")
# Handle failures in base64decode
except (binascii.Error, UnicodeDecodeError):
raise aiohttp.web.HTTPBadRequest(reason="Token is malformed")
log.info("Got OS token in login return")
return unscoped
async def credentials_login_end(
request: aiohttp.web.Request,
) -> typing.Union[aiohttp.web.Response, aiohttp.web.FileResponse]:
"""Handle the login procedure with classic POST."""
log = request.app["Log"]
client = request.app["api_client"]
log.info("Got login request with username, password")
form = await request.post()
try:
username = str(form["username"])
password = str(form["password"])
except KeyError:
raise aiohttp.web.HTTPBadRequest(reason="Username or password not provided")
# Get an unscoped token with credentials
async with client.post(
f"{setd['auth_endpoint_url']}/auth/tokens",
json={
"auth": {
"identity": {
"methods": [
"password",
],
"password": {
"user": {
"name": username,
"domain": {
"name": "Default",
},
"password": password,
},
},
},
"scope": "unscoped",
},
},
) as resp:
if resp.status == 400:
text = await resp.text()
request.app["Log"].debug(text)
raise aiohttp.web.HTTPBadRequest(reason="No username or password provided.")
if resp.status == 401:
text = await resp.text()
request.app["Log"].debug(text)
raise aiohttp.web.HTTPUnauthorized(
reason="Wrong username or password, or no access to the service."
)
if resp.status != 201:
text = await resp.text()
request.app["Log"].debug(text)
raise aiohttp.web.HTTPUnauthorized
unscoped = resp.headers["X-Subject-Token"]
log.debug("Got token in password auth")
return await login_with_token(request, unscoped)
async def sso_query_end(
request: aiohttp.web.Request,
) -> typing.Union[aiohttp.web.Response, aiohttp.web.FileResponse]:
"""Handle the login procedure return from SSO or user from POST."""
formdata = await request.post()
# Declare the unscoped token
unscoped = test_token(formdata, request)
return await login_with_token(request, unscoped)
async def login_with_token(
request: aiohttp.web.Request,
token: str,
) -> typing.Union[aiohttp.web.Response, aiohttp.web.FileResponse]:
"""Log in a session with token."""
# Establish connection and begin user session
response: typing.Union[aiohttp.web.Response, aiohttp.web.FileResponse]
response = aiohttp.web.Response(
status=303,
body=None,
)
client = request.app["api_client"]
session = (
await aiohttp_session.get_session(request)
if setd["oidc_enabled"]
else await aiohttp_session.new_session(request)
)
session["at"] = time.time()
session["referer"] = request.url.host
uname = ""
taint = True if setd["force_restricted_mode"] else False
# Check token availability
avail = await get_availability_from_token(token, client)
csc_projects = session.get("csc-projects", None)
session["projects"] = {}
# Scope a token for all accessible projects
for project in avail["projects"]:
# Filter out projects without a declared access if the OIDC provider supports it
project_without_prefix = project["name"].removeprefix("project_")
if isinstance(csc_projects, list) and project_without_prefix not in csc_projects:
request.app["Log"].debug(
"Project %r is not enabled for sd-connect, skipping",
project["name"],
)
continue
async with client.post(
f"{setd['auth_endpoint_url']}/auth/tokens",
json={
"auth": {
"identity": {
"methods": [
"token",
],
"token": {
"id": token,
},
},
"scope": {"project": {"id": project["id"]}},
}
},
) as resp:
if resp.status == 401:
raise aiohttp.web.HTTPUnauthorized(reason="Token is not valid")
if resp.status == 403:
raise aiohttp.web.HTTPForbidden(reason="No access to service with token.")
ret = await resp.json()
request.app["Log"].debug(f"token output: {ret}")
obj_role = False
request.app["Log"].debug(f'roles: {ret["token"]["roles"]}')
for role in ret["token"]["roles"]:
if role["name"] in str(setd["os_accepted_roles"]).split(";"):
obj_role = True
if not obj_role:
continue
scoped = resp.headers["X-Subject-Token"]
# Use the first available public endpoint
endpoint = [
list(filter(lambda i: i["interface"] == "public", i["endpoints"]))[0]
for i in filter(
lambda i: i["type"] == "object-store", ret["token"]["catalog"]
)
][0]
request.app["Log"].debug(endpoint)
if not uname:
uname = ret["token"]["user"]["name"]
session["projects"][project["id"]] = {
"id": project["id"],
"name": project["name"],
"endpoint": endpoint["url"],
"token": scoped,
"tainted": True if setd["force_restricted_mode"] else False,
}
session["token"] = token
session["uname"] = uname
# the intersection of sdConnectProjects and Allas projects is empty
# in practice this might happen if there are sd connect projects that
# don't have Allas enabled
if not session["projects"]:
request.app["Log"].debug("possible sdConnectProjects and Allas projects mismatch")
raise aiohttp.web.HTTPForbidden(
reason="There are no projects available for this user."
)
session["taint"] = True if taint else False
session.changed()
if taint:
response.headers["Location"] = "/select"
return response
# Redirect to the browse page
if "NAV_TO" in request.cookies.keys():
response.headers["Location"] = request.cookies["NAV_TO"]
response.del_cookie("NAV_TO")
else:
response.headers["Location"] = "/browse"
return response
async def handle_project_lock(request: aiohttp.web.Request) -> aiohttp.web.Response:
"""Lock down to a specific project."""
log = request.app["Log"]
log.info("Call for locking down the project.")
session = await aiohttp_session.get_session(request)
project = request.match_info["project"]
# Ditch all projects that aren't the one specified if project is defined
if project in session["projects"]:
session["projects"] = dict(
filter(
lambda val: val[0] == project,
session["projects"].items(),
)
)
# If the project doesn't exist, allow all untainted projects
else:
session["projects"] = dict(
filter(lambda val: not val[1]["tainted"], session["projects"].items())
)
if not session["projects"]:
session.invalidate()
raise aiohttp.web.HTTPForbidden(reason="No untainted projects available.")
# The session is no longer tainted if it's been locked
session["taint"] = False
session.changed()
return aiohttp.web.Response(
status=303,
body=None,
headers={
"Location": "/browse",
},
)
async def handle_logout(request: aiohttp.web.Request) -> aiohttp.web.Response:
"""Properly kill the session for the user."""
log = request.app["Log"]
client = request.app["api_client"]
if not setd["set_session_devmode"]:
try:
session = await aiohttp_session.get_session(request)
log.info(f"Killing session {session.identity}")
for project in session["projects"]:
async with client.delete(
f"{setd['auth_endpoint_url']}/auth/tokens",
headers={
"X-Auth-Token": session["token"],
"X-Subject-Token": session["projects"][project]["token"],
},
):
pass
session.invalidate()
except aiohttp.web.HTTPUnauthorized:
log.info("Trying to log our an invalidated session")
raise aiohttp.web.HTTPUnauthorized
response = aiohttp.web.Response(status=303)
response.headers["Location"] = "/"
return response
def _get_projects_from_userinfo(
userinfo: typing.Dict[str, typing.Any],
) -> typing.List[typing.Any] | None:
"""Parse projects from userinfo.
:param userinfo: dict from userinfo containing user profile
:returns: None if userinfo doesn't contain csc-projects, or a list with "project_name"s
:raises HTTPUnauthorized in case no projects are available
"""
if "sdConnectProjects" in userinfo:
# Remove the possibly existing "project_" prefix
projects = [
p.removeprefix("project_") for p in userinfo["sdConnectProjects"].split(" ")
]
# we add this check in case the claim `sdConnectProjects does not exist`
# and we want to enforce this at deployment
elif setd["sdconnect_enabled"] and "sdConnectProjects" not in userinfo:
projects = []
else:
return None
if len(projects) == 0:
# No project group information received, aborting
raise aiohttp.web.HTTPUnauthorized(reason="User is not a member of any project.")
return projects
| handle_login | identifier_name |
docker.go | package gnomock
import (
"context"
"errors"
"fmt"
"io"
"net/url"
"os"
"regexp"
"strconv"
"strings"
"sync"
"time"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types/filters"
"github.com/docker/docker/api/types/mount"
"github.com/docker/docker/client"
"github.com/docker/docker/errdefs"
"github.com/docker/go-connections/nat"
"github.com/orlangure/gnomock/internal/cleaner"
"github.com/orlangure/gnomock/internal/health"
"go.uber.org/zap"
)
const (
localhostAddr = "127.0.0.1"
defaultStopTimeoutSec = 1
duplicateContainerPattern = `Conflict. The container name "(?:.+?)" is already in use by container "(\w+)". You have to remove \(or rename\) that container to be able to reuse that name.` // nolint:lll
dockerSockAddr = "/var/run/docker.sock"
)
var duplicateContainerRegexp = regexp.MustCompile(duplicateContainerPattern)
type docker struct {
client *client.Client
log *zap.SugaredLogger
// This lock is used to protect docker client from concurrent connections
// with version negotiation. As of this moment, there is a data race in
// docker client when version negotiation is requested. This data race is
// not dangerous, but it triggers race detector alarms, so it should be
// avoided. Currently the client still has this issue, so this is an
// attempt to fix it locally by preventing concurrent connection using the
// same client (mostly when `Stop` is called with multiple containers).
//
// https://github.com/moby/moby/pull/42379
lock sync.Mutex
}
func (g *g) dockerConnect() (*docker, error) {
g.log.Info("connecting to docker engine")
cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
if err != nil {
return nil, errors.Join(ErrEnvClient, err)
}
g.log.Info("connected to docker engine")
return &docker{client: cli, log: g.log}, nil
}
func (d *docker) isExistingLocalImage(ctx context.Context, image string) (bool, error) {
images, err := d.client.ImageList(ctx, types.ImageListOptions{All: true})
if err != nil {
return false, fmt.Errorf("can't list image: %w", err)
}
for _, img := range images {
for _, repoTag := range img.RepoTags {
if image == repoTag {
return true, nil
}
if !strings.Contains(repoTag, "/") {
repoTag = "library/" + repoTag
}
if strings.HasSuffix(image, repoTag) {
return true, nil
}
}
}
return false, nil
}
func (d *docker) pullImage(ctx context.Context, image string, cfg *Options) error {
d.log.Info("pulling image")
reader, err := d.client.ImagePull(ctx, image, types.ImagePullOptions{
RegistryAuth: cfg.Auth,
})
if err != nil {
return fmt.Errorf("can't pull image: %w", err)
}
defer func() {
closeErr := reader.Close()
if err == nil {
err = closeErr
}
}()
_, err = io.ReadAll(reader)
if err != nil {
return fmt.Errorf("can't read server output: %w", err)
}
d.log.Info("image pulled")
return nil
}
func (d *docker) startContainer(ctx context.Context, image string, ports NamedPorts, cfg *Options) (*Container, error) {
if cfg.Reuse {
container, ok, err := d.findReusableContainer(ctx, image, ports, cfg)
if err != nil {
return nil, err
}
if ok {
d.log.Info("re-using container")
return container, nil
}
}
d.log.Info("starting container")
resp, err := d.prepareContainer(ctx, image, ports, cfg)
if err != nil {
return nil, fmt.Errorf("can't prepare container: %w", err)
}
sidecarChan := d.setupContainerCleanup(resp.ID, cfg)
err = d.client.ContainerStart(ctx, resp.ID, types.ContainerStartOptions{})
if err != nil {
return nil, fmt.Errorf("can't start container %s: %w", resp.ID, err)
}
container, err := d.waitForContainerNetwork(ctx, resp.ID, ports)
if err != nil {
return nil, fmt.Errorf("container network isn't ready: %w", err)
}
if sidecar, ok := <-sidecarChan; ok {
container.ID = generateID(container.ID, sidecar)
}
d.log.Infow("container started", "container", container)
return container, nil
}
func (d *docker) setupContainerCleanup(id string, cfg *Options) chan string {
sidecarChan := make(chan string)
go func() {
defer close(sidecarChan)
if cfg.DisableAutoCleanup || cfg.Reuse || cfg.Debug {
return
}
opts := []Option{
WithDisableAutoCleanup(),
WithHostMounts(dockerSockAddr, dockerSockAddr),
WithHealthCheck(func(ctx context.Context, c *Container) error {
return health.HTTPGet(ctx, c.DefaultAddress())
}),
WithInit(func(ctx context.Context, c *Container) error {
return cleaner.Notify(context.Background(), c.DefaultAddress(), id)
}),
}
if cfg.UseLocalImagesFirst {
opts = append(opts, WithUseLocalImagesFirst())
}
if sc, err := StartCustom(
cleaner.Image, DefaultTCP(cleaner.Port),
opts...,
); err == nil {
sidecarChan <- sc.ID
}
}()
return sidecarChan
}
func (d *docker) prepareContainer(
ctx context.Context,
image string,
ports NamedPorts,
cfg *Options,
) (*container.CreateResponse, error) {
pullImage := true
if cfg.UseLocalImagesFirst {
isExisting, err := d.isExistingLocalImage(ctx, image)
if err != nil {
return nil, fmt.Errorf("can't list image: %w", err)
}
if isExisting {
pullImage = false
}
}
if pullImage {
if err := d.pullImage(ctx, image, cfg); err != nil {
return nil, fmt.Errorf("can't pull image: %w", err)
}
} | }
return resp, err
}
func (d *docker) waitForContainerNetwork(ctx context.Context, id string, ports NamedPorts) (*Container, error) {
d.log.Infow("waiting for container network", "container", id)
tick := time.NewTicker(time.Millisecond * 250)
defer tick.Stop()
for {
select {
case <-ctx.Done():
return nil, fmt.Errorf("container network is unavailable after timeout")
case <-tick.C:
containerJSON, err := d.client.ContainerInspect(ctx, id)
if err != nil {
return nil, fmt.Errorf("can't inspect container %s: %w", id, err)
}
boundNamedPorts, err := d.boundNamedPorts(containerJSON, ports)
if err != nil {
return nil, fmt.Errorf("can't find bound ports: %w", err)
}
d.log.Infow("waiting for port allocation", "container", id)
if len(boundNamedPorts) == len(ports) {
return &Container{
ID: id,
Host: d.hostAddr(),
Ports: boundNamedPorts,
gateway: containerJSON.NetworkSettings.Gateway,
}, nil
}
}
}
}
func (d *docker) exposedPorts(namedPorts NamedPorts) nat.PortSet {
exposedPorts := make(nat.PortSet)
for _, port := range namedPorts {
containerPort := fmt.Sprintf("%d/%s", port.Port, port.Protocol)
exposedPorts[nat.Port(containerPort)] = struct{}{}
}
return exposedPorts
}
func (d *docker) portBindings(exposedPorts nat.PortSet, ports NamedPorts) nat.PortMap {
portBindings := make(nat.PortMap)
// for the container to be accessible from another container, it cannot
// listen on 127.0.0.1 as it will be accessed by gateway address (e.g
// 172.17.0.1), so its port should be exposed everywhere
hostAddr := d.hostAddr()
if isInDocker() {
hostAddr = "0.0.0.0"
}
for port := range exposedPorts {
binding := nat.PortBinding{
HostIP: hostAddr,
}
if pName, err := ports.Find(port.Proto(), port.Int()); err == nil {
namedPort := ports.Get(pName)
if namedPort.HostPort > 0 {
binding.HostPort = strconv.Itoa(namedPort.HostPort)
}
}
portBindings[port] = []nat.PortBinding{binding}
}
return portBindings
}
func (d *docker) createContainer(
ctx context.Context,
image string,
ports NamedPorts,
cfg *Options,
) (*container.CreateResponse, error) {
exposedPorts := d.exposedPorts(ports)
containerConfig := &container.Config{
Image: image,
ExposedPorts: exposedPorts,
Env: cfg.Env,
}
if len(cfg.Cmd) > 0 {
containerConfig.Cmd = cfg.Cmd
}
if len(cfg.Entrypoint) > 0 {
containerConfig.Entrypoint = cfg.Entrypoint
}
mounts := []mount.Mount{}
for src, dst := range cfg.HostMounts {
mounts = append(mounts, mount.Mount{
Type: mount.TypeBind,
Source: src,
Target: dst,
})
}
portBindings := d.portBindings(exposedPorts, ports)
hostConfig := &container.HostConfig{
PortBindings: portBindings,
AutoRemove: !cfg.Debug,
Privileged: cfg.Privileged,
Mounts: mounts,
ExtraHosts: cfg.ExtraHosts,
}
resp, err := d.client.ContainerCreate(ctx, containerConfig, hostConfig, nil, nil, cfg.ContainerName)
if err == nil {
return &resp, nil
}
matches := duplicateContainerRegexp.FindStringSubmatch(err.Error())
if len(matches) == 2 {
d.log.Infow("duplicate container found, stopping", "container", matches[1])
err = d.client.ContainerRemove(ctx, matches[1], types.ContainerRemoveOptions{
Force: true,
})
if err != nil {
return nil, fmt.Errorf("can't remove existing container: %w", err)
}
resp, err = d.client.ContainerCreate(ctx, containerConfig, hostConfig, nil, nil, cfg.ContainerName)
}
return &resp, err
}
func (d *docker) findReusableContainer(
ctx context.Context,
image string,
ports NamedPorts,
cfg *Options,
) (*Container, bool, error) {
if cfg.ContainerName == "" {
return nil, false, fmt.Errorf("container name is required when container reuse is enabled")
}
list, err := d.client.ContainerList(ctx, types.ContainerListOptions{
Filters: filters.NewArgs(
filters.Arg("name", cfg.ContainerName),
filters.Arg("ancestor", image),
filters.Arg("status", "running"),
),
})
if err != nil || len(list) < 1 {
return nil, false, err
}
container, err := d.waitForContainerNetwork(ctx, list[0].ID, ports)
if err != nil {
return nil, false, err
}
return container, true, nil
}
func (d *docker) boundNamedPorts(json types.ContainerJSON, namedPorts NamedPorts) (NamedPorts, error) {
boundNamedPorts := make(NamedPorts)
for containerPort, bindings := range json.NetworkSettings.Ports {
if len(bindings) == 0 {
continue
}
hostPortNum, err := strconv.Atoi(bindings[0].HostPort)
if err != nil {
return nil, fmt.Errorf("invalid host port value '%s': %w", bindings[0].HostPort, err)
}
proto, intPort := containerPort.Proto(), containerPort.Int()
portName, err := namedPorts.Find(proto, intPort)
if err != nil {
return nil, fmt.Errorf("can't find port %s/%d: %w", proto, intPort, err)
}
boundNamedPorts[portName] = Port{
Protocol: proto,
Port: hostPortNum,
}
}
return boundNamedPorts, nil
}
func (d *docker) readLogs(ctx context.Context, id string) (io.ReadCloser, error) {
d.log.Info("starting container logs forwarder")
logsOptions := types.ContainerLogsOptions{
ShowStderr: true, ShowStdout: true, Follow: true,
}
rc, err := d.client.ContainerLogs(ctx, id, logsOptions)
if err != nil {
return nil, fmt.Errorf("can't read logs: %w", err)
}
d.log.Info("container logs forwarder ready")
return rc, nil
}
func (d *docker) stopContainer(ctx context.Context, id string) error {
d.lock.Lock()
defer d.lock.Unlock()
stopTimeout := defaultStopTimeoutSec
err := d.client.ContainerStop(ctx, id, container.StopOptions{
Timeout: &stopTimeout,
})
if err != nil && !client.IsErrNotFound(err) {
return fmt.Errorf("can't stop container %s: %w", id, err)
}
return nil
}
func (d *docker) removeContainer(ctx context.Context, id string) error {
d.lock.Lock()
defer d.lock.Unlock()
err := d.client.ContainerRemove(ctx, id, types.ContainerRemoveOptions{Force: true})
if err != nil && !client.IsErrNotFound(err) && !isDeletionAlreadyInProgessError(err, id) {
return fmt.Errorf("can't remove container %s: %w", id, err)
}
return nil
}
// hostAddr returns an address of a host that runs the containers. If
// DOCKER_HOST environment variable is not set, if its value is an invalid URL,
// or if it is a `unix:///` socket address, it returns local address.
func (d *docker) hostAddr() string {
if dh := os.Getenv("DOCKER_HOST"); dh != "" {
u, err := url.Parse(dh)
if err == nil {
if host := u.Hostname(); host != "" {
return host
}
}
}
return localhostAddr
}
func isDeletionAlreadyInProgessError(err error, id string) bool {
var e errdefs.ErrConflict
if errors.As(err, &e) {
if err.Error() == fmt.Sprintf("Error response from daemon: removal of container %s is already in progress", id) {
return true
}
}
return false
} |
resp, err := d.createContainer(ctx, image, ports, cfg)
if err != nil {
return nil, fmt.Errorf("can't create container: %w", err) | random_line_split |
docker.go | package gnomock
import (
"context"
"errors"
"fmt"
"io"
"net/url"
"os"
"regexp"
"strconv"
"strings"
"sync"
"time"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types/filters"
"github.com/docker/docker/api/types/mount"
"github.com/docker/docker/client"
"github.com/docker/docker/errdefs"
"github.com/docker/go-connections/nat"
"github.com/orlangure/gnomock/internal/cleaner"
"github.com/orlangure/gnomock/internal/health"
"go.uber.org/zap"
)
const (
localhostAddr = "127.0.0.1"
defaultStopTimeoutSec = 1
duplicateContainerPattern = `Conflict. The container name "(?:.+?)" is already in use by container "(\w+)". You have to remove \(or rename\) that container to be able to reuse that name.` // nolint:lll
dockerSockAddr = "/var/run/docker.sock"
)
var duplicateContainerRegexp = regexp.MustCompile(duplicateContainerPattern)
type docker struct {
client *client.Client
log *zap.SugaredLogger
// This lock is used to protect docker client from concurrent connections
// with version negotiation. As of this moment, there is a data race in
// docker client when version negotiation is requested. This data race is
// not dangerous, but it triggers race detector alarms, so it should be
// avoided. Currently the client still has this issue, so this is an
// attempt to fix it locally by preventing concurrent connection using the
// same client (mostly when `Stop` is called with multiple containers).
//
// https://github.com/moby/moby/pull/42379
lock sync.Mutex
}
func (g *g) dockerConnect() (*docker, error) {
g.log.Info("connecting to docker engine")
cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
if err != nil {
return nil, errors.Join(ErrEnvClient, err)
}
g.log.Info("connected to docker engine")
return &docker{client: cli, log: g.log}, nil
}
func (d *docker) isExistingLocalImage(ctx context.Context, image string) (bool, error) {
images, err := d.client.ImageList(ctx, types.ImageListOptions{All: true})
if err != nil {
return false, fmt.Errorf("can't list image: %w", err)
}
for _, img := range images {
for _, repoTag := range img.RepoTags {
if image == repoTag {
return true, nil
}
if !strings.Contains(repoTag, "/") {
repoTag = "library/" + repoTag
}
if strings.HasSuffix(image, repoTag) {
return true, nil
}
}
}
return false, nil
}
func (d *docker) pullImage(ctx context.Context, image string, cfg *Options) error {
d.log.Info("pulling image")
reader, err := d.client.ImagePull(ctx, image, types.ImagePullOptions{
RegistryAuth: cfg.Auth,
})
if err != nil {
return fmt.Errorf("can't pull image: %w", err)
}
defer func() {
closeErr := reader.Close()
if err == nil {
err = closeErr
}
}()
_, err = io.ReadAll(reader)
if err != nil {
return fmt.Errorf("can't read server output: %w", err)
}
d.log.Info("image pulled")
return nil
}
func (d *docker) startContainer(ctx context.Context, image string, ports NamedPorts, cfg *Options) (*Container, error) {
if cfg.Reuse {
container, ok, err := d.findReusableContainer(ctx, image, ports, cfg)
if err != nil {
return nil, err
}
if ok {
d.log.Info("re-using container")
return container, nil
}
}
d.log.Info("starting container")
resp, err := d.prepareContainer(ctx, image, ports, cfg)
if err != nil {
return nil, fmt.Errorf("can't prepare container: %w", err)
}
sidecarChan := d.setupContainerCleanup(resp.ID, cfg)
err = d.client.ContainerStart(ctx, resp.ID, types.ContainerStartOptions{})
if err != nil {
return nil, fmt.Errorf("can't start container %s: %w", resp.ID, err)
}
container, err := d.waitForContainerNetwork(ctx, resp.ID, ports)
if err != nil {
return nil, fmt.Errorf("container network isn't ready: %w", err)
}
if sidecar, ok := <-sidecarChan; ok {
container.ID = generateID(container.ID, sidecar)
}
d.log.Infow("container started", "container", container)
return container, nil
}
func (d *docker) setupContainerCleanup(id string, cfg *Options) chan string {
sidecarChan := make(chan string)
go func() {
defer close(sidecarChan)
if cfg.DisableAutoCleanup || cfg.Reuse || cfg.Debug {
return
}
opts := []Option{
WithDisableAutoCleanup(),
WithHostMounts(dockerSockAddr, dockerSockAddr),
WithHealthCheck(func(ctx context.Context, c *Container) error {
return health.HTTPGet(ctx, c.DefaultAddress())
}),
WithInit(func(ctx context.Context, c *Container) error {
return cleaner.Notify(context.Background(), c.DefaultAddress(), id)
}),
}
if cfg.UseLocalImagesFirst {
opts = append(opts, WithUseLocalImagesFirst())
}
if sc, err := StartCustom(
cleaner.Image, DefaultTCP(cleaner.Port),
opts...,
); err == nil {
sidecarChan <- sc.ID
}
}()
return sidecarChan
}
func (d *docker) prepareContainer(
ctx context.Context,
image string,
ports NamedPorts,
cfg *Options,
) (*container.CreateResponse, error) {
pullImage := true
if cfg.UseLocalImagesFirst {
isExisting, err := d.isExistingLocalImage(ctx, image)
if err != nil {
return nil, fmt.Errorf("can't list image: %w", err)
}
if isExisting {
pullImage = false
}
}
if pullImage {
if err := d.pullImage(ctx, image, cfg); err != nil {
return nil, fmt.Errorf("can't pull image: %w", err)
}
}
resp, err := d.createContainer(ctx, image, ports, cfg)
if err != nil {
return nil, fmt.Errorf("can't create container: %w", err)
}
return resp, err
}
func (d *docker) waitForContainerNetwork(ctx context.Context, id string, ports NamedPorts) (*Container, error) {
d.log.Infow("waiting for container network", "container", id)
tick := time.NewTicker(time.Millisecond * 250)
defer tick.Stop()
for {
select {
case <-ctx.Done():
return nil, fmt.Errorf("container network is unavailable after timeout")
case <-tick.C:
containerJSON, err := d.client.ContainerInspect(ctx, id)
if err != nil {
return nil, fmt.Errorf("can't inspect container %s: %w", id, err)
}
boundNamedPorts, err := d.boundNamedPorts(containerJSON, ports)
if err != nil {
return nil, fmt.Errorf("can't find bound ports: %w", err)
}
d.log.Infow("waiting for port allocation", "container", id)
if len(boundNamedPorts) == len(ports) {
return &Container{
ID: id,
Host: d.hostAddr(),
Ports: boundNamedPorts,
gateway: containerJSON.NetworkSettings.Gateway,
}, nil
}
}
}
}
func (d *docker) exposedPorts(namedPorts NamedPorts) nat.PortSet {
exposedPorts := make(nat.PortSet)
for _, port := range namedPorts {
containerPort := fmt.Sprintf("%d/%s", port.Port, port.Protocol)
exposedPorts[nat.Port(containerPort)] = struct{}{}
}
return exposedPorts
}
func (d *docker) portBindings(exposedPorts nat.PortSet, ports NamedPorts) nat.PortMap {
portBindings := make(nat.PortMap)
// for the container to be accessible from another container, it cannot
// listen on 127.0.0.1 as it will be accessed by gateway address (e.g
// 172.17.0.1), so its port should be exposed everywhere
hostAddr := d.hostAddr()
if isInDocker() {
hostAddr = "0.0.0.0"
}
for port := range exposedPorts {
binding := nat.PortBinding{
HostIP: hostAddr,
}
if pName, err := ports.Find(port.Proto(), port.Int()); err == nil {
namedPort := ports.Get(pName)
if namedPort.HostPort > 0 {
binding.HostPort = strconv.Itoa(namedPort.HostPort)
}
}
portBindings[port] = []nat.PortBinding{binding}
}
return portBindings
}
func (d *docker) createContainer(
ctx context.Context,
image string,
ports NamedPorts,
cfg *Options,
) (*container.CreateResponse, error) {
exposedPorts := d.exposedPorts(ports)
containerConfig := &container.Config{
Image: image,
ExposedPorts: exposedPorts,
Env: cfg.Env,
}
if len(cfg.Cmd) > 0 {
containerConfig.Cmd = cfg.Cmd
}
if len(cfg.Entrypoint) > 0 {
containerConfig.Entrypoint = cfg.Entrypoint
}
mounts := []mount.Mount{}
for src, dst := range cfg.HostMounts {
mounts = append(mounts, mount.Mount{
Type: mount.TypeBind,
Source: src,
Target: dst,
})
}
portBindings := d.portBindings(exposedPorts, ports)
hostConfig := &container.HostConfig{
PortBindings: portBindings,
AutoRemove: !cfg.Debug,
Privileged: cfg.Privileged,
Mounts: mounts,
ExtraHosts: cfg.ExtraHosts,
}
resp, err := d.client.ContainerCreate(ctx, containerConfig, hostConfig, nil, nil, cfg.ContainerName)
if err == nil {
return &resp, nil
}
matches := duplicateContainerRegexp.FindStringSubmatch(err.Error())
if len(matches) == 2 {
d.log.Infow("duplicate container found, stopping", "container", matches[1])
err = d.client.ContainerRemove(ctx, matches[1], types.ContainerRemoveOptions{
Force: true,
})
if err != nil {
return nil, fmt.Errorf("can't remove existing container: %w", err)
}
resp, err = d.client.ContainerCreate(ctx, containerConfig, hostConfig, nil, nil, cfg.ContainerName)
}
return &resp, err
}
func (d *docker) findReusableContainer(
ctx context.Context,
image string,
ports NamedPorts,
cfg *Options,
) (*Container, bool, error) {
if cfg.ContainerName == "" {
return nil, false, fmt.Errorf("container name is required when container reuse is enabled")
}
list, err := d.client.ContainerList(ctx, types.ContainerListOptions{
Filters: filters.NewArgs(
filters.Arg("name", cfg.ContainerName),
filters.Arg("ancestor", image),
filters.Arg("status", "running"),
),
})
if err != nil || len(list) < 1 {
return nil, false, err
}
container, err := d.waitForContainerNetwork(ctx, list[0].ID, ports)
if err != nil {
return nil, false, err
}
return container, true, nil
}
func (d *docker) boundNamedPorts(json types.ContainerJSON, namedPorts NamedPorts) (NamedPorts, error) {
boundNamedPorts := make(NamedPorts)
for containerPort, bindings := range json.NetworkSettings.Ports {
if len(bindings) == 0 {
continue
}
hostPortNum, err := strconv.Atoi(bindings[0].HostPort)
if err != nil {
return nil, fmt.Errorf("invalid host port value '%s': %w", bindings[0].HostPort, err)
}
proto, intPort := containerPort.Proto(), containerPort.Int()
portName, err := namedPorts.Find(proto, intPort)
if err != nil {
return nil, fmt.Errorf("can't find port %s/%d: %w", proto, intPort, err)
}
boundNamedPorts[portName] = Port{
Protocol: proto,
Port: hostPortNum,
}
}
return boundNamedPorts, nil
}
func (d *docker) readLogs(ctx context.Context, id string) (io.ReadCloser, error) {
d.log.Info("starting container logs forwarder")
logsOptions := types.ContainerLogsOptions{
ShowStderr: true, ShowStdout: true, Follow: true,
}
rc, err := d.client.ContainerLogs(ctx, id, logsOptions)
if err != nil {
return nil, fmt.Errorf("can't read logs: %w", err)
}
d.log.Info("container logs forwarder ready")
return rc, nil
}
func (d *docker) stopContainer(ctx context.Context, id string) error {
d.lock.Lock()
defer d.lock.Unlock()
stopTimeout := defaultStopTimeoutSec
err := d.client.ContainerStop(ctx, id, container.StopOptions{
Timeout: &stopTimeout,
})
if err != nil && !client.IsErrNotFound(err) {
return fmt.Errorf("can't stop container %s: %w", id, err)
}
return nil
}
func (d *docker) removeContainer(ctx context.Context, id string) error {
d.lock.Lock()
defer d.lock.Unlock()
err := d.client.ContainerRemove(ctx, id, types.ContainerRemoveOptions{Force: true})
if err != nil && !client.IsErrNotFound(err) && !isDeletionAlreadyInProgessError(err, id) {
return fmt.Errorf("can't remove container %s: %w", id, err)
}
return nil
}
// hostAddr returns an address of a host that runs the containers. If
// DOCKER_HOST environment variable is not set, if its value is an invalid URL,
// or if it is a `unix:///` socket address, it returns local address.
func (d *docker) hostAddr() string {
if dh := os.Getenv("DOCKER_HOST"); dh != "" {
u, err := url.Parse(dh)
if err == nil {
if host := u.Hostname(); host != "" {
return host
}
}
}
return localhostAddr
}
func | (err error, id string) bool {
var e errdefs.ErrConflict
if errors.As(err, &e) {
if err.Error() == fmt.Sprintf("Error response from daemon: removal of container %s is already in progress", id) {
return true
}
}
return false
}
| isDeletionAlreadyInProgessError | identifier_name |
docker.go | package gnomock
import (
"context"
"errors"
"fmt"
"io"
"net/url"
"os"
"regexp"
"strconv"
"strings"
"sync"
"time"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types/filters"
"github.com/docker/docker/api/types/mount"
"github.com/docker/docker/client"
"github.com/docker/docker/errdefs"
"github.com/docker/go-connections/nat"
"github.com/orlangure/gnomock/internal/cleaner"
"github.com/orlangure/gnomock/internal/health"
"go.uber.org/zap"
)
const (
localhostAddr = "127.0.0.1"
defaultStopTimeoutSec = 1
duplicateContainerPattern = `Conflict. The container name "(?:.+?)" is already in use by container "(\w+)". You have to remove \(or rename\) that container to be able to reuse that name.` // nolint:lll
dockerSockAddr = "/var/run/docker.sock"
)
var duplicateContainerRegexp = regexp.MustCompile(duplicateContainerPattern)
type docker struct {
client *client.Client
log *zap.SugaredLogger
// This lock is used to protect docker client from concurrent connections
// with version negotiation. As of this moment, there is a data race in
// docker client when version negotiation is requested. This data race is
// not dangerous, but it triggers race detector alarms, so it should be
// avoided. Currently the client still has this issue, so this is an
// attempt to fix it locally by preventing concurrent connection using the
// same client (mostly when `Stop` is called with multiple containers).
//
// https://github.com/moby/moby/pull/42379
lock sync.Mutex
}
func (g *g) dockerConnect() (*docker, error) {
g.log.Info("connecting to docker engine")
cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
if err != nil {
return nil, errors.Join(ErrEnvClient, err)
}
g.log.Info("connected to docker engine")
return &docker{client: cli, log: g.log}, nil
}
func (d *docker) isExistingLocalImage(ctx context.Context, image string) (bool, error) {
images, err := d.client.ImageList(ctx, types.ImageListOptions{All: true})
if err != nil {
return false, fmt.Errorf("can't list image: %w", err)
}
for _, img := range images {
for _, repoTag := range img.RepoTags {
if image == repoTag {
return true, nil
}
if !strings.Contains(repoTag, "/") {
repoTag = "library/" + repoTag
}
if strings.HasSuffix(image, repoTag) {
return true, nil
}
}
}
return false, nil
}
func (d *docker) pullImage(ctx context.Context, image string, cfg *Options) error {
d.log.Info("pulling image")
reader, err := d.client.ImagePull(ctx, image, types.ImagePullOptions{
RegistryAuth: cfg.Auth,
})
if err != nil {
return fmt.Errorf("can't pull image: %w", err)
}
defer func() {
closeErr := reader.Close()
if err == nil {
err = closeErr
}
}()
_, err = io.ReadAll(reader)
if err != nil {
return fmt.Errorf("can't read server output: %w", err)
}
d.log.Info("image pulled")
return nil
}
func (d *docker) startContainer(ctx context.Context, image string, ports NamedPorts, cfg *Options) (*Container, error) |
func (d *docker) setupContainerCleanup(id string, cfg *Options) chan string {
sidecarChan := make(chan string)
go func() {
defer close(sidecarChan)
if cfg.DisableAutoCleanup || cfg.Reuse || cfg.Debug {
return
}
opts := []Option{
WithDisableAutoCleanup(),
WithHostMounts(dockerSockAddr, dockerSockAddr),
WithHealthCheck(func(ctx context.Context, c *Container) error {
return health.HTTPGet(ctx, c.DefaultAddress())
}),
WithInit(func(ctx context.Context, c *Container) error {
return cleaner.Notify(context.Background(), c.DefaultAddress(), id)
}),
}
if cfg.UseLocalImagesFirst {
opts = append(opts, WithUseLocalImagesFirst())
}
if sc, err := StartCustom(
cleaner.Image, DefaultTCP(cleaner.Port),
opts...,
); err == nil {
sidecarChan <- sc.ID
}
}()
return sidecarChan
}
func (d *docker) prepareContainer(
ctx context.Context,
image string,
ports NamedPorts,
cfg *Options,
) (*container.CreateResponse, error) {
pullImage := true
if cfg.UseLocalImagesFirst {
isExisting, err := d.isExistingLocalImage(ctx, image)
if err != nil {
return nil, fmt.Errorf("can't list image: %w", err)
}
if isExisting {
pullImage = false
}
}
if pullImage {
if err := d.pullImage(ctx, image, cfg); err != nil {
return nil, fmt.Errorf("can't pull image: %w", err)
}
}
resp, err := d.createContainer(ctx, image, ports, cfg)
if err != nil {
return nil, fmt.Errorf("can't create container: %w", err)
}
return resp, err
}
func (d *docker) waitForContainerNetwork(ctx context.Context, id string, ports NamedPorts) (*Container, error) {
d.log.Infow("waiting for container network", "container", id)
tick := time.NewTicker(time.Millisecond * 250)
defer tick.Stop()
for {
select {
case <-ctx.Done():
return nil, fmt.Errorf("container network is unavailable after timeout")
case <-tick.C:
containerJSON, err := d.client.ContainerInspect(ctx, id)
if err != nil {
return nil, fmt.Errorf("can't inspect container %s: %w", id, err)
}
boundNamedPorts, err := d.boundNamedPorts(containerJSON, ports)
if err != nil {
return nil, fmt.Errorf("can't find bound ports: %w", err)
}
d.log.Infow("waiting for port allocation", "container", id)
if len(boundNamedPorts) == len(ports) {
return &Container{
ID: id,
Host: d.hostAddr(),
Ports: boundNamedPorts,
gateway: containerJSON.NetworkSettings.Gateway,
}, nil
}
}
}
}
func (d *docker) exposedPorts(namedPorts NamedPorts) nat.PortSet {
exposedPorts := make(nat.PortSet)
for _, port := range namedPorts {
containerPort := fmt.Sprintf("%d/%s", port.Port, port.Protocol)
exposedPorts[nat.Port(containerPort)] = struct{}{}
}
return exposedPorts
}
func (d *docker) portBindings(exposedPorts nat.PortSet, ports NamedPorts) nat.PortMap {
portBindings := make(nat.PortMap)
// for the container to be accessible from another container, it cannot
// listen on 127.0.0.1 as it will be accessed by gateway address (e.g
// 172.17.0.1), so its port should be exposed everywhere
hostAddr := d.hostAddr()
if isInDocker() {
hostAddr = "0.0.0.0"
}
for port := range exposedPorts {
binding := nat.PortBinding{
HostIP: hostAddr,
}
if pName, err := ports.Find(port.Proto(), port.Int()); err == nil {
namedPort := ports.Get(pName)
if namedPort.HostPort > 0 {
binding.HostPort = strconv.Itoa(namedPort.HostPort)
}
}
portBindings[port] = []nat.PortBinding{binding}
}
return portBindings
}
func (d *docker) createContainer(
ctx context.Context,
image string,
ports NamedPorts,
cfg *Options,
) (*container.CreateResponse, error) {
exposedPorts := d.exposedPorts(ports)
containerConfig := &container.Config{
Image: image,
ExposedPorts: exposedPorts,
Env: cfg.Env,
}
if len(cfg.Cmd) > 0 {
containerConfig.Cmd = cfg.Cmd
}
if len(cfg.Entrypoint) > 0 {
containerConfig.Entrypoint = cfg.Entrypoint
}
mounts := []mount.Mount{}
for src, dst := range cfg.HostMounts {
mounts = append(mounts, mount.Mount{
Type: mount.TypeBind,
Source: src,
Target: dst,
})
}
portBindings := d.portBindings(exposedPorts, ports)
hostConfig := &container.HostConfig{
PortBindings: portBindings,
AutoRemove: !cfg.Debug,
Privileged: cfg.Privileged,
Mounts: mounts,
ExtraHosts: cfg.ExtraHosts,
}
resp, err := d.client.ContainerCreate(ctx, containerConfig, hostConfig, nil, nil, cfg.ContainerName)
if err == nil {
return &resp, nil
}
matches := duplicateContainerRegexp.FindStringSubmatch(err.Error())
if len(matches) == 2 {
d.log.Infow("duplicate container found, stopping", "container", matches[1])
err = d.client.ContainerRemove(ctx, matches[1], types.ContainerRemoveOptions{
Force: true,
})
if err != nil {
return nil, fmt.Errorf("can't remove existing container: %w", err)
}
resp, err = d.client.ContainerCreate(ctx, containerConfig, hostConfig, nil, nil, cfg.ContainerName)
}
return &resp, err
}
func (d *docker) findReusableContainer(
ctx context.Context,
image string,
ports NamedPorts,
cfg *Options,
) (*Container, bool, error) {
if cfg.ContainerName == "" {
return nil, false, fmt.Errorf("container name is required when container reuse is enabled")
}
list, err := d.client.ContainerList(ctx, types.ContainerListOptions{
Filters: filters.NewArgs(
filters.Arg("name", cfg.ContainerName),
filters.Arg("ancestor", image),
filters.Arg("status", "running"),
),
})
if err != nil || len(list) < 1 {
return nil, false, err
}
container, err := d.waitForContainerNetwork(ctx, list[0].ID, ports)
if err != nil {
return nil, false, err
}
return container, true, nil
}
func (d *docker) boundNamedPorts(json types.ContainerJSON, namedPorts NamedPorts) (NamedPorts, error) {
boundNamedPorts := make(NamedPorts)
for containerPort, bindings := range json.NetworkSettings.Ports {
if len(bindings) == 0 {
continue
}
hostPortNum, err := strconv.Atoi(bindings[0].HostPort)
if err != nil {
return nil, fmt.Errorf("invalid host port value '%s': %w", bindings[0].HostPort, err)
}
proto, intPort := containerPort.Proto(), containerPort.Int()
portName, err := namedPorts.Find(proto, intPort)
if err != nil {
return nil, fmt.Errorf("can't find port %s/%d: %w", proto, intPort, err)
}
boundNamedPorts[portName] = Port{
Protocol: proto,
Port: hostPortNum,
}
}
return boundNamedPorts, nil
}
func (d *docker) readLogs(ctx context.Context, id string) (io.ReadCloser, error) {
d.log.Info("starting container logs forwarder")
logsOptions := types.ContainerLogsOptions{
ShowStderr: true, ShowStdout: true, Follow: true,
}
rc, err := d.client.ContainerLogs(ctx, id, logsOptions)
if err != nil {
return nil, fmt.Errorf("can't read logs: %w", err)
}
d.log.Info("container logs forwarder ready")
return rc, nil
}
func (d *docker) stopContainer(ctx context.Context, id string) error {
d.lock.Lock()
defer d.lock.Unlock()
stopTimeout := defaultStopTimeoutSec
err := d.client.ContainerStop(ctx, id, container.StopOptions{
Timeout: &stopTimeout,
})
if err != nil && !client.IsErrNotFound(err) {
return fmt.Errorf("can't stop container %s: %w", id, err)
}
return nil
}
func (d *docker) removeContainer(ctx context.Context, id string) error {
d.lock.Lock()
defer d.lock.Unlock()
err := d.client.ContainerRemove(ctx, id, types.ContainerRemoveOptions{Force: true})
if err != nil && !client.IsErrNotFound(err) && !isDeletionAlreadyInProgessError(err, id) {
return fmt.Errorf("can't remove container %s: %w", id, err)
}
return nil
}
// hostAddr returns an address of a host that runs the containers. If
// DOCKER_HOST environment variable is not set, if its value is an invalid URL,
// or if it is a `unix:///` socket address, it returns local address.
func (d *docker) hostAddr() string {
if dh := os.Getenv("DOCKER_HOST"); dh != "" {
u, err := url.Parse(dh)
if err == nil {
if host := u.Hostname(); host != "" {
return host
}
}
}
return localhostAddr
}
func isDeletionAlreadyInProgessError(err error, id string) bool {
var e errdefs.ErrConflict
if errors.As(err, &e) {
if err.Error() == fmt.Sprintf("Error response from daemon: removal of container %s is already in progress", id) {
return true
}
}
return false
}
| {
if cfg.Reuse {
container, ok, err := d.findReusableContainer(ctx, image, ports, cfg)
if err != nil {
return nil, err
}
if ok {
d.log.Info("re-using container")
return container, nil
}
}
d.log.Info("starting container")
resp, err := d.prepareContainer(ctx, image, ports, cfg)
if err != nil {
return nil, fmt.Errorf("can't prepare container: %w", err)
}
sidecarChan := d.setupContainerCleanup(resp.ID, cfg)
err = d.client.ContainerStart(ctx, resp.ID, types.ContainerStartOptions{})
if err != nil {
return nil, fmt.Errorf("can't start container %s: %w", resp.ID, err)
}
container, err := d.waitForContainerNetwork(ctx, resp.ID, ports)
if err != nil {
return nil, fmt.Errorf("container network isn't ready: %w", err)
}
if sidecar, ok := <-sidecarChan; ok {
container.ID = generateID(container.ID, sidecar)
}
d.log.Infow("container started", "container", container)
return container, nil
} | identifier_body |
docker.go | package gnomock
import (
"context"
"errors"
"fmt"
"io"
"net/url"
"os"
"regexp"
"strconv"
"strings"
"sync"
"time"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types/filters"
"github.com/docker/docker/api/types/mount"
"github.com/docker/docker/client"
"github.com/docker/docker/errdefs"
"github.com/docker/go-connections/nat"
"github.com/orlangure/gnomock/internal/cleaner"
"github.com/orlangure/gnomock/internal/health"
"go.uber.org/zap"
)
const (
localhostAddr = "127.0.0.1"
defaultStopTimeoutSec = 1
duplicateContainerPattern = `Conflict. The container name "(?:.+?)" is already in use by container "(\w+)". You have to remove \(or rename\) that container to be able to reuse that name.` // nolint:lll
dockerSockAddr = "/var/run/docker.sock"
)
var duplicateContainerRegexp = regexp.MustCompile(duplicateContainerPattern)
type docker struct {
client *client.Client
log *zap.SugaredLogger
// This lock is used to protect docker client from concurrent connections
// with version negotiation. As of this moment, there is a data race in
// docker client when version negotiation is requested. This data race is
// not dangerous, but it triggers race detector alarms, so it should be
// avoided. Currently the client still has this issue, so this is an
// attempt to fix it locally by preventing concurrent connection using the
// same client (mostly when `Stop` is called with multiple containers).
//
// https://github.com/moby/moby/pull/42379
lock sync.Mutex
}
func (g *g) dockerConnect() (*docker, error) {
g.log.Info("connecting to docker engine")
cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
if err != nil {
return nil, errors.Join(ErrEnvClient, err)
}
g.log.Info("connected to docker engine")
return &docker{client: cli, log: g.log}, nil
}
func (d *docker) isExistingLocalImage(ctx context.Context, image string) (bool, error) {
images, err := d.client.ImageList(ctx, types.ImageListOptions{All: true})
if err != nil {
return false, fmt.Errorf("can't list image: %w", err)
}
for _, img := range images {
for _, repoTag := range img.RepoTags {
if image == repoTag {
return true, nil
}
if !strings.Contains(repoTag, "/") {
repoTag = "library/" + repoTag
}
if strings.HasSuffix(image, repoTag) {
return true, nil
}
}
}
return false, nil
}
func (d *docker) pullImage(ctx context.Context, image string, cfg *Options) error {
d.log.Info("pulling image")
reader, err := d.client.ImagePull(ctx, image, types.ImagePullOptions{
RegistryAuth: cfg.Auth,
})
if err != nil {
return fmt.Errorf("can't pull image: %w", err)
}
defer func() {
closeErr := reader.Close()
if err == nil {
err = closeErr
}
}()
_, err = io.ReadAll(reader)
if err != nil {
return fmt.Errorf("can't read server output: %w", err)
}
d.log.Info("image pulled")
return nil
}
func (d *docker) startContainer(ctx context.Context, image string, ports NamedPorts, cfg *Options) (*Container, error) {
if cfg.Reuse {
container, ok, err := d.findReusableContainer(ctx, image, ports, cfg)
if err != nil {
return nil, err
}
if ok {
d.log.Info("re-using container")
return container, nil
}
}
d.log.Info("starting container")
resp, err := d.prepareContainer(ctx, image, ports, cfg)
if err != nil {
return nil, fmt.Errorf("can't prepare container: %w", err)
}
sidecarChan := d.setupContainerCleanup(resp.ID, cfg)
err = d.client.ContainerStart(ctx, resp.ID, types.ContainerStartOptions{})
if err != nil {
return nil, fmt.Errorf("can't start container %s: %w", resp.ID, err)
}
container, err := d.waitForContainerNetwork(ctx, resp.ID, ports)
if err != nil {
return nil, fmt.Errorf("container network isn't ready: %w", err)
}
if sidecar, ok := <-sidecarChan; ok {
container.ID = generateID(container.ID, sidecar)
}
d.log.Infow("container started", "container", container)
return container, nil
}
func (d *docker) setupContainerCleanup(id string, cfg *Options) chan string {
sidecarChan := make(chan string)
go func() {
defer close(sidecarChan)
if cfg.DisableAutoCleanup || cfg.Reuse || cfg.Debug {
return
}
opts := []Option{
WithDisableAutoCleanup(),
WithHostMounts(dockerSockAddr, dockerSockAddr),
WithHealthCheck(func(ctx context.Context, c *Container) error {
return health.HTTPGet(ctx, c.DefaultAddress())
}),
WithInit(func(ctx context.Context, c *Container) error {
return cleaner.Notify(context.Background(), c.DefaultAddress(), id)
}),
}
if cfg.UseLocalImagesFirst {
opts = append(opts, WithUseLocalImagesFirst())
}
if sc, err := StartCustom(
cleaner.Image, DefaultTCP(cleaner.Port),
opts...,
); err == nil {
sidecarChan <- sc.ID
}
}()
return sidecarChan
}
func (d *docker) prepareContainer(
ctx context.Context,
image string,
ports NamedPorts,
cfg *Options,
) (*container.CreateResponse, error) {
pullImage := true
if cfg.UseLocalImagesFirst {
isExisting, err := d.isExistingLocalImage(ctx, image)
if err != nil {
return nil, fmt.Errorf("can't list image: %w", err)
}
if isExisting {
pullImage = false
}
}
if pullImage {
if err := d.pullImage(ctx, image, cfg); err != nil {
return nil, fmt.Errorf("can't pull image: %w", err)
}
}
resp, err := d.createContainer(ctx, image, ports, cfg)
if err != nil {
return nil, fmt.Errorf("can't create container: %w", err)
}
return resp, err
}
func (d *docker) waitForContainerNetwork(ctx context.Context, id string, ports NamedPorts) (*Container, error) {
d.log.Infow("waiting for container network", "container", id)
tick := time.NewTicker(time.Millisecond * 250)
defer tick.Stop()
for {
select {
case <-ctx.Done():
return nil, fmt.Errorf("container network is unavailable after timeout")
case <-tick.C:
containerJSON, err := d.client.ContainerInspect(ctx, id)
if err != nil {
return nil, fmt.Errorf("can't inspect container %s: %w", id, err)
}
boundNamedPorts, err := d.boundNamedPorts(containerJSON, ports)
if err != nil {
return nil, fmt.Errorf("can't find bound ports: %w", err)
}
d.log.Infow("waiting for port allocation", "container", id)
if len(boundNamedPorts) == len(ports) {
return &Container{
ID: id,
Host: d.hostAddr(),
Ports: boundNamedPorts,
gateway: containerJSON.NetworkSettings.Gateway,
}, nil
}
}
}
}
func (d *docker) exposedPorts(namedPorts NamedPorts) nat.PortSet {
exposedPorts := make(nat.PortSet)
for _, port := range namedPorts {
containerPort := fmt.Sprintf("%d/%s", port.Port, port.Protocol)
exposedPorts[nat.Port(containerPort)] = struct{}{}
}
return exposedPorts
}
func (d *docker) portBindings(exposedPorts nat.PortSet, ports NamedPorts) nat.PortMap {
portBindings := make(nat.PortMap)
// for the container to be accessible from another container, it cannot
// listen on 127.0.0.1 as it will be accessed by gateway address (e.g
// 172.17.0.1), so its port should be exposed everywhere
hostAddr := d.hostAddr()
if isInDocker() {
hostAddr = "0.0.0.0"
}
for port := range exposedPorts {
binding := nat.PortBinding{
HostIP: hostAddr,
}
if pName, err := ports.Find(port.Proto(), port.Int()); err == nil {
namedPort := ports.Get(pName)
if namedPort.HostPort > 0 {
binding.HostPort = strconv.Itoa(namedPort.HostPort)
}
}
portBindings[port] = []nat.PortBinding{binding}
}
return portBindings
}
func (d *docker) createContainer(
ctx context.Context,
image string,
ports NamedPorts,
cfg *Options,
) (*container.CreateResponse, error) {
exposedPorts := d.exposedPorts(ports)
containerConfig := &container.Config{
Image: image,
ExposedPorts: exposedPorts,
Env: cfg.Env,
}
if len(cfg.Cmd) > 0 {
containerConfig.Cmd = cfg.Cmd
}
if len(cfg.Entrypoint) > 0 {
containerConfig.Entrypoint = cfg.Entrypoint
}
mounts := []mount.Mount{}
for src, dst := range cfg.HostMounts {
mounts = append(mounts, mount.Mount{
Type: mount.TypeBind,
Source: src,
Target: dst,
})
}
portBindings := d.portBindings(exposedPorts, ports)
hostConfig := &container.HostConfig{
PortBindings: portBindings,
AutoRemove: !cfg.Debug,
Privileged: cfg.Privileged,
Mounts: mounts,
ExtraHosts: cfg.ExtraHosts,
}
resp, err := d.client.ContainerCreate(ctx, containerConfig, hostConfig, nil, nil, cfg.ContainerName)
if err == nil |
matches := duplicateContainerRegexp.FindStringSubmatch(err.Error())
if len(matches) == 2 {
d.log.Infow("duplicate container found, stopping", "container", matches[1])
err = d.client.ContainerRemove(ctx, matches[1], types.ContainerRemoveOptions{
Force: true,
})
if err != nil {
return nil, fmt.Errorf("can't remove existing container: %w", err)
}
resp, err = d.client.ContainerCreate(ctx, containerConfig, hostConfig, nil, nil, cfg.ContainerName)
}
return &resp, err
}
func (d *docker) findReusableContainer(
ctx context.Context,
image string,
ports NamedPorts,
cfg *Options,
) (*Container, bool, error) {
if cfg.ContainerName == "" {
return nil, false, fmt.Errorf("container name is required when container reuse is enabled")
}
list, err := d.client.ContainerList(ctx, types.ContainerListOptions{
Filters: filters.NewArgs(
filters.Arg("name", cfg.ContainerName),
filters.Arg("ancestor", image),
filters.Arg("status", "running"),
),
})
if err != nil || len(list) < 1 {
return nil, false, err
}
container, err := d.waitForContainerNetwork(ctx, list[0].ID, ports)
if err != nil {
return nil, false, err
}
return container, true, nil
}
func (d *docker) boundNamedPorts(json types.ContainerJSON, namedPorts NamedPorts) (NamedPorts, error) {
boundNamedPorts := make(NamedPorts)
for containerPort, bindings := range json.NetworkSettings.Ports {
if len(bindings) == 0 {
continue
}
hostPortNum, err := strconv.Atoi(bindings[0].HostPort)
if err != nil {
return nil, fmt.Errorf("invalid host port value '%s': %w", bindings[0].HostPort, err)
}
proto, intPort := containerPort.Proto(), containerPort.Int()
portName, err := namedPorts.Find(proto, intPort)
if err != nil {
return nil, fmt.Errorf("can't find port %s/%d: %w", proto, intPort, err)
}
boundNamedPorts[portName] = Port{
Protocol: proto,
Port: hostPortNum,
}
}
return boundNamedPorts, nil
}
func (d *docker) readLogs(ctx context.Context, id string) (io.ReadCloser, error) {
d.log.Info("starting container logs forwarder")
logsOptions := types.ContainerLogsOptions{
ShowStderr: true, ShowStdout: true, Follow: true,
}
rc, err := d.client.ContainerLogs(ctx, id, logsOptions)
if err != nil {
return nil, fmt.Errorf("can't read logs: %w", err)
}
d.log.Info("container logs forwarder ready")
return rc, nil
}
func (d *docker) stopContainer(ctx context.Context, id string) error {
d.lock.Lock()
defer d.lock.Unlock()
stopTimeout := defaultStopTimeoutSec
err := d.client.ContainerStop(ctx, id, container.StopOptions{
Timeout: &stopTimeout,
})
if err != nil && !client.IsErrNotFound(err) {
return fmt.Errorf("can't stop container %s: %w", id, err)
}
return nil
}
func (d *docker) removeContainer(ctx context.Context, id string) error {
d.lock.Lock()
defer d.lock.Unlock()
err := d.client.ContainerRemove(ctx, id, types.ContainerRemoveOptions{Force: true})
if err != nil && !client.IsErrNotFound(err) && !isDeletionAlreadyInProgessError(err, id) {
return fmt.Errorf("can't remove container %s: %w", id, err)
}
return nil
}
// hostAddr returns an address of a host that runs the containers. If
// DOCKER_HOST environment variable is not set, if its value is an invalid URL,
// or if it is a `unix:///` socket address, it returns local address.
func (d *docker) hostAddr() string {
if dh := os.Getenv("DOCKER_HOST"); dh != "" {
u, err := url.Parse(dh)
if err == nil {
if host := u.Hostname(); host != "" {
return host
}
}
}
return localhostAddr
}
func isDeletionAlreadyInProgessError(err error, id string) bool {
var e errdefs.ErrConflict
if errors.As(err, &e) {
if err.Error() == fmt.Sprintf("Error response from daemon: removal of container %s is already in progress", id) {
return true
}
}
return false
}
| {
return &resp, nil
} | conditional_block |
amqp_transport.py | # -*- coding: utf-8 -*-
# Copyright (C) 2020 Panayiotou, Konstantinos <klpanagi@gmail.com>
# Author: Panayiotou, Konstantinos <klpanagi@gmail.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (
absolute_import,
division,
print_function,
unicode_literals
)
import time
import atexit
import signal
import json
import pika
# import ssl
from .r4a_logger import create_logger, LoggingLevel
class RPCMeta(object):
__slots__ = [
'channel', 'method', 'properties'
]
def __init__(self, channel=None, method=None, properties=None):
self.channel = channel
self.method = method
self.properties = properties
class MessageProperties(pika.BasicProperties):
"""Message Properties/Attribures used for sending and receiving messages.
Args:
content_type (str):
content_encoding (str):
timestamp (str):
"""
def __init__(self, content_type=None, content_encoding=None,
timestamp=None, correlation_id=None, reply_to=None,
message_id=None, user_id=None, app_id=None):
"""Constructor."""
if timestamp is None:
timestamp = (time.time() + 0.5) * 1000
timestamp = int(timestamp)
super(MessageProperties, self).__init__(
content_type=content_type,
content_encoding=content_encoding,
timestamp=timestamp,
correlation_id=correlation_id,
reply_to=reply_to,
message_id=str(message_id) if message_id is not None else None,
user_id=str(user_id) if user_id is not None else None,
app_id=str(app_id) if app_id is not None else None
)
class ConnectionParameters(pika.ConnectionParameters):
"""AMQP Connection parameters.
Args:
host (str): Hostname of AMQP broker to connect to.
port (int|str): AMQP broker listening port.
creds (object): Auth Credentials - Credentials instance.
secure (bool): Enable SSL/TLS (AMQPS) - Not supported!!
reconnect_attempts (int): The reconnection attempts to make before
droping and raising an Exception.
retry_delay (float): Time delay between reconnect attempts.
timeout (float): Socket Connection timeout value.
timeout (float): Blocked Connection timeout value.
Set the timeout, in seconds, that the connection may remain blocked
(triggered by Connection.Blocked from broker). If the timeout
expires before connection becomes unblocked, the connection will
be torn down.
heartbeat_timeout (int): Controls AMQP heartbeat
timeout negotiation during connection tuning. An integer value
always overrides the value proposed by broker. Use 0 to deactivate
heartbeats and None to always accept the broker's proposal.
The value passed for timeout is also used to calculate an interval
at which a heartbeat frame is sent to the broker. The interval is
equal to the timeout value divided by two.
channel_max (int): The max permissible number of channels per
connection. Defaults to 128.
"""
__slots__ = [
'host', 'port', 'secure', 'vhost', 'reconnect_attempts', 'retry_delay',
'timeout', 'heartbeat_timeout', 'blocked_connection_timeout', 'creds'
]
def __init__(self, host='127.0.0.1', port='5672', creds=None,
secure=False, vhost='/', reconnect_attempts=5,
retry_delay=2.0, timeout=120, blocked_connection_timeout=None,
heartbeat_timeout=60, channel_max=128):
"""Constructor."""
self.host = host
self.port = port
self.secure = secure
self.vhost = vhost
self.reconnect_attempts = reconnect_attempts
self.retry_delay = retry_delay
self.timeout = timeout
self.blocked_connection_timeout = blocked_connection_timeout
self.heartbeat_timeout = heartbeat_timeout
self.channel_max = channel_max
if creds is None:
creds = Credentials()
super(ConnectionParameters, self).__init__(
host=host,
port=str(port),
credentials=creds,
connection_attempts=reconnect_attempts,
retry_delay=retry_delay,
blocked_connection_timeout=blocked_connection_timeout,
socket_timeout=timeout,
virtual_host=vhost,
heartbeat=heartbeat_timeout,
channel_max=channel_max)
def __str__(self):
_properties = {
'host': self.host,
'port': self.port,
'vhost': self.vhost,
'reconnect_attempts': self.reconnect_attempts,
'retry_delay': self.retry_delay,
'timeout': self.timeout,
'blocked_connection_timeout': self.blocked_connection_timeout,
'heartbeat_timeout': self.heartbeat_timeout,
'channel_max': self.channel_max
}
_str = json.dumps(_properties)
return _str
class AMQPConnection(pika.BlockingConnection):
"""Connection. Thin wrapper around pika.BlockingConnection"""
def __init__(self, conn_params):
self._connection_params = conn_params
self._pika_connection = None
super(AMQPConnection, self).__init__(
parameters=self._connection_params)
class ExchangeTypes(object):
"""AMQP Exchange Types."""
Topic = 'topic'
Direct = 'direct'
Fanout = 'fanout'
Default = ''
class Credentials(pika.PlainCredentials):
"""Connection credentials for authn/authz.
Args:
username (str): The username.
password (str): The password (Basic Authentication).
"""
__slots__ = ['username', 'password']
def __init__(self, username='guest', password='guest'):
"""Constructor."""
super(Credentials, self).__init__(username=username, password=password)
class AMQPTransportSync(object):
"""Broker Interface.
Implements commonly used functionalities. Base class of high-level
implementations such as SubscriberSync and RpcServer.
"""
def __init__(self, *args, **kwargs):
"""Constructor."""
self._connection = None
self._channel = None
self._closing = False
self._debug = False
self.logger = None
if 'logger' in kwargs:
self.logger = kwargs.pop('logger')
else:
self.logger = create_logger('{}-{}'.format(
self.__class__.__name__, self._name))
if 'debug' in kwargs:
self.debug = kwargs.pop('debug')
else:
self.debug = False
if 'connection_params' in kwargs:
self.connection_params = kwargs.pop('connection_params')
else:
# Default Connection Parameters
self.connection_params = ConnectionParameters()
if 'creds' in kwargs:
self.credentials = kwargs.pop('creds') | self.connection_params.credentials = self.credentials
else:
self.credentials = self.connection_params.credentials
# So that connections do not go zombie
atexit.register(self._graceful_shutdown)
@property
def channel(self):
return self._channel
@property
def connection(self):
return self._connection
@property
def debug(self):
"""Debug mode flag."""
return self._debug
@debug.setter
def debug(self, val):
if not isinstance(val, bool):
raise TypeError('Value should be boolean')
self._debug = val
if self._debug is True:
self.logger.setLevel(LoggingLevel.DEBUG)
else:
self.logger.setLevel(LoggingLevel.INFO)
def connect(self):
"""Connect to the AMQP broker. Creates a new channel."""
if self._connection is not None:
self.logger.debug('Using allready existing connection [{}]'.format(
self._connection))
# Create a new communication channel
self._channel = self._connection.channel()
return True
try:
# Create a new connection
self.logger.debug(
'Connecting to AMQP broker @ [{}:{}, vhost={}]...'.format(
self.connection_params.host,
self.connection_params.port,
self.connection_params.vhost))
self.logger.debug('Connection parameters:')
self.logger.debug(self.connection_params)
self._connection = AMQPConnection(self.connection_params)
# Create a new communication channel
self._channel = self._connection.channel()
self.logger.info(
'Connected to AMQP broker @ [{}:{}, vhost={}]'.format(
self.connection_params.host,
self.connection_params.port,
self.connection_params.vhost))
except pika.exceptions.ConnectionClosed:
self.logger.debug('Connection timed out. Reconnecting...')
return self.connect()
except pika.exceptions.AMQPConnectionError:
self.logger.debug('Connection error. Reconnecting...')
return self.connect()
except Exception as exc:
self.logger.exception('')
raise (exc)
return self._channel
def process_amqp_events(self):
"""Force process amqp events, such as heartbeat packages."""
self.connection.process_data_events()
def _signal_handler(self, signum, frame):
self.logger.info('Signal received: ', signum)
self._graceful_shutdown()
def _graceful_shutdown(self):
if not self.connection:
return
if self._channel.is_closed:
# self.logger.warning('Channel is allready closed')
return
self.logger.debug('Invoking a graceful shutdown...')
self._channel.stop_consuming()
self._channel.close()
self.logger.debug('Channel closed!')
def exchange_exists(self, exchange_name):
resp = self._channel.exchange_declare(
exchange=exchange_name,
passive=True, # Perform a declare or just to see if it exists
)
self.logger.debug('Exchange exists result: {}'.format(resp))
return resp
def create_exchange(self, exchange_name, exchange_type, internal=None):
"""
Create a new exchange.
@param exchange_name: The name of the exchange (e.g. com.logging).
@type exchange_name: string
@param exchange_type: The type of the exchange (e.g. 'topic').
@type exchange_type: string
"""
self._channel.exchange_declare(
exchange=exchange_name,
durable=True, # Survive reboot
passive=False, # Perform a declare or just to see if it exists
internal=internal, # Can only be published to by other exchanges
exchange_type=exchange_type
)
self.logger.debug('Created exchange: [name={}, type={}]'.format(
exchange_name, exchange_type))
def create_queue(self, queue_name='', exclusive=True, queue_size=10,
message_ttl=60000, overflow_behaviour='drop-head',
expires=600000):
"""
Create a new queue.
@param queue_name: The name of the queue.
@type queue_name: string
@param exclusive: Only allow access by the current connection.
@type exclusive: bool
@param queue_size: The size of the queue
@type queue_size: int
@param message_ttl: Per-queue message time-to-live
(https://www.rabbitmq.com/ttl.html#per-queue-message-ttl)
@type message_ttl: int
@param overflow_behaviour: Overflow behaviour - 'drop-head' ||
'reject-publish'.
https://www.rabbitmq.com/maxlength.html#overflow-behaviour
@type overflow_behaviour: str
@param expires: Queues will expire after a period of time only
when they are not used (e.g. do not have consumers).
This feature can be used together with the auto-delete
queue property. The value is expressed in milliseconds (ms).
Default value is 10 minutes.
https://www.rabbitmq.com/ttl.html#queue-ttl
"""
args = {
'x-max-length': queue_size,
'x-overflow': overflow_behaviour,
'x-message-ttl': message_ttl,
'x-expires': expires
}
result = self._channel.queue_declare(
exclusive=exclusive,
queue=queue_name,
durable=False,
auto_delete=True,
arguments=args)
queue_name = result.method.queue
self.logger.debug('Created queue [{}] [size={}, ttl={}]'.format(
queue_name, queue_size, message_ttl))
return queue_name
def delete_queue(self, queue_name):
self._channel.queue_delete(queue=queue_name)
def _queue_exists_clb(self, arg):
print(arg)
def queue_exists(self, queue_name):
"""Check if a queue exists, given its name.
Args:
queue_name (str): The name of the queue.
Returns:
int: True if queue exists False otherwise.
"""
# resp = self._channel.queue_declare(queue_name, passive=True,
# callback=self._queue_exists_clb)
try:
resp = self._channel.queue_declare(queue_name, passive=True)
except pika.exceptions.ChannelClosedByBroker as exc:
self.connect()
if exc.reply_code == 404: # Not Found
return False
else:
self.logger.warning('Queue exists <{}>'.format(queue_name))
return True
def bind_queue(self, exchange_name, queue_name, bind_key):
"""
Bind a queue to and exchange using a bind-key.
@param exchange_name: The name of the exchange (e.g. com.logging).
@type exchange_name: string
@param queue_name: The name of the queue.
@type queue_name: string
@param bind_key: The binding key name.
@type bind_key: string
"""
self.logger.info('Subscribed to topic: {}'.format(bind_key))
try:
self._channel.queue_bind(
exchange=exchange_name, queue=queue_name, routing_key=bind_key)
except Exception as exc:
raise exc
def close(self):
self._graceful_shutdown()
def disconnect(self):
self._graceful_shutdown()
def __del__(self):
self._graceful_shutdown()
class AMQPTransportAsync(object):
CONNECTION_TIMEOUT_SEC = 5
def __init__(self, host='127.0.0.1', port='5672', exchange='amq.topic'):
self._connection = None
self._channel = None
self._closing = False
self.logger = create_logger(self.__class__.__name__)
self._exchange = 'amq.topic'
self._host = host
self._port = port
super(AMQPTransportAsync, self).__init__()
def connect(self):
"""This method connects to RabbitMQ, returning the connection handle.
When the connection is established, the on_connection_open method
will be invoked by pika.
:rtype: pika.SelectConnection
"""
self.logger.info("Connecting to AMQP broker @ [{}:{}] ...".format(
self._host, self._port))
connection = pika.SelectConnection(
pika.URLParameters(host=self.host, port=self.port),
on_open_callback=self.on_connection_open,
on_open_error_callback=self.on_connection_open_error,
on_close_callback=self.on_connection_closed,
stop_ioloop_on_close=False)
self._connection = connection
return connection
def on_connection_open(self, unused_connection):
"""
This method is called by pika once the connection to RabbitMQ has
been established. It passes the handle to the connection object in
case we need it, but in this case, we'll just mark it unused.
:type unused_connection: pika.SelectConnection
"""
self.logger.info('Connection established!')
self.open_channel()
def add_on_connection_close_callback(self):
"""
This method adds an on close callback that will be invoked by pika
when RabbitMQ closes the connection to the publisher unexpectedly.
"""
self.logger.info('Adding connection close callback')
self._connection.add_on_close_callback(self.on_connection_closed)
def on_connection_closed(self, connection, reply_code, reply_text):
"""
This method is invoked by pika when the connection to RabbitMQ is
closed unexpectedly. Since it is unexpected, we will reconnect to
RabbitMQ if it disconnects.
:param pika.connection.Connection connection: The closed connection obj
:param int reply_code: The server provided reply_code if given
:param str reply_text: The server provided reply_text if given
"""
if self._closing:
self._connection.ioloop.stop()
else:
self.logger.warning(
'Connection closed, reopening in 5 seconds: (%s) %s',
reply_code, reply_text)
self._connection.add_timeout(self.CONNECTION_TIMEOUT_SEC,
self.reconnect)
def on_connection_open_error(self, _unused_connection, err):
"""This method is called by pika if the connection to RabbitMQ
can't be established.
:param pika.SelectConnection _unused_connection: The connection
:param Exception err: The error
"""
self.logger.info('Connection open failed: %s', err)
self.reconnect()
def reconnect(self):
"""
Will be invoked by the IOLoop timer if the connection is
closed. See the on_connection_closed method.
"""
# This is the old connection IOLoop instance, stop its ioloop
self._connection.ioloop.stop()
if not self._closing:
# Create a new connection
self._connection = self.connect()
# There is now a new connection, needs a new ioloop to run
self._connection.ioloop.start()
def open_channel(self):
"""
Open a new channel with RabbitMQ by issuing the Channel.Open RPC
command. When RabbitMQ responds that the channel is open, the
on_channel_open callback will be invoked by pika.
"""
self.logger.info('Creating a new channel')
self._connection.channel(on_open_callback=self.on_channel_open)
def on_channel_open(self, channel):
"""This method is invoked by pika when the channel has been opened.
The channel object is passed in so we can make use of it.
Since the channel is now open, we'll declare the exchange to use.
IMPLEMENT IN INHERITED CLASSES!!!!!!!!
:param pika.channel.Channel channel: The channel object
"""
self.logger.info('Channel opened')
self._channel = channel
self.add_on_channel_close_callback()
def add_on_channel_close_callback(self):
"""This method tells pika to call the on_channel_closed method if
RabbitMQ unexpectedly closes the channel.
"""
self.logger.info('Adding channel close callback')
self._channel.add_on_close_callback(self.on_channel_closed)
def on_channel_closed(self, channel, reply_code, reply_text):
"""Invoked by pika when RabbitMQ unexpectedly closes the channel.
Channels are usually closed if you attempt to do something that
violates the protocol, such as re-declare an exchange or queue with
different parameters. In this case, we'll close the connection
to shutdown the object.
:param pika.channel.Channel: The closed channel
:param int reply_code: The numeric reason the channel was closed
:param str reply_text: The text reason the channel was closed
"""
self.logger.warning('Channel %i was closed: (%s) %s', channel,
reply_code, reply_text)
self._connection.close()
def create_exchange(self, exchange_name, exchange_type, on_declareok):
"""
Declare/Create an exchange.
@param exchange_name: The name of the exchange
@type exchange_name: string
@param exchange_type: the type of the exchange
@type exchange_type:
"""
self.logger.debug('Declaring exchange {} [type={}]', exchange_name,
exchange_type)
cb = functools.partial(
on_declareok, userdata=exchange_name)
self._channel.exchange_declare(
exchange=exchange_name,
exchange_type=exchange_type,
callback=cb)
def create_queue(self, queue_name=''):
result = self._channel.queue_declare(exclusive=True, queue=queue_name)
queue_name = result.method.queue
self._queue_name = queue_name
self.logger.info("Created queue [{}]".format(queue_name))
return queue_name
def _bind_queue(self, exchange_name, queue_name, bind_key):
try:
self._channel.queue_bind(
exchange=exchange_name, queue=queue_name, routing_key=bind_key)
except Exception:
self.logger.exception()
def set_qos(self, on_ok):
"""This method sets up the consumer prefetch to only be delivered
one message at a time. The consumer must acknowledge this message
before RabbitMQ will deliver another one. You should experiment
with different prefetch values to achieve desired performance.
"""
self._channel.basic_qos(
prefetch_count=self._prefetch_count, callback=on_ok)
def __del__(self):
self._connection.close() | random_line_split | |
amqp_transport.py | # -*- coding: utf-8 -*-
# Copyright (C) 2020 Panayiotou, Konstantinos <klpanagi@gmail.com>
# Author: Panayiotou, Konstantinos <klpanagi@gmail.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (
absolute_import,
division,
print_function,
unicode_literals
)
import time
import atexit
import signal
import json
import pika
# import ssl
from .r4a_logger import create_logger, LoggingLevel
class RPCMeta(object):
__slots__ = [
'channel', 'method', 'properties'
]
def __init__(self, channel=None, method=None, properties=None):
self.channel = channel
self.method = method
self.properties = properties
class MessageProperties(pika.BasicProperties):
"""Message Properties/Attribures used for sending and receiving messages.
Args:
content_type (str):
content_encoding (str):
timestamp (str):
"""
def __init__(self, content_type=None, content_encoding=None,
timestamp=None, correlation_id=None, reply_to=None,
message_id=None, user_id=None, app_id=None):
"""Constructor."""
if timestamp is None:
timestamp = (time.time() + 0.5) * 1000
timestamp = int(timestamp)
super(MessageProperties, self).__init__(
content_type=content_type,
content_encoding=content_encoding,
timestamp=timestamp,
correlation_id=correlation_id,
reply_to=reply_to,
message_id=str(message_id) if message_id is not None else None,
user_id=str(user_id) if user_id is not None else None,
app_id=str(app_id) if app_id is not None else None
)
class ConnectionParameters(pika.ConnectionParameters):
"""AMQP Connection parameters.
Args:
host (str): Hostname of AMQP broker to connect to.
port (int|str): AMQP broker listening port.
creds (object): Auth Credentials - Credentials instance.
secure (bool): Enable SSL/TLS (AMQPS) - Not supported!!
reconnect_attempts (int): The reconnection attempts to make before
droping and raising an Exception.
retry_delay (float): Time delay between reconnect attempts.
timeout (float): Socket Connection timeout value.
timeout (float): Blocked Connection timeout value.
Set the timeout, in seconds, that the connection may remain blocked
(triggered by Connection.Blocked from broker). If the timeout
expires before connection becomes unblocked, the connection will
be torn down.
heartbeat_timeout (int): Controls AMQP heartbeat
timeout negotiation during connection tuning. An integer value
always overrides the value proposed by broker. Use 0 to deactivate
heartbeats and None to always accept the broker's proposal.
The value passed for timeout is also used to calculate an interval
at which a heartbeat frame is sent to the broker. The interval is
equal to the timeout value divided by two.
channel_max (int): The max permissible number of channels per
connection. Defaults to 128.
"""
__slots__ = [
'host', 'port', 'secure', 'vhost', 'reconnect_attempts', 'retry_delay',
'timeout', 'heartbeat_timeout', 'blocked_connection_timeout', 'creds'
]
def __init__(self, host='127.0.0.1', port='5672', creds=None,
secure=False, vhost='/', reconnect_attempts=5,
retry_delay=2.0, timeout=120, blocked_connection_timeout=None,
heartbeat_timeout=60, channel_max=128):
"""Constructor."""
self.host = host
self.port = port
self.secure = secure
self.vhost = vhost
self.reconnect_attempts = reconnect_attempts
self.retry_delay = retry_delay
self.timeout = timeout
self.blocked_connection_timeout = blocked_connection_timeout
self.heartbeat_timeout = heartbeat_timeout
self.channel_max = channel_max
if creds is None:
creds = Credentials()
super(ConnectionParameters, self).__init__(
host=host,
port=str(port),
credentials=creds,
connection_attempts=reconnect_attempts,
retry_delay=retry_delay,
blocked_connection_timeout=blocked_connection_timeout,
socket_timeout=timeout,
virtual_host=vhost,
heartbeat=heartbeat_timeout,
channel_max=channel_max)
def __str__(self):
_properties = {
'host': self.host,
'port': self.port,
'vhost': self.vhost,
'reconnect_attempts': self.reconnect_attempts,
'retry_delay': self.retry_delay,
'timeout': self.timeout,
'blocked_connection_timeout': self.blocked_connection_timeout,
'heartbeat_timeout': self.heartbeat_timeout,
'channel_max': self.channel_max
}
_str = json.dumps(_properties)
return _str
class AMQPConnection(pika.BlockingConnection):
"""Connection. Thin wrapper around pika.BlockingConnection"""
def __init__(self, conn_params):
self._connection_params = conn_params
self._pika_connection = None
super(AMQPConnection, self).__init__(
parameters=self._connection_params)
class ExchangeTypes(object):
"""AMQP Exchange Types."""
Topic = 'topic'
Direct = 'direct'
Fanout = 'fanout'
Default = ''
class Credentials(pika.PlainCredentials):
"""Connection credentials for authn/authz.
Args:
username (str): The username.
password (str): The password (Basic Authentication).
"""
__slots__ = ['username', 'password']
def __init__(self, username='guest', password='guest'):
"""Constructor."""
super(Credentials, self).__init__(username=username, password=password)
class AMQPTransportSync(object):
"""Broker Interface.
Implements commonly used functionalities. Base class of high-level
implementations such as SubscriberSync and RpcServer.
"""
def __init__(self, *args, **kwargs):
"""Constructor."""
self._connection = None
self._channel = None
self._closing = False
self._debug = False
self.logger = None
if 'logger' in kwargs:
self.logger = kwargs.pop('logger')
else:
self.logger = create_logger('{}-{}'.format(
self.__class__.__name__, self._name))
if 'debug' in kwargs:
self.debug = kwargs.pop('debug')
else:
self.debug = False
if 'connection_params' in kwargs:
self.connection_params = kwargs.pop('connection_params')
else:
# Default Connection Parameters
self.connection_params = ConnectionParameters()
if 'creds' in kwargs:
self.credentials = kwargs.pop('creds')
self.connection_params.credentials = self.credentials
else:
self.credentials = self.connection_params.credentials
# So that connections do not go zombie
atexit.register(self._graceful_shutdown)
@property
def channel(self):
return self._channel
@property
def connection(self):
return self._connection
@property
def debug(self):
"""Debug mode flag."""
return self._debug
@debug.setter
def debug(self, val):
if not isinstance(val, bool):
raise TypeError('Value should be boolean')
self._debug = val
if self._debug is True:
self.logger.setLevel(LoggingLevel.DEBUG)
else:
self.logger.setLevel(LoggingLevel.INFO)
def connect(self):
"""Connect to the AMQP broker. Creates a new channel."""
if self._connection is not None:
self.logger.debug('Using allready existing connection [{}]'.format(
self._connection))
# Create a new communication channel
self._channel = self._connection.channel()
return True
try:
# Create a new connection
self.logger.debug(
'Connecting to AMQP broker @ [{}:{}, vhost={}]...'.format(
self.connection_params.host,
self.connection_params.port,
self.connection_params.vhost))
self.logger.debug('Connection parameters:')
self.logger.debug(self.connection_params)
self._connection = AMQPConnection(self.connection_params)
# Create a new communication channel
self._channel = self._connection.channel()
self.logger.info(
'Connected to AMQP broker @ [{}:{}, vhost={}]'.format(
self.connection_params.host,
self.connection_params.port,
self.connection_params.vhost))
except pika.exceptions.ConnectionClosed:
self.logger.debug('Connection timed out. Reconnecting...')
return self.connect()
except pika.exceptions.AMQPConnectionError:
self.logger.debug('Connection error. Reconnecting...')
return self.connect()
except Exception as exc:
self.logger.exception('')
raise (exc)
return self._channel
def process_amqp_events(self):
"""Force process amqp events, such as heartbeat packages."""
self.connection.process_data_events()
def _signal_handler(self, signum, frame):
self.logger.info('Signal received: ', signum)
self._graceful_shutdown()
def _graceful_shutdown(self):
if not self.connection:
return
if self._channel.is_closed:
# self.logger.warning('Channel is allready closed')
return
self.logger.debug('Invoking a graceful shutdown...')
self._channel.stop_consuming()
self._channel.close()
self.logger.debug('Channel closed!')
def exchange_exists(self, exchange_name):
resp = self._channel.exchange_declare(
exchange=exchange_name,
passive=True, # Perform a declare or just to see if it exists
)
self.logger.debug('Exchange exists result: {}'.format(resp))
return resp
def create_exchange(self, exchange_name, exchange_type, internal=None):
"""
Create a new exchange.
@param exchange_name: The name of the exchange (e.g. com.logging).
@type exchange_name: string
@param exchange_type: The type of the exchange (e.g. 'topic').
@type exchange_type: string
"""
self._channel.exchange_declare(
exchange=exchange_name,
durable=True, # Survive reboot
passive=False, # Perform a declare or just to see if it exists
internal=internal, # Can only be published to by other exchanges
exchange_type=exchange_type
)
self.logger.debug('Created exchange: [name={}, type={}]'.format(
exchange_name, exchange_type))
def create_queue(self, queue_name='', exclusive=True, queue_size=10,
message_ttl=60000, overflow_behaviour='drop-head',
expires=600000):
"""
Create a new queue.
@param queue_name: The name of the queue.
@type queue_name: string
@param exclusive: Only allow access by the current connection.
@type exclusive: bool
@param queue_size: The size of the queue
@type queue_size: int
@param message_ttl: Per-queue message time-to-live
(https://www.rabbitmq.com/ttl.html#per-queue-message-ttl)
@type message_ttl: int
@param overflow_behaviour: Overflow behaviour - 'drop-head' ||
'reject-publish'.
https://www.rabbitmq.com/maxlength.html#overflow-behaviour
@type overflow_behaviour: str
@param expires: Queues will expire after a period of time only
when they are not used (e.g. do not have consumers).
This feature can be used together with the auto-delete
queue property. The value is expressed in milliseconds (ms).
Default value is 10 minutes.
https://www.rabbitmq.com/ttl.html#queue-ttl
"""
args = {
'x-max-length': queue_size,
'x-overflow': overflow_behaviour,
'x-message-ttl': message_ttl,
'x-expires': expires
}
result = self._channel.queue_declare(
exclusive=exclusive,
queue=queue_name,
durable=False,
auto_delete=True,
arguments=args)
queue_name = result.method.queue
self.logger.debug('Created queue [{}] [size={}, ttl={}]'.format(
queue_name, queue_size, message_ttl))
return queue_name
def delete_queue(self, queue_name):
self._channel.queue_delete(queue=queue_name)
def _queue_exists_clb(self, arg):
print(arg)
def queue_exists(self, queue_name):
"""Check if a queue exists, given its name.
Args:
queue_name (str): The name of the queue.
Returns:
int: True if queue exists False otherwise.
"""
# resp = self._channel.queue_declare(queue_name, passive=True,
# callback=self._queue_exists_clb)
try:
resp = self._channel.queue_declare(queue_name, passive=True)
except pika.exceptions.ChannelClosedByBroker as exc:
self.connect()
if exc.reply_code == 404: # Not Found
|
else:
self.logger.warning('Queue exists <{}>'.format(queue_name))
return True
def bind_queue(self, exchange_name, queue_name, bind_key):
"""
Bind a queue to and exchange using a bind-key.
@param exchange_name: The name of the exchange (e.g. com.logging).
@type exchange_name: string
@param queue_name: The name of the queue.
@type queue_name: string
@param bind_key: The binding key name.
@type bind_key: string
"""
self.logger.info('Subscribed to topic: {}'.format(bind_key))
try:
self._channel.queue_bind(
exchange=exchange_name, queue=queue_name, routing_key=bind_key)
except Exception as exc:
raise exc
def close(self):
self._graceful_shutdown()
def disconnect(self):
self._graceful_shutdown()
def __del__(self):
self._graceful_shutdown()
class AMQPTransportAsync(object):
CONNECTION_TIMEOUT_SEC = 5
def __init__(self, host='127.0.0.1', port='5672', exchange='amq.topic'):
self._connection = None
self._channel = None
self._closing = False
self.logger = create_logger(self.__class__.__name__)
self._exchange = 'amq.topic'
self._host = host
self._port = port
super(AMQPTransportAsync, self).__init__()
def connect(self):
"""This method connects to RabbitMQ, returning the connection handle.
When the connection is established, the on_connection_open method
will be invoked by pika.
:rtype: pika.SelectConnection
"""
self.logger.info("Connecting to AMQP broker @ [{}:{}] ...".format(
self._host, self._port))
connection = pika.SelectConnection(
pika.URLParameters(host=self.host, port=self.port),
on_open_callback=self.on_connection_open,
on_open_error_callback=self.on_connection_open_error,
on_close_callback=self.on_connection_closed,
stop_ioloop_on_close=False)
self._connection = connection
return connection
def on_connection_open(self, unused_connection):
"""
This method is called by pika once the connection to RabbitMQ has
been established. It passes the handle to the connection object in
case we need it, but in this case, we'll just mark it unused.
:type unused_connection: pika.SelectConnection
"""
self.logger.info('Connection established!')
self.open_channel()
def add_on_connection_close_callback(self):
"""
This method adds an on close callback that will be invoked by pika
when RabbitMQ closes the connection to the publisher unexpectedly.
"""
self.logger.info('Adding connection close callback')
self._connection.add_on_close_callback(self.on_connection_closed)
def on_connection_closed(self, connection, reply_code, reply_text):
"""
This method is invoked by pika when the connection to RabbitMQ is
closed unexpectedly. Since it is unexpected, we will reconnect to
RabbitMQ if it disconnects.
:param pika.connection.Connection connection: The closed connection obj
:param int reply_code: The server provided reply_code if given
:param str reply_text: The server provided reply_text if given
"""
if self._closing:
self._connection.ioloop.stop()
else:
self.logger.warning(
'Connection closed, reopening in 5 seconds: (%s) %s',
reply_code, reply_text)
self._connection.add_timeout(self.CONNECTION_TIMEOUT_SEC,
self.reconnect)
def on_connection_open_error(self, _unused_connection, err):
"""This method is called by pika if the connection to RabbitMQ
can't be established.
:param pika.SelectConnection _unused_connection: The connection
:param Exception err: The error
"""
self.logger.info('Connection open failed: %s', err)
self.reconnect()
def reconnect(self):
"""
Will be invoked by the IOLoop timer if the connection is
closed. See the on_connection_closed method.
"""
# This is the old connection IOLoop instance, stop its ioloop
self._connection.ioloop.stop()
if not self._closing:
# Create a new connection
self._connection = self.connect()
# There is now a new connection, needs a new ioloop to run
self._connection.ioloop.start()
def open_channel(self):
"""
Open a new channel with RabbitMQ by issuing the Channel.Open RPC
command. When RabbitMQ responds that the channel is open, the
on_channel_open callback will be invoked by pika.
"""
self.logger.info('Creating a new channel')
self._connection.channel(on_open_callback=self.on_channel_open)
def on_channel_open(self, channel):
"""This method is invoked by pika when the channel has been opened.
The channel object is passed in so we can make use of it.
Since the channel is now open, we'll declare the exchange to use.
IMPLEMENT IN INHERITED CLASSES!!!!!!!!
:param pika.channel.Channel channel: The channel object
"""
self.logger.info('Channel opened')
self._channel = channel
self.add_on_channel_close_callback()
def add_on_channel_close_callback(self):
"""This method tells pika to call the on_channel_closed method if
RabbitMQ unexpectedly closes the channel.
"""
self.logger.info('Adding channel close callback')
self._channel.add_on_close_callback(self.on_channel_closed)
def on_channel_closed(self, channel, reply_code, reply_text):
"""Invoked by pika when RabbitMQ unexpectedly closes the channel.
Channels are usually closed if you attempt to do something that
violates the protocol, such as re-declare an exchange or queue with
different parameters. In this case, we'll close the connection
to shutdown the object.
:param pika.channel.Channel: The closed channel
:param int reply_code: The numeric reason the channel was closed
:param str reply_text: The text reason the channel was closed
"""
self.logger.warning('Channel %i was closed: (%s) %s', channel,
reply_code, reply_text)
self._connection.close()
def create_exchange(self, exchange_name, exchange_type, on_declareok):
"""
Declare/Create an exchange.
@param exchange_name: The name of the exchange
@type exchange_name: string
@param exchange_type: the type of the exchange
@type exchange_type:
"""
self.logger.debug('Declaring exchange {} [type={}]', exchange_name,
exchange_type)
cb = functools.partial(
on_declareok, userdata=exchange_name)
self._channel.exchange_declare(
exchange=exchange_name,
exchange_type=exchange_type,
callback=cb)
def create_queue(self, queue_name=''):
result = self._channel.queue_declare(exclusive=True, queue=queue_name)
queue_name = result.method.queue
self._queue_name = queue_name
self.logger.info("Created queue [{}]".format(queue_name))
return queue_name
def _bind_queue(self, exchange_name, queue_name, bind_key):
try:
self._channel.queue_bind(
exchange=exchange_name, queue=queue_name, routing_key=bind_key)
except Exception:
self.logger.exception()
def set_qos(self, on_ok):
"""This method sets up the consumer prefetch to only be delivered
one message at a time. The consumer must acknowledge this message
before RabbitMQ will deliver another one. You should experiment
with different prefetch values to achieve desired performance.
"""
self._channel.basic_qos(
prefetch_count=self._prefetch_count, callback=on_ok)
def __del__(self):
self._connection.close()
| return False | conditional_block |
amqp_transport.py | # -*- coding: utf-8 -*-
# Copyright (C) 2020 Panayiotou, Konstantinos <klpanagi@gmail.com>
# Author: Panayiotou, Konstantinos <klpanagi@gmail.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (
absolute_import,
division,
print_function,
unicode_literals
)
import time
import atexit
import signal
import json
import pika
# import ssl
from .r4a_logger import create_logger, LoggingLevel
class RPCMeta(object):
__slots__ = [
'channel', 'method', 'properties'
]
def __init__(self, channel=None, method=None, properties=None):
self.channel = channel
self.method = method
self.properties = properties
class MessageProperties(pika.BasicProperties):
"""Message Properties/Attribures used for sending and receiving messages.
Args:
content_type (str):
content_encoding (str):
timestamp (str):
"""
def __init__(self, content_type=None, content_encoding=None,
timestamp=None, correlation_id=None, reply_to=None,
message_id=None, user_id=None, app_id=None):
"""Constructor."""
if timestamp is None:
timestamp = (time.time() + 0.5) * 1000
timestamp = int(timestamp)
super(MessageProperties, self).__init__(
content_type=content_type,
content_encoding=content_encoding,
timestamp=timestamp,
correlation_id=correlation_id,
reply_to=reply_to,
message_id=str(message_id) if message_id is not None else None,
user_id=str(user_id) if user_id is not None else None,
app_id=str(app_id) if app_id is not None else None
)
class ConnectionParameters(pika.ConnectionParameters):
"""AMQP Connection parameters.
Args:
host (str): Hostname of AMQP broker to connect to.
port (int|str): AMQP broker listening port.
creds (object): Auth Credentials - Credentials instance.
secure (bool): Enable SSL/TLS (AMQPS) - Not supported!!
reconnect_attempts (int): The reconnection attempts to make before
droping and raising an Exception.
retry_delay (float): Time delay between reconnect attempts.
timeout (float): Socket Connection timeout value.
timeout (float): Blocked Connection timeout value.
Set the timeout, in seconds, that the connection may remain blocked
(triggered by Connection.Blocked from broker). If the timeout
expires before connection becomes unblocked, the connection will
be torn down.
heartbeat_timeout (int): Controls AMQP heartbeat
timeout negotiation during connection tuning. An integer value
always overrides the value proposed by broker. Use 0 to deactivate
heartbeats and None to always accept the broker's proposal.
The value passed for timeout is also used to calculate an interval
at which a heartbeat frame is sent to the broker. The interval is
equal to the timeout value divided by two.
channel_max (int): The max permissible number of channels per
connection. Defaults to 128.
"""
__slots__ = [
'host', 'port', 'secure', 'vhost', 'reconnect_attempts', 'retry_delay',
'timeout', 'heartbeat_timeout', 'blocked_connection_timeout', 'creds'
]
def __init__(self, host='127.0.0.1', port='5672', creds=None,
secure=False, vhost='/', reconnect_attempts=5,
retry_delay=2.0, timeout=120, blocked_connection_timeout=None,
heartbeat_timeout=60, channel_max=128):
"""Constructor."""
self.host = host
self.port = port
self.secure = secure
self.vhost = vhost
self.reconnect_attempts = reconnect_attempts
self.retry_delay = retry_delay
self.timeout = timeout
self.blocked_connection_timeout = blocked_connection_timeout
self.heartbeat_timeout = heartbeat_timeout
self.channel_max = channel_max
if creds is None:
creds = Credentials()
super(ConnectionParameters, self).__init__(
host=host,
port=str(port),
credentials=creds,
connection_attempts=reconnect_attempts,
retry_delay=retry_delay,
blocked_connection_timeout=blocked_connection_timeout,
socket_timeout=timeout,
virtual_host=vhost,
heartbeat=heartbeat_timeout,
channel_max=channel_max)
def __str__(self):
_properties = {
'host': self.host,
'port': self.port,
'vhost': self.vhost,
'reconnect_attempts': self.reconnect_attempts,
'retry_delay': self.retry_delay,
'timeout': self.timeout,
'blocked_connection_timeout': self.blocked_connection_timeout,
'heartbeat_timeout': self.heartbeat_timeout,
'channel_max': self.channel_max
}
_str = json.dumps(_properties)
return _str
class AMQPConnection(pika.BlockingConnection):
"""Connection. Thin wrapper around pika.BlockingConnection"""
def __init__(self, conn_params):
self._connection_params = conn_params
self._pika_connection = None
super(AMQPConnection, self).__init__(
parameters=self._connection_params)
class ExchangeTypes(object):
"""AMQP Exchange Types."""
Topic = 'topic'
Direct = 'direct'
Fanout = 'fanout'
Default = ''
class Credentials(pika.PlainCredentials):
"""Connection credentials for authn/authz.
Args:
username (str): The username.
password (str): The password (Basic Authentication).
"""
__slots__ = ['username', 'password']
def __init__(self, username='guest', password='guest'):
"""Constructor."""
super(Credentials, self).__init__(username=username, password=password)
class AMQPTransportSync(object):
"""Broker Interface.
Implements commonly used functionalities. Base class of high-level
implementations such as SubscriberSync and RpcServer.
"""
def __init__(self, *args, **kwargs):
"""Constructor."""
self._connection = None
self._channel = None
self._closing = False
self._debug = False
self.logger = None
if 'logger' in kwargs:
self.logger = kwargs.pop('logger')
else:
self.logger = create_logger('{}-{}'.format(
self.__class__.__name__, self._name))
if 'debug' in kwargs:
self.debug = kwargs.pop('debug')
else:
self.debug = False
if 'connection_params' in kwargs:
self.connection_params = kwargs.pop('connection_params')
else:
# Default Connection Parameters
self.connection_params = ConnectionParameters()
if 'creds' in kwargs:
self.credentials = kwargs.pop('creds')
self.connection_params.credentials = self.credentials
else:
self.credentials = self.connection_params.credentials
# So that connections do not go zombie
atexit.register(self._graceful_shutdown)
@property
def channel(self):
return self._channel
@property
def connection(self):
return self._connection
@property
def debug(self):
"""Debug mode flag."""
return self._debug
@debug.setter
def debug(self, val):
if not isinstance(val, bool):
raise TypeError('Value should be boolean')
self._debug = val
if self._debug is True:
self.logger.setLevel(LoggingLevel.DEBUG)
else:
self.logger.setLevel(LoggingLevel.INFO)
def connect(self):
"""Connect to the AMQP broker. Creates a new channel."""
if self._connection is not None:
self.logger.debug('Using allready existing connection [{}]'.format(
self._connection))
# Create a new communication channel
self._channel = self._connection.channel()
return True
try:
# Create a new connection
self.logger.debug(
'Connecting to AMQP broker @ [{}:{}, vhost={}]...'.format(
self.connection_params.host,
self.connection_params.port,
self.connection_params.vhost))
self.logger.debug('Connection parameters:')
self.logger.debug(self.connection_params)
self._connection = AMQPConnection(self.connection_params)
# Create a new communication channel
self._channel = self._connection.channel()
self.logger.info(
'Connected to AMQP broker @ [{}:{}, vhost={}]'.format(
self.connection_params.host,
self.connection_params.port,
self.connection_params.vhost))
except pika.exceptions.ConnectionClosed:
self.logger.debug('Connection timed out. Reconnecting...')
return self.connect()
except pika.exceptions.AMQPConnectionError:
self.logger.debug('Connection error. Reconnecting...')
return self.connect()
except Exception as exc:
self.logger.exception('')
raise (exc)
return self._channel
def process_amqp_events(self):
"""Force process amqp events, such as heartbeat packages."""
self.connection.process_data_events()
def _signal_handler(self, signum, frame):
self.logger.info('Signal received: ', signum)
self._graceful_shutdown()
def _graceful_shutdown(self):
if not self.connection:
return
if self._channel.is_closed:
# self.logger.warning('Channel is allready closed')
return
self.logger.debug('Invoking a graceful shutdown...')
self._channel.stop_consuming()
self._channel.close()
self.logger.debug('Channel closed!')
def exchange_exists(self, exchange_name):
resp = self._channel.exchange_declare(
exchange=exchange_name,
passive=True, # Perform a declare or just to see if it exists
)
self.logger.debug('Exchange exists result: {}'.format(resp))
return resp
def create_exchange(self, exchange_name, exchange_type, internal=None):
"""
Create a new exchange.
@param exchange_name: The name of the exchange (e.g. com.logging).
@type exchange_name: string
@param exchange_type: The type of the exchange (e.g. 'topic').
@type exchange_type: string
"""
self._channel.exchange_declare(
exchange=exchange_name,
durable=True, # Survive reboot
passive=False, # Perform a declare or just to see if it exists
internal=internal, # Can only be published to by other exchanges
exchange_type=exchange_type
)
self.logger.debug('Created exchange: [name={}, type={}]'.format(
exchange_name, exchange_type))
def create_queue(self, queue_name='', exclusive=True, queue_size=10,
message_ttl=60000, overflow_behaviour='drop-head',
expires=600000):
"""
Create a new queue.
@param queue_name: The name of the queue.
@type queue_name: string
@param exclusive: Only allow access by the current connection.
@type exclusive: bool
@param queue_size: The size of the queue
@type queue_size: int
@param message_ttl: Per-queue message time-to-live
(https://www.rabbitmq.com/ttl.html#per-queue-message-ttl)
@type message_ttl: int
@param overflow_behaviour: Overflow behaviour - 'drop-head' ||
'reject-publish'.
https://www.rabbitmq.com/maxlength.html#overflow-behaviour
@type overflow_behaviour: str
@param expires: Queues will expire after a period of time only
when they are not used (e.g. do not have consumers).
This feature can be used together with the auto-delete
queue property. The value is expressed in milliseconds (ms).
Default value is 10 minutes.
https://www.rabbitmq.com/ttl.html#queue-ttl
"""
args = {
'x-max-length': queue_size,
'x-overflow': overflow_behaviour,
'x-message-ttl': message_ttl,
'x-expires': expires
}
result = self._channel.queue_declare(
exclusive=exclusive,
queue=queue_name,
durable=False,
auto_delete=True,
arguments=args)
queue_name = result.method.queue
self.logger.debug('Created queue [{}] [size={}, ttl={}]'.format(
queue_name, queue_size, message_ttl))
return queue_name
def delete_queue(self, queue_name):
self._channel.queue_delete(queue=queue_name)
def _queue_exists_clb(self, arg):
print(arg)
def queue_exists(self, queue_name):
"""Check if a queue exists, given its name.
Args:
queue_name (str): The name of the queue.
Returns:
int: True if queue exists False otherwise.
"""
# resp = self._channel.queue_declare(queue_name, passive=True,
# callback=self._queue_exists_clb)
try:
resp = self._channel.queue_declare(queue_name, passive=True)
except pika.exceptions.ChannelClosedByBroker as exc:
self.connect()
if exc.reply_code == 404: # Not Found
return False
else:
self.logger.warning('Queue exists <{}>'.format(queue_name))
return True
def bind_queue(self, exchange_name, queue_name, bind_key):
"""
Bind a queue to and exchange using a bind-key.
@param exchange_name: The name of the exchange (e.g. com.logging).
@type exchange_name: string
@param queue_name: The name of the queue.
@type queue_name: string
@param bind_key: The binding key name.
@type bind_key: string
"""
self.logger.info('Subscribed to topic: {}'.format(bind_key))
try:
self._channel.queue_bind(
exchange=exchange_name, queue=queue_name, routing_key=bind_key)
except Exception as exc:
raise exc
def close(self):
self._graceful_shutdown()
def disconnect(self):
self._graceful_shutdown()
def __del__(self):
self._graceful_shutdown()
class AMQPTransportAsync(object):
| CONNECTION_TIMEOUT_SEC = 5
def __init__(self, host='127.0.0.1', port='5672', exchange='amq.topic'):
self._connection = None
self._channel = None
self._closing = False
self.logger = create_logger(self.__class__.__name__)
self._exchange = 'amq.topic'
self._host = host
self._port = port
super(AMQPTransportAsync, self).__init__()
def connect(self):
"""This method connects to RabbitMQ, returning the connection handle.
When the connection is established, the on_connection_open method
will be invoked by pika.
:rtype: pika.SelectConnection
"""
self.logger.info("Connecting to AMQP broker @ [{}:{}] ...".format(
self._host, self._port))
connection = pika.SelectConnection(
pika.URLParameters(host=self.host, port=self.port),
on_open_callback=self.on_connection_open,
on_open_error_callback=self.on_connection_open_error,
on_close_callback=self.on_connection_closed,
stop_ioloop_on_close=False)
self._connection = connection
return connection
def on_connection_open(self, unused_connection):
"""
This method is called by pika once the connection to RabbitMQ has
been established. It passes the handle to the connection object in
case we need it, but in this case, we'll just mark it unused.
:type unused_connection: pika.SelectConnection
"""
self.logger.info('Connection established!')
self.open_channel()
def add_on_connection_close_callback(self):
"""
This method adds an on close callback that will be invoked by pika
when RabbitMQ closes the connection to the publisher unexpectedly.
"""
self.logger.info('Adding connection close callback')
self._connection.add_on_close_callback(self.on_connection_closed)
def on_connection_closed(self, connection, reply_code, reply_text):
"""
This method is invoked by pika when the connection to RabbitMQ is
closed unexpectedly. Since it is unexpected, we will reconnect to
RabbitMQ if it disconnects.
:param pika.connection.Connection connection: The closed connection obj
:param int reply_code: The server provided reply_code if given
:param str reply_text: The server provided reply_text if given
"""
if self._closing:
self._connection.ioloop.stop()
else:
self.logger.warning(
'Connection closed, reopening in 5 seconds: (%s) %s',
reply_code, reply_text)
self._connection.add_timeout(self.CONNECTION_TIMEOUT_SEC,
self.reconnect)
def on_connection_open_error(self, _unused_connection, err):
"""This method is called by pika if the connection to RabbitMQ
can't be established.
:param pika.SelectConnection _unused_connection: The connection
:param Exception err: The error
"""
self.logger.info('Connection open failed: %s', err)
self.reconnect()
def reconnect(self):
"""
Will be invoked by the IOLoop timer if the connection is
closed. See the on_connection_closed method.
"""
# This is the old connection IOLoop instance, stop its ioloop
self._connection.ioloop.stop()
if not self._closing:
# Create a new connection
self._connection = self.connect()
# There is now a new connection, needs a new ioloop to run
self._connection.ioloop.start()
def open_channel(self):
"""
Open a new channel with RabbitMQ by issuing the Channel.Open RPC
command. When RabbitMQ responds that the channel is open, the
on_channel_open callback will be invoked by pika.
"""
self.logger.info('Creating a new channel')
self._connection.channel(on_open_callback=self.on_channel_open)
def on_channel_open(self, channel):
"""This method is invoked by pika when the channel has been opened.
The channel object is passed in so we can make use of it.
Since the channel is now open, we'll declare the exchange to use.
IMPLEMENT IN INHERITED CLASSES!!!!!!!!
:param pika.channel.Channel channel: The channel object
"""
self.logger.info('Channel opened')
self._channel = channel
self.add_on_channel_close_callback()
def add_on_channel_close_callback(self):
"""This method tells pika to call the on_channel_closed method if
RabbitMQ unexpectedly closes the channel.
"""
self.logger.info('Adding channel close callback')
self._channel.add_on_close_callback(self.on_channel_closed)
def on_channel_closed(self, channel, reply_code, reply_text):
"""Invoked by pika when RabbitMQ unexpectedly closes the channel.
Channels are usually closed if you attempt to do something that
violates the protocol, such as re-declare an exchange or queue with
different parameters. In this case, we'll close the connection
to shutdown the object.
:param pika.channel.Channel: The closed channel
:param int reply_code: The numeric reason the channel was closed
:param str reply_text: The text reason the channel was closed
"""
self.logger.warning('Channel %i was closed: (%s) %s', channel,
reply_code, reply_text)
self._connection.close()
def create_exchange(self, exchange_name, exchange_type, on_declareok):
"""
Declare/Create an exchange.
@param exchange_name: The name of the exchange
@type exchange_name: string
@param exchange_type: the type of the exchange
@type exchange_type:
"""
self.logger.debug('Declaring exchange {} [type={}]', exchange_name,
exchange_type)
cb = functools.partial(
on_declareok, userdata=exchange_name)
self._channel.exchange_declare(
exchange=exchange_name,
exchange_type=exchange_type,
callback=cb)
def create_queue(self, queue_name=''):
result = self._channel.queue_declare(exclusive=True, queue=queue_name)
queue_name = result.method.queue
self._queue_name = queue_name
self.logger.info("Created queue [{}]".format(queue_name))
return queue_name
def _bind_queue(self, exchange_name, queue_name, bind_key):
try:
self._channel.queue_bind(
exchange=exchange_name, queue=queue_name, routing_key=bind_key)
except Exception:
self.logger.exception()
def set_qos(self, on_ok):
"""This method sets up the consumer prefetch to only be delivered
one message at a time. The consumer must acknowledge this message
before RabbitMQ will deliver another one. You should experiment
with different prefetch values to achieve desired performance.
"""
self._channel.basic_qos(
prefetch_count=self._prefetch_count, callback=on_ok)
def __del__(self):
self._connection.close() | identifier_body | |
amqp_transport.py | # -*- coding: utf-8 -*-
# Copyright (C) 2020 Panayiotou, Konstantinos <klpanagi@gmail.com>
# Author: Panayiotou, Konstantinos <klpanagi@gmail.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (
absolute_import,
division,
print_function,
unicode_literals
)
import time
import atexit
import signal
import json
import pika
# import ssl
from .r4a_logger import create_logger, LoggingLevel
class RPCMeta(object):
__slots__ = [
'channel', 'method', 'properties'
]
def __init__(self, channel=None, method=None, properties=None):
self.channel = channel
self.method = method
self.properties = properties
class | (pika.BasicProperties):
"""Message Properties/Attribures used for sending and receiving messages.
Args:
content_type (str):
content_encoding (str):
timestamp (str):
"""
def __init__(self, content_type=None, content_encoding=None,
timestamp=None, correlation_id=None, reply_to=None,
message_id=None, user_id=None, app_id=None):
"""Constructor."""
if timestamp is None:
timestamp = (time.time() + 0.5) * 1000
timestamp = int(timestamp)
super(MessageProperties, self).__init__(
content_type=content_type,
content_encoding=content_encoding,
timestamp=timestamp,
correlation_id=correlation_id,
reply_to=reply_to,
message_id=str(message_id) if message_id is not None else None,
user_id=str(user_id) if user_id is not None else None,
app_id=str(app_id) if app_id is not None else None
)
class ConnectionParameters(pika.ConnectionParameters):
"""AMQP Connection parameters.
Args:
host (str): Hostname of AMQP broker to connect to.
port (int|str): AMQP broker listening port.
creds (object): Auth Credentials - Credentials instance.
secure (bool): Enable SSL/TLS (AMQPS) - Not supported!!
reconnect_attempts (int): The reconnection attempts to make before
droping and raising an Exception.
retry_delay (float): Time delay between reconnect attempts.
timeout (float): Socket Connection timeout value.
timeout (float): Blocked Connection timeout value.
Set the timeout, in seconds, that the connection may remain blocked
(triggered by Connection.Blocked from broker). If the timeout
expires before connection becomes unblocked, the connection will
be torn down.
heartbeat_timeout (int): Controls AMQP heartbeat
timeout negotiation during connection tuning. An integer value
always overrides the value proposed by broker. Use 0 to deactivate
heartbeats and None to always accept the broker's proposal.
The value passed for timeout is also used to calculate an interval
at which a heartbeat frame is sent to the broker. The interval is
equal to the timeout value divided by two.
channel_max (int): The max permissible number of channels per
connection. Defaults to 128.
"""
__slots__ = [
'host', 'port', 'secure', 'vhost', 'reconnect_attempts', 'retry_delay',
'timeout', 'heartbeat_timeout', 'blocked_connection_timeout', 'creds'
]
def __init__(self, host='127.0.0.1', port='5672', creds=None,
secure=False, vhost='/', reconnect_attempts=5,
retry_delay=2.0, timeout=120, blocked_connection_timeout=None,
heartbeat_timeout=60, channel_max=128):
"""Constructor."""
self.host = host
self.port = port
self.secure = secure
self.vhost = vhost
self.reconnect_attempts = reconnect_attempts
self.retry_delay = retry_delay
self.timeout = timeout
self.blocked_connection_timeout = blocked_connection_timeout
self.heartbeat_timeout = heartbeat_timeout
self.channel_max = channel_max
if creds is None:
creds = Credentials()
super(ConnectionParameters, self).__init__(
host=host,
port=str(port),
credentials=creds,
connection_attempts=reconnect_attempts,
retry_delay=retry_delay,
blocked_connection_timeout=blocked_connection_timeout,
socket_timeout=timeout,
virtual_host=vhost,
heartbeat=heartbeat_timeout,
channel_max=channel_max)
def __str__(self):
_properties = {
'host': self.host,
'port': self.port,
'vhost': self.vhost,
'reconnect_attempts': self.reconnect_attempts,
'retry_delay': self.retry_delay,
'timeout': self.timeout,
'blocked_connection_timeout': self.blocked_connection_timeout,
'heartbeat_timeout': self.heartbeat_timeout,
'channel_max': self.channel_max
}
_str = json.dumps(_properties)
return _str
class AMQPConnection(pika.BlockingConnection):
"""Connection. Thin wrapper around pika.BlockingConnection"""
def __init__(self, conn_params):
self._connection_params = conn_params
self._pika_connection = None
super(AMQPConnection, self).__init__(
parameters=self._connection_params)
class ExchangeTypes(object):
"""AMQP Exchange Types."""
Topic = 'topic'
Direct = 'direct'
Fanout = 'fanout'
Default = ''
class Credentials(pika.PlainCredentials):
"""Connection credentials for authn/authz.
Args:
username (str): The username.
password (str): The password (Basic Authentication).
"""
__slots__ = ['username', 'password']
def __init__(self, username='guest', password='guest'):
"""Constructor."""
super(Credentials, self).__init__(username=username, password=password)
class AMQPTransportSync(object):
"""Broker Interface.
Implements commonly used functionalities. Base class of high-level
implementations such as SubscriberSync and RpcServer.
"""
def __init__(self, *args, **kwargs):
"""Constructor."""
self._connection = None
self._channel = None
self._closing = False
self._debug = False
self.logger = None
if 'logger' in kwargs:
self.logger = kwargs.pop('logger')
else:
self.logger = create_logger('{}-{}'.format(
self.__class__.__name__, self._name))
if 'debug' in kwargs:
self.debug = kwargs.pop('debug')
else:
self.debug = False
if 'connection_params' in kwargs:
self.connection_params = kwargs.pop('connection_params')
else:
# Default Connection Parameters
self.connection_params = ConnectionParameters()
if 'creds' in kwargs:
self.credentials = kwargs.pop('creds')
self.connection_params.credentials = self.credentials
else:
self.credentials = self.connection_params.credentials
# So that connections do not go zombie
atexit.register(self._graceful_shutdown)
@property
def channel(self):
return self._channel
@property
def connection(self):
return self._connection
@property
def debug(self):
"""Debug mode flag."""
return self._debug
@debug.setter
def debug(self, val):
if not isinstance(val, bool):
raise TypeError('Value should be boolean')
self._debug = val
if self._debug is True:
self.logger.setLevel(LoggingLevel.DEBUG)
else:
self.logger.setLevel(LoggingLevel.INFO)
def connect(self):
"""Connect to the AMQP broker. Creates a new channel."""
if self._connection is not None:
self.logger.debug('Using allready existing connection [{}]'.format(
self._connection))
# Create a new communication channel
self._channel = self._connection.channel()
return True
try:
# Create a new connection
self.logger.debug(
'Connecting to AMQP broker @ [{}:{}, vhost={}]...'.format(
self.connection_params.host,
self.connection_params.port,
self.connection_params.vhost))
self.logger.debug('Connection parameters:')
self.logger.debug(self.connection_params)
self._connection = AMQPConnection(self.connection_params)
# Create a new communication channel
self._channel = self._connection.channel()
self.logger.info(
'Connected to AMQP broker @ [{}:{}, vhost={}]'.format(
self.connection_params.host,
self.connection_params.port,
self.connection_params.vhost))
except pika.exceptions.ConnectionClosed:
self.logger.debug('Connection timed out. Reconnecting...')
return self.connect()
except pika.exceptions.AMQPConnectionError:
self.logger.debug('Connection error. Reconnecting...')
return self.connect()
except Exception as exc:
self.logger.exception('')
raise (exc)
return self._channel
def process_amqp_events(self):
"""Force process amqp events, such as heartbeat packages."""
self.connection.process_data_events()
def _signal_handler(self, signum, frame):
self.logger.info('Signal received: ', signum)
self._graceful_shutdown()
def _graceful_shutdown(self):
if not self.connection:
return
if self._channel.is_closed:
# self.logger.warning('Channel is allready closed')
return
self.logger.debug('Invoking a graceful shutdown...')
self._channel.stop_consuming()
self._channel.close()
self.logger.debug('Channel closed!')
def exchange_exists(self, exchange_name):
resp = self._channel.exchange_declare(
exchange=exchange_name,
passive=True, # Perform a declare or just to see if it exists
)
self.logger.debug('Exchange exists result: {}'.format(resp))
return resp
    def create_exchange(self, exchange_name, exchange_type, internal=None):
        """
        Create a new exchange.

        Declares a durable (reboot-surviving) exchange on the current
        channel.

        @param exchange_name: The name of the exchange (e.g. com.logging).
        @type exchange_name: string

        @param exchange_type: The type of the exchange (e.g. 'topic').
        @type exchange_type: string

        @param internal: If True, the exchange may only be published to by
            other exchanges (clients cannot publish to it directly).
        @type internal: bool
        """
        self._channel.exchange_declare(
            exchange=exchange_name,
            durable=True, # Survive reboot
            passive=False, # Perform a declare or just to see if it exists
            internal=internal, # Can only be published to by other exchanges
            exchange_type=exchange_type
        )
        self.logger.debug('Created exchange: [name={}, type={}]'.format(
            exchange_name, exchange_type))
def create_queue(self, queue_name='', exclusive=True, queue_size=10,
message_ttl=60000, overflow_behaviour='drop-head',
expires=600000):
"""
Create a new queue.
@param queue_name: The name of the queue.
@type queue_name: string
@param exclusive: Only allow access by the current connection.
@type exclusive: bool
@param queue_size: The size of the queue
@type queue_size: int
@param message_ttl: Per-queue message time-to-live
(https://www.rabbitmq.com/ttl.html#per-queue-message-ttl)
@type message_ttl: int
@param overflow_behaviour: Overflow behaviour - 'drop-head' ||
'reject-publish'.
https://www.rabbitmq.com/maxlength.html#overflow-behaviour
@type overflow_behaviour: str
@param expires: Queues will expire after a period of time only
when they are not used (e.g. do not have consumers).
This feature can be used together with the auto-delete
queue property. The value is expressed in milliseconds (ms).
Default value is 10 minutes.
https://www.rabbitmq.com/ttl.html#queue-ttl
"""
args = {
'x-max-length': queue_size,
'x-overflow': overflow_behaviour,
'x-message-ttl': message_ttl,
'x-expires': expires
}
result = self._channel.queue_declare(
exclusive=exclusive,
queue=queue_name,
durable=False,
auto_delete=True,
arguments=args)
queue_name = result.method.queue
self.logger.debug('Created queue [{}] [size={}, ttl={}]'.format(
queue_name, queue_size, message_ttl))
return queue_name
    def delete_queue(self, queue_name):
        """Delete a queue by name.

        @param queue_name: The name of the queue to delete.
        @type queue_name: string
        """
        self._channel.queue_delete(queue=queue_name)
def _queue_exists_clb(self, arg):
print(arg)
def queue_exists(self, queue_name):
"""Check if a queue exists, given its name.
Args:
queue_name (str): The name of the queue.
Returns:
int: True if queue exists False otherwise.
"""
# resp = self._channel.queue_declare(queue_name, passive=True,
# callback=self._queue_exists_clb)
try:
resp = self._channel.queue_declare(queue_name, passive=True)
except pika.exceptions.ChannelClosedByBroker as exc:
self.connect()
if exc.reply_code == 404: # Not Found
return False
else:
self.logger.warning('Queue exists <{}>'.format(queue_name))
return True
def bind_queue(self, exchange_name, queue_name, bind_key):
"""
Bind a queue to and exchange using a bind-key.
@param exchange_name: The name of the exchange (e.g. com.logging).
@type exchange_name: string
@param queue_name: The name of the queue.
@type queue_name: string
@param bind_key: The binding key name.
@type bind_key: string
"""
self.logger.info('Subscribed to topic: {}'.format(bind_key))
try:
self._channel.queue_bind(
exchange=exchange_name, queue=queue_name, routing_key=bind_key)
except Exception as exc:
raise exc
    def close(self):
        """Close the transport by gracefully shutting down the channel."""
        self._graceful_shutdown()
    def disconnect(self):
        """Alias of close(): gracefully shut down the channel."""
        self._graceful_shutdown()
def __del__(self):
self._graceful_shutdown()
class AMQPTransportAsync(object):
    """Asynchronous, callback-driven AMQP transport built on top of
    pika.SelectConnection.

    Subclasses are expected to extend `on_channel_open` and to define
    `_prefetch_count` before `set_qos` is called.

    NOTE(review): the callback signatures (reply_code/reply_text) and
    `add_timeout`/`stop_ioloop_on_close` target pika < 1.0 -- confirm the
    pinned pika version.
    """
    # Delay (seconds) before reconnecting after an unexpected close.
    CONNECTION_TIMEOUT_SEC = 5

    def __init__(self, host='127.0.0.1', port='5672', exchange='amq.topic'):
        """
        @param host: Hostname or IP of the AMQP broker.
        @type host: string

        @param port: Port of the AMQP broker.
        @type port: string

        @param exchange: Name of the default exchange.
        @type exchange: string
        """
        self._connection = None
        self._channel = None
        self._closing = False
        self.logger = create_logger(self.__class__.__name__)
        # Bug fix: the original hard-coded 'amq.topic' here, silently
        # ignoring the `exchange` argument. The default value preserves the
        # old behavior for existing callers.
        self._exchange = exchange
        self._host = host
        self._port = port
        super(AMQPTransportAsync, self).__init__()

    def connect(self):
        """This method connects to RabbitMQ, returning the connection handle.
        When the connection is established, the on_connection_open method
        will be invoked by pika.

        :rtype: pika.SelectConnection
        """
        self.logger.info("Connecting to AMQP broker @ [{}:{}] ...".format(
            self._host, self._port))
        # Bug fix: the original called pika.URLParameters(host=..., port=...)
        # using the non-existent attributes self.host/self.port.
        # URLParameters takes a single URL string; ConnectionParameters is
        # the correct class for host/port keyword arguments.
        connection = pika.SelectConnection(
            pika.ConnectionParameters(host=self._host, port=self._port),
            on_open_callback=self.on_connection_open,
            on_open_error_callback=self.on_connection_open_error,
            on_close_callback=self.on_connection_closed,
            stop_ioloop_on_close=False)
        self._connection = connection
        return connection

    def on_connection_open(self, unused_connection):
        """
        This method is called by pika once the connection to RabbitMQ has
        been established. It passes the handle to the connection object in
        case we need it, but in this case, we'll just mark it unused.

        :type unused_connection: pika.SelectConnection
        """
        self.logger.info('Connection established!')
        self.open_channel()

    def add_on_connection_close_callback(self):
        """
        This method adds an on close callback that will be invoked by pika
        when RabbitMQ closes the connection to the publisher unexpectedly.
        """
        self.logger.info('Adding connection close callback')
        self._connection.add_on_close_callback(self.on_connection_closed)

    def on_connection_closed(self, connection, reply_code, reply_text):
        """
        This method is invoked by pika when the connection to RabbitMQ is
        closed unexpectedly. Since it is unexpected, we will reconnect to
        RabbitMQ if it disconnects.

        :param pika.connection.Connection connection: The closed connection obj
        :param int reply_code: The server provided reply_code if given
        :param str reply_text: The server provided reply_text if given
        """
        if self._closing:
            # Deliberate shutdown: stop the ioloop and do not reconnect.
            self._connection.ioloop.stop()
        else:
            self.logger.warning(
                'Connection closed, reopening in 5 seconds: (%s) %s',
                reply_code, reply_text)
            # NOTE(review): add_timeout is the pika < 1.0 API.
            self._connection.add_timeout(self.CONNECTION_TIMEOUT_SEC,
                                         self.reconnect)

    def on_connection_open_error(self, _unused_connection, err):
        """This method is called by pika if the connection to RabbitMQ
        can't be established.

        :param pika.SelectConnection _unused_connection: The connection
        :param Exception err: The error
        """
        self.logger.info('Connection open failed: %s', err)
        self.reconnect()

    def reconnect(self):
        """
        Will be invoked by the IOLoop timer if the connection is
        closed. See the on_connection_closed method.
        """
        # This is the old connection IOLoop instance, stop its ioloop
        self._connection.ioloop.stop()
        if not self._closing:
            # Create a new connection
            self._connection = self.connect()
            # There is now a new connection, needs a new ioloop to run
            self._connection.ioloop.start()

    def open_channel(self):
        """
        Open a new channel with RabbitMQ by issuing the Channel.Open RPC
        command. When RabbitMQ responds that the channel is open, the
        on_channel_open callback will be invoked by pika.
        """
        self.logger.info('Creating a new channel')
        self._connection.channel(on_open_callback=self.on_channel_open)

    def on_channel_open(self, channel):
        """This method is invoked by pika when the channel has been opened.
        The channel object is passed in so we can make use of it.
        Since the channel is now open, we'll declare the exchange to use.

        IMPLEMENT IN INHERITED CLASSES!!!!!!!!

        :param pika.channel.Channel channel: The channel object
        """
        self.logger.info('Channel opened')
        self._channel = channel
        self.add_on_channel_close_callback()

    def add_on_channel_close_callback(self):
        """This method tells pika to call the on_channel_closed method if
        RabbitMQ unexpectedly closes the channel.
        """
        self.logger.info('Adding channel close callback')
        self._channel.add_on_close_callback(self.on_channel_closed)

    def on_channel_closed(self, channel, reply_code, reply_text):
        """Invoked by pika when RabbitMQ unexpectedly closes the channel.
        Channels are usually closed if you attempt to do something that
        violates the protocol, such as re-declare an exchange or queue with
        different parameters. In this case, we'll close the connection
        to shutdown the object.

        :param pika.channel.Channel: The closed channel
        :param int reply_code: The numeric reason the channel was closed
        :param str reply_text: The text reason the channel was closed
        """
        self.logger.warning('Channel %i was closed: (%s) %s', channel,
                            reply_code, reply_text)
        self._connection.close()

    def create_exchange(self, exchange_name, exchange_type, on_declareok):
        """
        Declare/Create an exchange.

        @param exchange_name: The name of the exchange
        @type exchange_name: string

        @param exchange_type: the type of the exchange
        @type exchange_type: string

        @param on_declareok: Callback invoked (with userdata=exchange_name)
            when the Exchange.Declare RPC completes.
        """
        # Bug fix: the original passed '{}'-style placeholders as separate
        # logger arguments, which the logging module does not interpolate.
        self.logger.debug('Declaring exchange {} [type={}]'.format(
            exchange_name, exchange_type))
        cb = functools.partial(
            on_declareok, userdata=exchange_name)
        self._channel.exchange_declare(
            exchange=exchange_name,
            exchange_type=exchange_type,
            callback=cb)

    def create_queue(self, queue_name=''):
        """Declare an exclusive queue; returns (and records) its name.

        @param queue_name: The queue name; empty lets the broker pick one.
        @type queue_name: string
        """
        result = self._channel.queue_declare(exclusive=True, queue=queue_name)
        queue_name = result.method.queue
        self._queue_name = queue_name
        self.logger.info("Created queue [{}]".format(queue_name))
        return queue_name

    def _bind_queue(self, exchange_name, queue_name, bind_key):
        """Bind a queue to an exchange using a binding key."""
        try:
            self._channel.queue_bind(
                exchange=exchange_name, queue=queue_name, routing_key=bind_key)
        except Exception:
            # Bug fix: the original called self.logger.exception() with no
            # message argument, which itself raises TypeError.
            self.logger.exception(
                'Failed to bind queue [{}] to exchange [{}]'.format(
                    queue_name, exchange_name))

    def set_qos(self, on_ok):
        """This method sets up the consumer prefetch to only be delivered
        one message at a time. The consumer must acknowledge this message
        before RabbitMQ will deliver another one. You should experiment
        with different prefetch values to achieve desired performance.

        @param on_ok: Callback invoked when the Basic.QoS RPC completes.
        """
        # NOTE(review): `_prefetch_count` is not defined by this class;
        # subclasses must set it before calling set_qos().
        self._channel.basic_qos(
            prefetch_count=self._prefetch_count, callback=on_ok)

    def __del__(self):
        """Best-effort close of the connection on garbage collection."""
        try:
            # Robustness fix: `_connection` is None until connect() is
            # called; the original raised AttributeError in that case.
            if self._connection is not None:
                self._connection.close()
        except Exception:
            pass
| MessageProperties | identifier_name |
path.rs | //! This module contains code for abstracting object locations that work
//! across different backing implementations and platforms.
use itertools::Itertools;
use percent_encoding::{percent_decode_str, percent_encode, AsciiSet, CONTROLS};
use std::path::PathBuf;
/// Universal interface for handling paths and locations for objects and
/// directories in the object store.
///
/// It allows IOx to be completely decoupled from the underlying object store
/// implementations.
///
/// Deliberately does not implement `Display` or `ToString`! Use one of the
/// converters.
#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Debug, Default)]
pub struct ObjectStorePath {
    /// The ordered, percent-encoded components of the path; contains no
    /// delimiter characters.
    parts: Vec<PathPart>,
}
impl ObjectStorePath {
    /// For use when receiving a path from an object store API directly, not
    /// when building a path. Assumes DELIMITER is the separator.
    ///
    /// TODO: Improve performance by implementing a CoW-type model to delay
    /// parsing until needed TODO: This should only be available to cloud
    /// storage
    pub fn from_cloud_unchecked(path: impl Into<String>) -> Self {
        let path = path.into();
        Self {
            // "unchecked": parts are wrapped directly, with no
            // percent-encoding applied.
            parts: path
                .split_terminator(DELIMITER)
                .map(|s| PathPart(s.to_string()))
                .collect(),
        }
    }
    /// For use when receiving a path from a filesystem directly, not
    /// when building a path. Uses the standard library's path splitting
    /// implementation to separate into parts.
    ///
    /// NOTE(review): components that are not valid UTF-8 fail
    /// `into_string()` and are silently dropped by `flat_map`; confirm
    /// that is intended.
    pub fn from_path_buf_unchecked(path: impl Into<PathBuf>) -> Self {
        let path = path.into();
        Self {
            parts: path
                .iter()
                .flat_map(|s| s.to_os_string().into_string().map(PathPart))
                .collect(),
        }
    }
    /// Add a part to the end of the path, encoding any restricted characters.
    pub fn push(&mut self, part: impl Into<String>) {
        let part = part.into();
        // The `From<&str> for PathPart` impl performs the percent-encoding.
        self.parts.push((&*part).into());
    }
    /// Add a `PathPart` to the end of the path. Infallible because the
    /// `PathPart` should already have been checked for restricted
    /// characters.
    pub fn push_part(&mut self, part: &PathPart) {
        self.parts.push(part.to_owned());
    }
    /// Add the parts of `ObjectStorePath` to the end of the path. Notably does
    /// *not* behave as `PathBuf::push` does: no existing part of `self`
    /// will be replaced as part of this call.
    pub fn push_path(&mut self, path: &Self) {
        self.parts.extend_from_slice(&path.parts);
    }
    /// Push a bunch of parts in one go.
    pub fn push_all<'a>(&mut self, parts: impl AsRef<[&'a str]>) {
        // Each `&str` goes through `From<&str> for PathPart`, so restricted
        // characters are encoded.
        self.parts.extend(parts.as_ref().iter().map(|&v| v.into()));
    }
    /// Return the component parts of the path.
    pub fn as_parts(&self) -> &[PathPart] {
        self.parts.as_ref()
    }
    /// Pops a part from the path and returns it, or `None` if it's empty.
    ///
    /// NOTE(review): not yet implemented; calling this panics.
    pub fn pop(&mut self) -> Option<&PathPart> {
        unimplemented!()
    }
    /// Determines whether `prefix` is a prefix of `self`.
    ///
    /// A trailing *partial* part in `prefix` still matches if the
    /// corresponding part of `self` starts with it (e.g. prefix
    /// `foo/ba` matches path `foo/bar/baz`), as exercised by the tests
    /// below.
    pub fn starts_with(&self, prefix: &Self) -> bool {
        let diff = itertools::diff_with(self.parts.iter(), prefix.parts.iter(), |a, b| a == b);
        match diff {
            // Both sequences were fully equal.
            None => true,
            // `prefix` ran out first and every part matched.
            Some(itertools::Diff::Shorter(..)) => true,
            Some(itertools::Diff::FirstMismatch(_, mut remaining_self, mut remaining_prefix)) => {
                let first_prefix = remaining_prefix.next().expect("must be at least one value");
                // there must not be any other remaining parts in the prefix
                remaining_prefix.next().is_none()
                // and the next item in self must start with the last item in the prefix
                && remaining_self
                    .next()
                    .expect("must be at least one value")
                    .0
                    .starts_with(&first_prefix.0)
            }
            // `self` ran out first: it cannot contain the whole prefix.
            _ => false,
        }
    }
    /// Returns delimiter-separated parts contained in `self` after `prefix`.
    ///
    /// NOTE(review): not yet implemented; calling this panics.
    pub fn parts_after_prefix(&self, _prefix: &Self) -> &[PathPart] {
        unimplemented!()
    }
}
// TODO: I made these structs rather than functions because I could see
// `convert` being part of a trait, possibly, but that seemed a bit overly
// complex for now.
/// Converts `ObjectStorePath`s to `String`s that are appropriate for use as
/// locations in cloud storage.
#[derive(Debug, Clone, Copy)]
pub struct CloudConverter {}

impl CloudConverter {
    /// Creates a cloud storage location by joining this `ObjectStorePath`'s
    /// parts with `DELIMITER`
    pub fn convert(object_store_path: &ObjectStorePath) -> String {
        let mut location = String::new();
        for (i, part) in object_store_path.parts.iter().enumerate() {
            if i > 0 {
                location.push_str(DELIMITER);
            }
            location.push_str(&part.0);
        }
        location
    }
}
/// Converts `ObjectStorePath`s to `String`s that are appropriate for use as
/// locations in filesystem storage.
#[derive(Debug, Clone, Copy)]
pub struct | {}
impl FileConverter {
/// Creates a filesystem `PathBuf` location by using the standard library's
/// `PathBuf` building implementation appropriate for the current
/// platform.
pub fn convert(object_store_path: &ObjectStorePath) -> PathBuf {
object_store_path.parts.iter().map(|p| &p.0).collect()
}
}
/// The delimiter to separate object namespaces, creating a directory structure.
pub const DELIMITER: &str = "/";
// percent_encode's API needs this as a byte
// (DELIMITER is a single ASCII character, so byte 0 is the whole delimiter).
const DELIMITER_BYTE: u8 = DELIMITER.as_bytes()[0];
/// The PathPart type exists to validate the directory/file names that form part
/// of a path.
///
/// A PathPart instance is guaranteed to contain no `/` characters as it can
/// only be constructed by going through the `try_from` impl.
///
/// NOTE(review): construction actually goes through the infallible
/// `From<&str>` impl below (which percent-encodes); the `try_from` mention
/// above appears outdated -- confirm and update.
#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Debug, Default)]
pub struct PathPart(String);
/// Characters we want to encode.
///
/// Anything in this set is replaced by its percent-encoded form when a
/// `PathPart` is built from a `&str`.
const INVALID: &AsciiSet = &CONTROLS
    // The delimiter we are reserving for internal hierarchy
    .add(DELIMITER_BYTE)
    // Characters AWS recommends avoiding for object keys
    // https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMetadata.html
    .add(b'\\')
    .add(b'{')
    // TODO: Non-printable ASCII characters (128–255 decimal characters)
    .add(b'^')
    .add(b'}')
    .add(b'%')
    .add(b'`')
    .add(b']')
    .add(b'"') // " <-- my editor is confused about double quotes within single quotes
    .add(b'>')
    .add(b'[')
    .add(b'~')
    .add(b'<')
    .add(b'#')
    .add(b'|')
    // Characters Google Cloud Storage recommends avoiding for object names
    // https://cloud.google.com/storage/docs/naming-objects
    .add(b'\r')
    .add(b'\n')
    .add(b'*')
    .add(b'?');
impl From<&str> for PathPart {
    /// Builds a `PathPart`, percent-encoding every character in `INVALID`.
    ///
    /// `.` and `..` are special-cased: they are fully encoded even though
    /// `.` is not in `INVALID`, to prevent file system traversal
    /// shenanigans.
    fn from(v: &str) -> Self {
        if v == "." {
            return Self(String::from("%2E"));
        }
        if v == ".." {
            return Self(String::from("%2E%2E"));
        }
        Self(percent_encode(v.as_bytes(), INVALID).to_string())
    }
}
impl std::fmt::Display for PathPart {
    /// Displays the part percent-decoded, i.e. as the caller originally
    /// supplied it.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let decoded = percent_decode_str(&self.0)
            .decode_utf8()
            .expect("Valid UTF-8 that came from String");
        decoded.fmt(f)
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    // --- PathPart encoding/decoding behavior ---
    #[test]
    fn path_part_delimiter_gets_encoded() {
        let part: PathPart = "foo/bar".into();
        assert_eq!(part, PathPart(String::from("foo%2Fbar")));
    }
    #[test]
    fn path_part_gets_decoded_for_display() {
        let part: PathPart = "foo/bar".into();
        assert_eq!(part.to_string(), "foo/bar");
    }
    #[test]
    fn path_part_given_already_encoded_string() {
        // `%` is itself in INVALID, so pre-encoded input gets
        // double-encoded and displays back as the literal input.
        let part: PathPart = "foo%2Fbar".into();
        assert_eq!(part, PathPart(String::from("foo%252Fbar")));
        assert_eq!(part.to_string(), "foo%2Fbar");
    }
    #[test]
    fn path_part_cant_be_one_dot() {
        let part: PathPart = ".".into();
        assert_eq!(part, PathPart(String::from("%2E")));
        assert_eq!(part.to_string(), ".");
    }
    #[test]
    fn path_part_cant_be_two_dots() {
        let part: PathPart = "..".into();
        assert_eq!(part, PathPart(String::from("%2E%2E")));
        assert_eq!(part.to_string(), "..");
    }
    // Invariants to maintain/document/test:
    //
    // - always ends in DELIMITER if it's a directory. If it's the end object, it
    //   should have some sort of file extension like .parquet, .json, or .segment
    // - does not contain unencoded DELIMITER
    // - for file paths: does not escape root dir
    // - for object storage: looks like directories
    // - Paths that come from object stores directly don't need to be
    //   parsed/validated
    // - Within a process, the same backing store will always be used
    //
    #[test]
    fn cloud_prefix_no_trailing_delimiter_or_filename() {
        // Use case: a file named `test_file.json` exists in object storage and it
        // should be returned for a search on prefix `test`, so the prefix path
        // should not get a trailing delimiter automatically added
        let mut prefix = ObjectStorePath::default();
        prefix.push("test");
        let converted = CloudConverter::convert(&prefix);
        assert_eq!(converted, "test");
    }
    #[test]
    fn cloud_prefix_with_trailing_delimiter() {
        // Use case: files exist in object storage named `foo/bar.json` and
        // `foo_test.json`. A search for the prefix `foo/` should return
        // `foo/bar.json` but not `foo_test.json'.
        let mut prefix = ObjectStorePath::default();
        prefix.push_all(&["test", ""]);
        let converted = CloudConverter::convert(&prefix);
        assert_eq!(converted, "test/");
    }
    // --- push/push_all apply percent-encoding ---
    #[test]
    fn push_encodes() {
        let mut location = ObjectStorePath::default();
        location.push("foo/bar");
        location.push("baz%2Ftest");
        let converted = CloudConverter::convert(&location);
        assert_eq!(converted, "foo%2Fbar/baz%252Ftest");
    }
    #[test]
    fn push_all_encodes() {
        let mut location = ObjectStorePath::default();
        location.push_all(&["foo/bar", "baz%2Ftest"]);
        let converted = CloudConverter::convert(&location);
        assert_eq!(converted, "foo%2Fbar/baz%252Ftest");
    }
    // --- starts_with: whole-part matches and trailing partial parts ---
    #[test]
    fn starts_with_parts() {
        let mut haystack = ObjectStorePath::default();
        haystack.push_all(&["foo/bar", "baz%2Ftest", "something"]);
        assert!(
            haystack.starts_with(&haystack),
            "{:?} should have started with {:?}",
            haystack,
            haystack
        );
        let mut needle = haystack.clone();
        needle.push("longer now");
        assert!(
            !haystack.starts_with(&needle),
            "{:?} shouldn't have started with {:?}",
            haystack,
            needle
        );
        let mut needle = ObjectStorePath::default();
        needle.push("foo/bar");
        assert!(
            haystack.starts_with(&needle),
            "{:?} should have started with {:?}",
            haystack,
            needle
        );
        needle.push("baz%2Ftest");
        assert!(
            haystack.starts_with(&needle),
            "{:?} should have started with {:?}",
            haystack,
            needle
        );
        let mut needle = ObjectStorePath::default();
        needle.push("f");
        assert!(
            haystack.starts_with(&needle),
            "{:?} should have started with {:?}",
            haystack,
            needle
        );
        needle.push("oo/bar");
        assert!(
            !haystack.starts_with(&needle),
            "{:?} shouldn't have started with {:?}",
            haystack,
            needle
        );
        let mut needle = ObjectStorePath::default();
        needle.push_all(&["foo/bar", "baz"]);
        assert!(
            haystack.starts_with(&needle),
            "{:?} should have started with {:?}",
            haystack,
            needle
        );
    }
}
| FileConverter | identifier_name |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.