file_name large_stringlengths 4 140 | prefix large_stringlengths 0 39k | suffix large_stringlengths 0 36.1k | middle large_stringlengths 0 29.4k | fim_type large_stringclasses 4
values |
|---|---|---|---|---|
view_test.go | package api
import (
"bytes"
"context"
"encoding/json"
"errors"
"fmt"
"math/rand"
"net/http"
"net/http/httptest"
"net/url"
"os"
"path/filepath"
"sync"
"testing"
"time"
"github.com/labstack/echo/v4"
"github.com/labstack/gommon/log"
"github.com/nsf/jsondiff"
"github.com/udovin/solve/config"
"github.com/udovin/solve/core"
"github.com/udovin/solve/db"
"github.com/udovin/solve/invoker"
"github.com/udovin/solve/migrations"
"github.com/udovin/solve/models"
)
type TestEnv struct {
tb testing.TB
checks *testCheckState
Core *core.Core
Server *httptest.Server
Client *testClient
Socket *testClient
Now time.Time
Rand *rand.Rand
}
func (e *TestEnv) SyncStores() {
ctx := context.Background()
if err := e.Core.Accounts.Sync(ctx); err != nil {
e.tb.Fatal("Error:", err)
}
if err := e.Core.Users.Sync(ctx); err != nil |
if err := e.Core.Sessions.Sync(ctx); err != nil {
e.tb.Fatal("Error:", err)
}
if err := e.Core.Roles.Sync(ctx); err != nil {
e.tb.Fatal("Error:", err)
}
if err := e.Core.RoleEdges.Sync(ctx); err != nil {
e.tb.Fatal("Error:", err)
}
if err := e.Core.AccountRoles.Sync(ctx); err != nil {
e.tb.Fatal("Error:", err)
}
if err := e.Core.Contests.Sync(ctx); err != nil {
e.tb.Fatal("Error:", err)
}
if err := e.Core.Problems.Sync(ctx); err != nil {
e.tb.Fatal("Error:", err)
}
if err := e.Core.Compilers.Sync(ctx); err != nil {
e.tb.Fatal("Error:", err)
}
if err := e.Core.Settings.Sync(ctx); err != nil {
e.tb.Fatal("Error:", err)
}
}
func (e *TestEnv) CreateUserRoles(login string, roles ...string) error {
for _, role := range roles {
if _, err := e.Socket.CreateUserRole(context.Background(), login, role); err != nil {
return err
}
}
return nil
}
func (e *TestEnv) Check(data any) {
e.checks.Check(data)
}
func (e *TestEnv) Close() {
e.Server.Close()
e.Core.Stop()
_ = db.ApplyMigrations(context.Background(), e.Core.DB, "solve", migrations.Schema, db.WithZeroMigration)
_ = db.ApplyMigrations(context.Background(), e.Core.DB, "solve_data", migrations.Data, db.WithZeroMigration)
e.checks.Close()
}
func (e *TestEnv) WaitProblemUpdated(id int64) {
for {
if err := e.Core.Tasks.Sync(context.Background()); err != nil {
e.tb.Fatal("Error:", err)
}
tasks, err := e.Core.Tasks.FindByProblem(id)
if err != nil {
e.tb.Fatal("Error:", err)
}
if len(tasks) == 0 {
e.tb.Fatal("Empty problem tasks")
}
if tasks[0].Status == models.SucceededTask {
return
}
if tasks[0].Status == models.FailedTask {
e.tb.Fatalf("Task failed: %q", string(tasks[0].State))
}
time.Sleep(time.Second)
}
}
type TestEnvOption interface {
UpdateConfig(*config.Config)
Setup(*TestEnv) error
}
type WithInvoker struct{}
func (o WithInvoker) UpdateConfig(cfg *config.Config) {
cfg.Invoker = &config.Invoker{
Workers: 1,
Safeexec: config.Safeexec{
Path: "../safeexec/safeexec",
},
}
}
func (o WithInvoker) Setup(env *TestEnv) error {
return invoker.New(env.Core).Start()
}
func NewTestEnv(tb testing.TB, options ...TestEnvOption) *TestEnv {
env := TestEnv{
tb: tb,
checks: newTestCheckState(tb),
Now: time.Date(2020, 1, 1, 10, 0, 0, 0, time.UTC),
Rand: rand.New(rand.NewSource(42)),
}
cfg := config.Config{
DB: config.DB{
Options: config.SQLiteOptions{Path: ":memory:"},
},
Security: &config.Security{
PasswordSalt: "qwerty123",
},
Storage: &config.Storage{
Options: config.LocalStorageOptions{
FilesDir: tb.TempDir(),
},
},
}
if _, ok := tb.(*testing.B); ok || os.Getenv("TEST_ENABLE_LOGS") != "1" {
log.SetLevel(log.OFF)
cfg.LogLevel = config.LogLevel(log.OFF)
}
for _, option := range options {
option.UpdateConfig(&cfg)
}
if c, err := core.NewCore(cfg); err != nil {
tb.Fatal("Error:", err)
} else {
env.Core = c
}
env.Core.SetupAllStores()
ctx := context.Background()
_ = db.ApplyMigrations(ctx, env.Core.DB, "solve", migrations.Schema, db.WithZeroMigration)
_ = db.ApplyMigrations(ctx, env.Core.DB, "solve_data", migrations.Data, db.WithZeroMigration)
if err := db.ApplyMigrations(ctx, env.Core.DB, "solve", migrations.Schema); err != nil {
tb.Fatal("Error:", err)
}
if err := db.ApplyMigrations(ctx, env.Core.DB, "solve_data", migrations.Data); err != nil {
tb.Fatal("Error:", err)
}
if err := env.Core.Start(); err != nil {
tb.Fatal("Error:", err)
}
e := echo.New()
e.Logger = env.Core.Logger()
view := NewView(env.Core)
nowFn := func(next echo.HandlerFunc) echo.HandlerFunc {
return func(c echo.Context) error {
c.Set(nowKey, env.Now)
return next(c)
}
}
e.Use(nowFn)
view.Register(e.Group("/api"))
view.RegisterSocket(e.Group("/socket"))
view.StartDaemons()
env.Server = httptest.NewServer(e)
env.Client = newTestClient(env.Server.URL + "/api")
env.Socket = newTestClient(env.Server.URL + "/socket")
for _, option := range options {
option.Setup(&env)
}
return &env
}
type TestUser struct {
User
Password string
env *TestEnv
}
func (u *TestUser) LoginClient() {
_, err := u.env.Client.Login(context.Background(), u.User.Login, u.Password)
if err != nil {
u.env.tb.Fatal("Error:", err)
}
}
func (u *TestUser) LogoutClient() {
if err := u.env.Client.Logout(context.Background()); err != nil {
u.env.tb.Fatal("Error:", err)
}
}
func (u *TestUser) AddRoles(names ...string) {
if err := u.env.CreateUserRoles(u.User.Login, names...); err != nil {
u.env.tb.Fatal("Error:", err)
}
u.env.SyncStores()
}
func NewTestUser(e *TestEnv) *TestUser {
login := fmt.Sprintf("login-%d", e.Rand.Int31())
password := fmt.Sprintf("password-%d", e.Rand.Int63())
user, err := e.Client.Register(context.Background(), RegisterUserForm{
Login: login,
Email: login + "@example.com",
Password: password,
FirstName: "First",
LastName: "Last",
MiddleName: "Middle",
})
if err != nil {
e.tb.Fatal("Error:", err)
}
return &TestUser{
User: user,
Password: password,
env: e,
}
}
type testCheckState struct {
tb testing.TB
checks []json.RawMessage
pos int
reset bool
path string
}
func (s *testCheckState) Check(data any) {
raw, err := json.MarshalIndent(data, "", " ")
if err != nil {
s.tb.Fatal("Unable to marshal data:", data)
}
if s.pos > len(s.checks) {
s.tb.Fatalf("Invalid check position: %d", s.pos)
}
if s.pos == len(s.checks) {
if s.reset {
s.checks = append(s.checks, raw)
s.pos++
return
}
s.tb.Errorf("Unexpected check with data: %s", raw)
s.tb.Fatalf("Maybe you should use: TEST_RESET_DATA=1")
}
options := jsondiff.DefaultConsoleOptions()
diff, report := jsondiff.Compare(s.checks[s.pos], raw, &options)
if diff != jsondiff.FullMatch {
if s.reset {
s.checks[s.pos] = raw
s.pos++
return
}
s.tb.Errorf("Unexpected result difference: %s", report)
s.tb.Fatalf("Maybe you should use: TEST_RESET_DATA=1")
}
s.pos++
}
func (s *testCheckState) Close() {
if s.reset {
if s.pos == 0 {
_ = os.Remove(s.path)
return
}
raw, err := json.MarshalIndent(s.checks, "", " ")
if err != nil {
s.tb.Fatal("Unable to marshal test data:", err)
}
if err := os.WriteFile(
s.path, raw, os.ModePerm,
); err != nil {
s.tb.Fatal("Error:", err)
}
}
}
func newTestCheckState(tb testing.TB) *testCheckState {
state := testCheckState{
tb: tb,
reset: os.Getenv("TEST_RESET_DATA") == "1",
path: filepath.Join("testdata", tb.Name()+".json"),
}
if !state.reset {
file, err := os.Open(state.path)
if err != nil {
if !errors.Is(err, os.ErrNotExist) {
tb.Fatal("Error:", err)
}
} else {
defer file.Close()
if err := json.NewDecoder(file).Decode(&state.checks); err != nil {
tb.Fatal("Error:", err)
}
}
}
return &state
}
type testClient struct {
*Client
}
type testJar struct {
mutex sync.Mutex
cookies map[string]*http.Cookie
}
func (j *testJar) Cookies(*url.URL) []*http.Cookie {
j.mutex.Lock()
defer j.mutex.Unlock()
var cookies []*http.Cookie
for _, cookie := range j.cookies {
cookies = append(cookies, cookie)
}
return cookies
}
func (j *testJar) SetCookies(u *url.URL, cookies []*http.Cookie) {
j.mutex.Lock()
defer j.mutex.Unlock()
if j.cookies == nil {
j.cookies = map[string]*http.Cookie{}
}
for _, cookie := range cookies {
j.cookies[cookie.Name] = cookie
}
}
func newTestClient(endpoint string) *testClient {
client := NewClient(endpoint)
client.client.Jar = &testJar{}
client.Headers = map[string]string{"X-Solve-Sync": "1"}
return &testClient{client}
}
func (c *testClient) Status() (Status, error) {
req, err := http.NewRequest(http.MethodGet, c.getURL("/v0/status"), nil)
if err != nil {
return Status{}, err
}
var respData Status
_, err = c.doRequest(req, http.StatusOK, &respData)
return respData, err
}
func (c *testClient) ObserveUser(login string) (User, error) {
req, err := http.NewRequest(
http.MethodGet, c.getURL("/v0/users/%s", login), nil,
)
if err != nil {
return User{}, err
}
var respData User
_, err = c.doRequest(req, http.StatusOK, &respData)
return respData, err
}
func (c *testClient) ObserveContests() (Contests, error) {
req, err := http.NewRequest(
http.MethodGet, c.getURL("/v0/contests"), nil,
)
if err != nil {
return Contests{}, err
}
var respData Contests
_, err = c.doRequest(req, http.StatusOK, &respData)
return respData, err
}
func (c *testClient) CreateContest(form createContestForm) (Contest, error) {
data, err := json.Marshal(form)
if err != nil {
return Contest{}, err
}
req, err := http.NewRequest(
http.MethodPost, c.getURL("/v0/contests"),
bytes.NewReader(data),
)
if err != nil {
return Contest{}, err
}
var respData Contest
_, err = c.doRequest(req, http.StatusCreated, &respData)
return respData, err
}
func (c *testClient) CreateContestProblem(
contestID int64,
form createContestProblemForm,
) (ContestProblem, error) {
data, err := json.Marshal(form)
if err != nil {
return ContestProblem{}, err
}
req, err := http.NewRequest(
http.MethodPost,
c.getURL("/v0/contests/%d/problems", contestID),
bytes.NewReader(data),
)
if err != nil {
return ContestProblem{}, err
}
var respData ContestProblem
_, err = c.doRequest(req, http.StatusCreated, &respData)
return respData, err
}
func (c *testClient) CreateRoleRole(role string, child string) (Role, error) {
req, err := http.NewRequest(
http.MethodPost, c.getURL("/v0/roles/%s/roles/%s", role, child),
nil,
)
if err != nil {
return Role{}, err
}
var respData Role
_, err = c.doRequest(req, http.StatusCreated, &respData)
return respData, err
}
func (c *testClient) DeleteRoleRole(role string, child string) (Role, error) {
req, err := http.NewRequest(
http.MethodDelete, c.getURL("/v0/roles/%s/roles/%s", role, child),
nil,
)
if err != nil {
return Role{}, err
}
var respData Role
_, err = c.doRequest(req, http.StatusOK, &respData)
return respData, err
}
func TestPing(t *testing.T) {
e := NewTestEnv(t)
defer e.Close()
if err := e.Client.Ping(context.Background()); err != nil {
t.Fatal("Error:", err)
}
}
func TestHealth(t *testing.T) {
e := NewTestEnv(t)
defer e.Close()
if err := e.Client.Health(context.Background()); err != nil {
t.Fatal("Error:", err)
}
}
func TestHealthUnhealthy(t *testing.T) {
e := NewTestEnv(t)
defer e.Close()
if err := e.Core.DB.Close(); err != nil {
t.Fatal("Error:", err)
}
err := e.Client.Health(context.Background())
resp, ok := err.(statusCodeResponse)
if !ok {
t.Fatal("Invalid error:", err)
}
expectStatus(t, http.StatusInternalServerError, resp.StatusCode())
}
func expectStatus(tb testing.TB, expected, got int) {
if got != expected {
tb.Fatalf("Expected %v, got %v", expected, got)
}
}
| {
e.tb.Fatal("Error:", err)
} | conditional_block |
view_test.go | package api
import (
"bytes"
"context"
"encoding/json"
"errors"
"fmt"
"math/rand"
"net/http"
"net/http/httptest"
"net/url"
"os"
"path/filepath"
"sync"
"testing"
"time"
"github.com/labstack/echo/v4"
"github.com/labstack/gommon/log"
"github.com/nsf/jsondiff"
"github.com/udovin/solve/config"
"github.com/udovin/solve/core"
"github.com/udovin/solve/db"
"github.com/udovin/solve/invoker"
"github.com/udovin/solve/migrations"
"github.com/udovin/solve/models"
)
type TestEnv struct {
tb testing.TB
checks *testCheckState
Core *core.Core
Server *httptest.Server
Client *testClient
Socket *testClient
Now time.Time
Rand *rand.Rand
}
func (e *TestEnv) SyncStores() {
ctx := context.Background()
if err := e.Core.Accounts.Sync(ctx); err != nil {
e.tb.Fatal("Error:", err)
}
if err := e.Core.Users.Sync(ctx); err != nil {
e.tb.Fatal("Error:", err)
}
if err := e.Core.Sessions.Sync(ctx); err != nil {
e.tb.Fatal("Error:", err)
}
if err := e.Core.Roles.Sync(ctx); err != nil {
e.tb.Fatal("Error:", err)
}
if err := e.Core.RoleEdges.Sync(ctx); err != nil {
e.tb.Fatal("Error:", err)
}
if err := e.Core.AccountRoles.Sync(ctx); err != nil {
e.tb.Fatal("Error:", err)
}
if err := e.Core.Contests.Sync(ctx); err != nil {
e.tb.Fatal("Error:", err)
}
if err := e.Core.Problems.Sync(ctx); err != nil {
e.tb.Fatal("Error:", err)
}
if err := e.Core.Compilers.Sync(ctx); err != nil {
e.tb.Fatal("Error:", err)
}
if err := e.Core.Settings.Sync(ctx); err != nil {
e.tb.Fatal("Error:", err)
}
}
func (e *TestEnv) CreateUserRoles(login string, roles ...string) error {
for _, role := range roles {
if _, err := e.Socket.CreateUserRole(context.Background(), login, role); err != nil {
return err
}
}
return nil
}
func (e *TestEnv) Check(data any) {
e.checks.Check(data)
}
func (e *TestEnv) Close() {
e.Server.Close()
e.Core.Stop()
_ = db.ApplyMigrations(context.Background(), e.Core.DB, "solve", migrations.Schema, db.WithZeroMigration)
_ = db.ApplyMigrations(context.Background(), e.Core.DB, "solve_data", migrations.Data, db.WithZeroMigration)
e.checks.Close()
}
func (e *TestEnv) WaitProblemUpdated(id int64) {
for {
if err := e.Core.Tasks.Sync(context.Background()); err != nil {
e.tb.Fatal("Error:", err)
}
tasks, err := e.Core.Tasks.FindByProblem(id)
if err != nil {
e.tb.Fatal("Error:", err)
}
if len(tasks) == 0 {
e.tb.Fatal("Empty problem tasks")
}
if tasks[0].Status == models.SucceededTask {
return
}
if tasks[0].Status == models.FailedTask {
e.tb.Fatalf("Task failed: %q", string(tasks[0].State))
}
time.Sleep(time.Second)
}
}
type TestEnvOption interface {
UpdateConfig(*config.Config)
Setup(*TestEnv) error
}
type WithInvoker struct{}
func (o WithInvoker) UpdateConfig(cfg *config.Config) {
cfg.Invoker = &config.Invoker{
Workers: 1,
Safeexec: config.Safeexec{
Path: "../safeexec/safeexec",
},
}
}
func (o WithInvoker) Setup(env *TestEnv) error {
return invoker.New(env.Core).Start()
}
func NewTestEnv(tb testing.TB, options ...TestEnvOption) *TestEnv {
env := TestEnv{
tb: tb,
checks: newTestCheckState(tb),
Now: time.Date(2020, 1, 1, 10, 0, 0, 0, time.UTC),
Rand: rand.New(rand.NewSource(42)),
}
cfg := config.Config{
DB: config.DB{
Options: config.SQLiteOptions{Path: ":memory:"},
},
Security: &config.Security{
PasswordSalt: "qwerty123",
},
Storage: &config.Storage{
Options: config.LocalStorageOptions{
FilesDir: tb.TempDir(),
},
},
}
if _, ok := tb.(*testing.B); ok || os.Getenv("TEST_ENABLE_LOGS") != "1" {
log.SetLevel(log.OFF)
cfg.LogLevel = config.LogLevel(log.OFF)
}
for _, option := range options {
option.UpdateConfig(&cfg)
}
if c, err := core.NewCore(cfg); err != nil {
tb.Fatal("Error:", err)
} else {
env.Core = c
}
env.Core.SetupAllStores()
ctx := context.Background()
_ = db.ApplyMigrations(ctx, env.Core.DB, "solve", migrations.Schema, db.WithZeroMigration)
_ = db.ApplyMigrations(ctx, env.Core.DB, "solve_data", migrations.Data, db.WithZeroMigration)
if err := db.ApplyMigrations(ctx, env.Core.DB, "solve", migrations.Schema); err != nil {
tb.Fatal("Error:", err)
}
if err := db.ApplyMigrations(ctx, env.Core.DB, "solve_data", migrations.Data); err != nil {
tb.Fatal("Error:", err)
}
if err := env.Core.Start(); err != nil {
tb.Fatal("Error:", err)
}
e := echo.New()
e.Logger = env.Core.Logger()
view := NewView(env.Core)
nowFn := func(next echo.HandlerFunc) echo.HandlerFunc {
return func(c echo.Context) error {
c.Set(nowKey, env.Now)
return next(c)
}
}
e.Use(nowFn)
view.Register(e.Group("/api"))
view.RegisterSocket(e.Group("/socket"))
view.StartDaemons()
env.Server = httptest.NewServer(e)
env.Client = newTestClient(env.Server.URL + "/api")
env.Socket = newTestClient(env.Server.URL + "/socket")
for _, option := range options {
option.Setup(&env)
}
return &env
}
type TestUser struct {
User
Password string
env *TestEnv
}
func (u *TestUser) LoginClient() {
_, err := u.env.Client.Login(context.Background(), u.User.Login, u.Password)
if err != nil {
u.env.tb.Fatal("Error:", err)
}
}
func (u *TestUser) LogoutClient() {
if err := u.env.Client.Logout(context.Background()); err != nil {
u.env.tb.Fatal("Error:", err)
}
}
func (u *TestUser) AddRoles(names ...string) {
if err := u.env.CreateUserRoles(u.User.Login, names...); err != nil {
u.env.tb.Fatal("Error:", err)
}
u.env.SyncStores()
}
func NewTestUser(e *TestEnv) *TestUser {
login := fmt.Sprintf("login-%d", e.Rand.Int31())
password := fmt.Sprintf("password-%d", e.Rand.Int63())
user, err := e.Client.Register(context.Background(), RegisterUserForm{
Login: login,
Email: login + "@example.com",
Password: password,
FirstName: "First",
LastName: "Last",
MiddleName: "Middle",
})
if err != nil { | e.tb.Fatal("Error:", err)
}
return &TestUser{
User: user,
Password: password,
env: e,
}
}
type testCheckState struct {
tb testing.TB
checks []json.RawMessage
pos int
reset bool
path string
}
func (s *testCheckState) Check(data any) {
raw, err := json.MarshalIndent(data, "", " ")
if err != nil {
s.tb.Fatal("Unable to marshal data:", data)
}
if s.pos > len(s.checks) {
s.tb.Fatalf("Invalid check position: %d", s.pos)
}
if s.pos == len(s.checks) {
if s.reset {
s.checks = append(s.checks, raw)
s.pos++
return
}
s.tb.Errorf("Unexpected check with data: %s", raw)
s.tb.Fatalf("Maybe you should use: TEST_RESET_DATA=1")
}
options := jsondiff.DefaultConsoleOptions()
diff, report := jsondiff.Compare(s.checks[s.pos], raw, &options)
if diff != jsondiff.FullMatch {
if s.reset {
s.checks[s.pos] = raw
s.pos++
return
}
s.tb.Errorf("Unexpected result difference: %s", report)
s.tb.Fatalf("Maybe you should use: TEST_RESET_DATA=1")
}
s.pos++
}
func (s *testCheckState) Close() {
if s.reset {
if s.pos == 0 {
_ = os.Remove(s.path)
return
}
raw, err := json.MarshalIndent(s.checks, "", " ")
if err != nil {
s.tb.Fatal("Unable to marshal test data:", err)
}
if err := os.WriteFile(
s.path, raw, os.ModePerm,
); err != nil {
s.tb.Fatal("Error:", err)
}
}
}
func newTestCheckState(tb testing.TB) *testCheckState {
state := testCheckState{
tb: tb,
reset: os.Getenv("TEST_RESET_DATA") == "1",
path: filepath.Join("testdata", tb.Name()+".json"),
}
if !state.reset {
file, err := os.Open(state.path)
if err != nil {
if !errors.Is(err, os.ErrNotExist) {
tb.Fatal("Error:", err)
}
} else {
defer file.Close()
if err := json.NewDecoder(file).Decode(&state.checks); err != nil {
tb.Fatal("Error:", err)
}
}
}
return &state
}
type testClient struct {
*Client
}
type testJar struct {
mutex sync.Mutex
cookies map[string]*http.Cookie
}
func (j *testJar) Cookies(*url.URL) []*http.Cookie {
j.mutex.Lock()
defer j.mutex.Unlock()
var cookies []*http.Cookie
for _, cookie := range j.cookies {
cookies = append(cookies, cookie)
}
return cookies
}
func (j *testJar) SetCookies(u *url.URL, cookies []*http.Cookie) {
j.mutex.Lock()
defer j.mutex.Unlock()
if j.cookies == nil {
j.cookies = map[string]*http.Cookie{}
}
for _, cookie := range cookies {
j.cookies[cookie.Name] = cookie
}
}
func newTestClient(endpoint string) *testClient {
client := NewClient(endpoint)
client.client.Jar = &testJar{}
client.Headers = map[string]string{"X-Solve-Sync": "1"}
return &testClient{client}
}
func (c *testClient) Status() (Status, error) {
req, err := http.NewRequest(http.MethodGet, c.getURL("/v0/status"), nil)
if err != nil {
return Status{}, err
}
var respData Status
_, err = c.doRequest(req, http.StatusOK, &respData)
return respData, err
}
func (c *testClient) ObserveUser(login string) (User, error) {
req, err := http.NewRequest(
http.MethodGet, c.getURL("/v0/users/%s", login), nil,
)
if err != nil {
return User{}, err
}
var respData User
_, err = c.doRequest(req, http.StatusOK, &respData)
return respData, err
}
func (c *testClient) ObserveContests() (Contests, error) {
req, err := http.NewRequest(
http.MethodGet, c.getURL("/v0/contests"), nil,
)
if err != nil {
return Contests{}, err
}
var respData Contests
_, err = c.doRequest(req, http.StatusOK, &respData)
return respData, err
}
func (c *testClient) CreateContest(form createContestForm) (Contest, error) {
data, err := json.Marshal(form)
if err != nil {
return Contest{}, err
}
req, err := http.NewRequest(
http.MethodPost, c.getURL("/v0/contests"),
bytes.NewReader(data),
)
if err != nil {
return Contest{}, err
}
var respData Contest
_, err = c.doRequest(req, http.StatusCreated, &respData)
return respData, err
}
func (c *testClient) CreateContestProblem(
contestID int64,
form createContestProblemForm,
) (ContestProblem, error) {
data, err := json.Marshal(form)
if err != nil {
return ContestProblem{}, err
}
req, err := http.NewRequest(
http.MethodPost,
c.getURL("/v0/contests/%d/problems", contestID),
bytes.NewReader(data),
)
if err != nil {
return ContestProblem{}, err
}
var respData ContestProblem
_, err = c.doRequest(req, http.StatusCreated, &respData)
return respData, err
}
func (c *testClient) CreateRoleRole(role string, child string) (Role, error) {
req, err := http.NewRequest(
http.MethodPost, c.getURL("/v0/roles/%s/roles/%s", role, child),
nil,
)
if err != nil {
return Role{}, err
}
var respData Role
_, err = c.doRequest(req, http.StatusCreated, &respData)
return respData, err
}
func (c *testClient) DeleteRoleRole(role string, child string) (Role, error) {
req, err := http.NewRequest(
http.MethodDelete, c.getURL("/v0/roles/%s/roles/%s", role, child),
nil,
)
if err != nil {
return Role{}, err
}
var respData Role
_, err = c.doRequest(req, http.StatusOK, &respData)
return respData, err
}
func TestPing(t *testing.T) {
e := NewTestEnv(t)
defer e.Close()
if err := e.Client.Ping(context.Background()); err != nil {
t.Fatal("Error:", err)
}
}
func TestHealth(t *testing.T) {
e := NewTestEnv(t)
defer e.Close()
if err := e.Client.Health(context.Background()); err != nil {
t.Fatal("Error:", err)
}
}
func TestHealthUnhealthy(t *testing.T) {
e := NewTestEnv(t)
defer e.Close()
if err := e.Core.DB.Close(); err != nil {
t.Fatal("Error:", err)
}
err := e.Client.Health(context.Background())
resp, ok := err.(statusCodeResponse)
if !ok {
t.Fatal("Invalid error:", err)
}
expectStatus(t, http.StatusInternalServerError, resp.StatusCode())
}
func expectStatus(tb testing.TB, expected, got int) {
if got != expected {
tb.Fatalf("Expected %v, got %v", expected, got)
}
} | random_line_split | |
view_test.go | package api
import (
"bytes"
"context"
"encoding/json"
"errors"
"fmt"
"math/rand"
"net/http"
"net/http/httptest"
"net/url"
"os"
"path/filepath"
"sync"
"testing"
"time"
"github.com/labstack/echo/v4"
"github.com/labstack/gommon/log"
"github.com/nsf/jsondiff"
"github.com/udovin/solve/config"
"github.com/udovin/solve/core"
"github.com/udovin/solve/db"
"github.com/udovin/solve/invoker"
"github.com/udovin/solve/migrations"
"github.com/udovin/solve/models"
)
type TestEnv struct {
tb testing.TB
checks *testCheckState
Core *core.Core
Server *httptest.Server
Client *testClient
Socket *testClient
Now time.Time
Rand *rand.Rand
}
func (e *TestEnv) SyncStores() {
ctx := context.Background()
if err := e.Core.Accounts.Sync(ctx); err != nil {
e.tb.Fatal("Error:", err)
}
if err := e.Core.Users.Sync(ctx); err != nil {
e.tb.Fatal("Error:", err)
}
if err := e.Core.Sessions.Sync(ctx); err != nil {
e.tb.Fatal("Error:", err)
}
if err := e.Core.Roles.Sync(ctx); err != nil {
e.tb.Fatal("Error:", err)
}
if err := e.Core.RoleEdges.Sync(ctx); err != nil {
e.tb.Fatal("Error:", err)
}
if err := e.Core.AccountRoles.Sync(ctx); err != nil {
e.tb.Fatal("Error:", err)
}
if err := e.Core.Contests.Sync(ctx); err != nil {
e.tb.Fatal("Error:", err)
}
if err := e.Core.Problems.Sync(ctx); err != nil {
e.tb.Fatal("Error:", err)
}
if err := e.Core.Compilers.Sync(ctx); err != nil {
e.tb.Fatal("Error:", err)
}
if err := e.Core.Settings.Sync(ctx); err != nil {
e.tb.Fatal("Error:", err)
}
}
func (e *TestEnv) CreateUserRoles(login string, roles ...string) error {
for _, role := range roles {
if _, err := e.Socket.CreateUserRole(context.Background(), login, role); err != nil {
return err
}
}
return nil
}
func (e *TestEnv) Check(data any) {
e.checks.Check(data)
}
func (e *TestEnv) Close() {
e.Server.Close()
e.Core.Stop()
_ = db.ApplyMigrations(context.Background(), e.Core.DB, "solve", migrations.Schema, db.WithZeroMigration)
_ = db.ApplyMigrations(context.Background(), e.Core.DB, "solve_data", migrations.Data, db.WithZeroMigration)
e.checks.Close()
}
func (e *TestEnv) WaitProblemUpdated(id int64) {
for {
if err := e.Core.Tasks.Sync(context.Background()); err != nil {
e.tb.Fatal("Error:", err)
}
tasks, err := e.Core.Tasks.FindByProblem(id)
if err != nil {
e.tb.Fatal("Error:", err)
}
if len(tasks) == 0 {
e.tb.Fatal("Empty problem tasks")
}
if tasks[0].Status == models.SucceededTask {
return
}
if tasks[0].Status == models.FailedTask {
e.tb.Fatalf("Task failed: %q", string(tasks[0].State))
}
time.Sleep(time.Second)
}
}
type TestEnvOption interface {
UpdateConfig(*config.Config)
Setup(*TestEnv) error
}
type WithInvoker struct{}
func (o WithInvoker) UpdateConfig(cfg *config.Config) {
cfg.Invoker = &config.Invoker{
Workers: 1,
Safeexec: config.Safeexec{
Path: "../safeexec/safeexec",
},
}
}
func (o WithInvoker) Setup(env *TestEnv) error {
return invoker.New(env.Core).Start()
}
func NewTestEnv(tb testing.TB, options ...TestEnvOption) *TestEnv {
env := TestEnv{
tb: tb,
checks: newTestCheckState(tb),
Now: time.Date(2020, 1, 1, 10, 0, 0, 0, time.UTC),
Rand: rand.New(rand.NewSource(42)),
}
cfg := config.Config{
DB: config.DB{
Options: config.SQLiteOptions{Path: ":memory:"},
},
Security: &config.Security{
PasswordSalt: "qwerty123",
},
Storage: &config.Storage{
Options: config.LocalStorageOptions{
FilesDir: tb.TempDir(),
},
},
}
if _, ok := tb.(*testing.B); ok || os.Getenv("TEST_ENABLE_LOGS") != "1" {
log.SetLevel(log.OFF)
cfg.LogLevel = config.LogLevel(log.OFF)
}
for _, option := range options {
option.UpdateConfig(&cfg)
}
if c, err := core.NewCore(cfg); err != nil {
tb.Fatal("Error:", err)
} else {
env.Core = c
}
env.Core.SetupAllStores()
ctx := context.Background()
_ = db.ApplyMigrations(ctx, env.Core.DB, "solve", migrations.Schema, db.WithZeroMigration)
_ = db.ApplyMigrations(ctx, env.Core.DB, "solve_data", migrations.Data, db.WithZeroMigration)
if err := db.ApplyMigrations(ctx, env.Core.DB, "solve", migrations.Schema); err != nil {
tb.Fatal("Error:", err)
}
if err := db.ApplyMigrations(ctx, env.Core.DB, "solve_data", migrations.Data); err != nil {
tb.Fatal("Error:", err)
}
if err := env.Core.Start(); err != nil {
tb.Fatal("Error:", err)
}
e := echo.New()
e.Logger = env.Core.Logger()
view := NewView(env.Core)
nowFn := func(next echo.HandlerFunc) echo.HandlerFunc {
return func(c echo.Context) error {
c.Set(nowKey, env.Now)
return next(c)
}
}
e.Use(nowFn)
view.Register(e.Group("/api"))
view.RegisterSocket(e.Group("/socket"))
view.StartDaemons()
env.Server = httptest.NewServer(e)
env.Client = newTestClient(env.Server.URL + "/api")
env.Socket = newTestClient(env.Server.URL + "/socket")
for _, option := range options {
option.Setup(&env)
}
return &env
}
type TestUser struct {
User
Password string
env *TestEnv
}
func (u *TestUser) LoginClient() {
_, err := u.env.Client.Login(context.Background(), u.User.Login, u.Password)
if err != nil {
u.env.tb.Fatal("Error:", err)
}
}
func (u *TestUser) LogoutClient() {
if err := u.env.Client.Logout(context.Background()); err != nil {
u.env.tb.Fatal("Error:", err)
}
}
func (u *TestUser) AddRoles(names ...string) {
if err := u.env.CreateUserRoles(u.User.Login, names...); err != nil {
u.env.tb.Fatal("Error:", err)
}
u.env.SyncStores()
}
func NewTestUser(e *TestEnv) *TestUser {
login := fmt.Sprintf("login-%d", e.Rand.Int31())
password := fmt.Sprintf("password-%d", e.Rand.Int63())
user, err := e.Client.Register(context.Background(), RegisterUserForm{
Login: login,
Email: login + "@example.com",
Password: password,
FirstName: "First",
LastName: "Last",
MiddleName: "Middle",
})
if err != nil {
e.tb.Fatal("Error:", err)
}
return &TestUser{
User: user,
Password: password,
env: e,
}
}
type testCheckState struct {
tb testing.TB
checks []json.RawMessage
pos int
reset bool
path string
}
func (s *testCheckState) Check(data any) {
raw, err := json.MarshalIndent(data, "", " ")
if err != nil {
s.tb.Fatal("Unable to marshal data:", data)
}
if s.pos > len(s.checks) {
s.tb.Fatalf("Invalid check position: %d", s.pos)
}
if s.pos == len(s.checks) {
if s.reset {
s.checks = append(s.checks, raw)
s.pos++
return
}
s.tb.Errorf("Unexpected check with data: %s", raw)
s.tb.Fatalf("Maybe you should use: TEST_RESET_DATA=1")
}
options := jsondiff.DefaultConsoleOptions()
diff, report := jsondiff.Compare(s.checks[s.pos], raw, &options)
if diff != jsondiff.FullMatch {
if s.reset {
s.checks[s.pos] = raw
s.pos++
return
}
s.tb.Errorf("Unexpected result difference: %s", report)
s.tb.Fatalf("Maybe you should use: TEST_RESET_DATA=1")
}
s.pos++
}
func (s *testCheckState) Close() {
if s.reset {
if s.pos == 0 {
_ = os.Remove(s.path)
return
}
raw, err := json.MarshalIndent(s.checks, "", " ")
if err != nil {
s.tb.Fatal("Unable to marshal test data:", err)
}
if err := os.WriteFile(
s.path, raw, os.ModePerm,
); err != nil {
s.tb.Fatal("Error:", err)
}
}
}
func newTestCheckState(tb testing.TB) *testCheckState {
state := testCheckState{
tb: tb,
reset: os.Getenv("TEST_RESET_DATA") == "1",
path: filepath.Join("testdata", tb.Name()+".json"),
}
if !state.reset {
file, err := os.Open(state.path)
if err != nil {
if !errors.Is(err, os.ErrNotExist) {
tb.Fatal("Error:", err)
}
} else {
defer file.Close()
if err := json.NewDecoder(file).Decode(&state.checks); err != nil {
tb.Fatal("Error:", err)
}
}
}
return &state
}
type testClient struct {
*Client
}
type testJar struct {
mutex sync.Mutex
cookies map[string]*http.Cookie
}
func (j *testJar) Cookies(*url.URL) []*http.Cookie {
j.mutex.Lock()
defer j.mutex.Unlock()
var cookies []*http.Cookie
for _, cookie := range j.cookies {
cookies = append(cookies, cookie)
}
return cookies
}
func (j *testJar) SetCookies(u *url.URL, cookies []*http.Cookie) {
j.mutex.Lock()
defer j.mutex.Unlock()
if j.cookies == nil {
j.cookies = map[string]*http.Cookie{}
}
for _, cookie := range cookies {
j.cookies[cookie.Name] = cookie
}
}
func newTestClient(endpoint string) *testClient {
client := NewClient(endpoint)
client.client.Jar = &testJar{}
client.Headers = map[string]string{"X-Solve-Sync": "1"}
return &testClient{client}
}
func (c *testClient) Status() (Status, error) {
req, err := http.NewRequest(http.MethodGet, c.getURL("/v0/status"), nil)
if err != nil {
return Status{}, err
}
var respData Status
_, err = c.doRequest(req, http.StatusOK, &respData)
return respData, err
}
func (c *testClient) ObserveUser(login string) (User, error) {
req, err := http.NewRequest(
http.MethodGet, c.getURL("/v0/users/%s", login), nil,
)
if err != nil {
return User{}, err
}
var respData User
_, err = c.doRequest(req, http.StatusOK, &respData)
return respData, err
}
func (c *testClient) ObserveContests() (Contests, error) {
req, err := http.NewRequest(
http.MethodGet, c.getURL("/v0/contests"), nil,
)
if err != nil {
return Contests{}, err
}
var respData Contests
_, err = c.doRequest(req, http.StatusOK, &respData)
return respData, err
}
func (c *testClient) CreateContest(form createContestForm) (Contest, error) {
data, err := json.Marshal(form)
if err != nil {
return Contest{}, err
}
req, err := http.NewRequest(
http.MethodPost, c.getURL("/v0/contests"),
bytes.NewReader(data),
)
if err != nil {
return Contest{}, err
}
var respData Contest
_, err = c.doRequest(req, http.StatusCreated, &respData)
return respData, err
}
func (c *testClient) CreateContestProblem(
contestID int64,
form createContestProblemForm,
) (ContestProblem, error) {
data, err := json.Marshal(form)
if err != nil {
return ContestProblem{}, err
}
req, err := http.NewRequest(
http.MethodPost,
c.getURL("/v0/contests/%d/problems", contestID),
bytes.NewReader(data),
)
if err != nil {
return ContestProblem{}, err
}
var respData ContestProblem
_, err = c.doRequest(req, http.StatusCreated, &respData)
return respData, err
}
func (c *testClient) CreateRoleRole(role string, child string) (Role, error) {
req, err := http.NewRequest(
http.MethodPost, c.getURL("/v0/roles/%s/roles/%s", role, child),
nil,
)
if err != nil {
return Role{}, err
}
var respData Role
_, err = c.doRequest(req, http.StatusCreated, &respData)
return respData, err
}
func (c *testClient) DeleteRoleRole(role string, child string) (Role, error) {
req, err := http.NewRequest(
http.MethodDelete, c.getURL("/v0/roles/%s/roles/%s", role, child),
nil,
)
if err != nil {
return Role{}, err
}
var respData Role
_, err = c.doRequest(req, http.StatusOK, &respData)
return respData, err
}
func TestPing(t *testing.T) {
e := NewTestEnv(t)
defer e.Close()
if err := e.Client.Ping(context.Background()); err != nil {
t.Fatal("Error:", err)
}
}
func | (t *testing.T) {
e := NewTestEnv(t)
defer e.Close()
if err := e.Client.Health(context.Background()); err != nil {
t.Fatal("Error:", err)
}
}
func TestHealthUnhealthy(t *testing.T) {
e := NewTestEnv(t)
defer e.Close()
if err := e.Core.DB.Close(); err != nil {
t.Fatal("Error:", err)
}
err := e.Client.Health(context.Background())
resp, ok := err.(statusCodeResponse)
if !ok {
t.Fatal("Invalid error:", err)
}
expectStatus(t, http.StatusInternalServerError, resp.StatusCode())
}
func expectStatus(tb testing.TB, expected, got int) {
if got != expected {
tb.Fatalf("Expected %v, got %v", expected, got)
}
}
| TestHealth | identifier_name |
client.go | /*
* The MIT License
*
* Copyright (c) 2017 Long Nguyen
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
package cfclient
import (
"bytes"
"crypto/tls"
"encoding/json"
"fmt"
"golang.org/x/oauth2"
"golang.org/x/oauth2/clientcredentials"
"io"
"io/ioutil"
"net/http"
"net/url"
"strings"
"time"
"github.com/pkg/errors"
"golang.org/x/net/context"
)
// Client used to communicate with Cloud Foundry
type Client struct {
Config Config
Endpoint Endpoints
}
type Endpoints struct {
Links Links `json:"links"`
}
type Links struct {
AuthEndpoint EndpointUrl `json:"login"`
TokenEndpoint EndpointUrl `json:"uaa"`
}
type EndpointUrl struct {
URL string `json:"href"`
}
// Config is used to configure the creation of a client
type Config struct {
ApiAddress string `json:"api_url"`
Username string `json:"user"`
Password string `json:"password"`
ClientID string `json:"client_id"`
ClientSecret string `json:"client_secret"`
SkipSslValidation bool `json:"skip_ssl_validation"`
HttpClient *http.Client
Token string `json:"auth_token"`
TokenSource oauth2.TokenSource
tokenSourceDeadline *time.Time
UserAgent string `json:"user_agent"`
Origin string `json:"-"`
}
type LoginHint struct {
Origin string `json:"origin"`
}
// Request is used to help build up a request
type Request struct {
method string
url string
params url.Values
body io.Reader
obj interface{}
}
// NewClient returns a new client
func NewClient(config *Config) (client *Client, err error) {
// bootstrap the config
defConfig := DefaultConfig()
if len(config.ApiAddress) == 0 {
config.ApiAddress = defConfig.ApiAddress
}
if len(config.Username) == 0 {
config.Username = defConfig.Username
}
if len(config.Password) == 0 {
config.Password = defConfig.Password
}
if len(config.Token) == 0 {
config.Token = defConfig.Token
}
if len(config.UserAgent) == 0 {
config.UserAgent = defConfig.UserAgent
}
if config.HttpClient == nil {
config.HttpClient = defConfig.HttpClient
}
if config.HttpClient.Transport == nil {
config.HttpClient.Transport = shallowDefaultTransport()
}
var tp *http.Transport
switch t := config.HttpClient.Transport.(type) {
case *http.Transport:
tp = t
case *oauth2.Transport:
if bt, ok := t.Base.(*http.Transport); ok {
tp = bt
}
}
if tp != nil {
if tp.TLSClientConfig == nil {
tp.TLSClientConfig = &tls.Config{}
}
tp.TLSClientConfig.InsecureSkipVerify = config.SkipSslValidation
}
config.ApiAddress = strings.TrimRight(config.ApiAddress, "/")
client = &Client{
Config: *config,
}
if err := client.refreshEndpoint(); err != nil {
return nil, err
}
return client, nil
}
// DefaultConfig creates a default config object used by CF client
func DefaultConfig() *Config {
return &Config{
ApiAddress: "http://api.bosh-lite.com",
Username: "admin",
Password: "admin",
Token: "",
SkipSslValidation: false,
HttpClient: http.DefaultClient,
UserAgent: "SM-CF-client/1.0",
}
}
// NewRequest is used to create a new Request
func (c *Client) NewRequest(method, path string) *Request {
requestUrl := path
if !strings.HasPrefix(path, "http") {
requestUrl = c.Config.ApiAddress + path
}
r := &Request{
method: method,
url: requestUrl,
params: make(map[string][]string),
}
return r
}
// NewRequestWithBody is used to create a new request with
func (c *Client) NewRequestWithBody(method, path string, body io.Reader) *Request {
r := c.NewRequest(method, path)
r.body = body
return r
}
// DoRequest runs a request with our client
func (c *Client) DoRequest(r *Request) (*http.Response, error) {
req, err := r.toHTTP()
if err != nil {
return nil, err
}
req.Header.Set("User-Agent", c.Config.UserAgent)
if req.Body != nil && req.Header.Get("Content-type") == "" {
req.Header.Set("Content-type", "application/json")
}
resp, err := c.Config.HttpClient.Do(req)
if err != nil {
return nil, err
}
if resp.StatusCode >= http.StatusBadRequest {
return c.handleError(resp)
}
return resp, nil
}
func (c *Client) refreshEndpoint() error {
// we want to keep the Timeout value from config.HttpClient
timeout := c.Config.HttpClient.Timeout
ctx := context.Background()
ctx = context.WithValue(ctx, oauth2.HTTPClient, c.Config.HttpClient)
endpoint, err := getInfo(c.Config.ApiAddress, oauth2.NewClient(ctx, nil))
if err != nil {
return errors.Wrap(err, "Could not get endpoints from the root call")
}
switch {
case c.Config.Token != "":
c.Config = getUserTokenAuth(ctx, c.Config, endpoint)
case c.Config.ClientID != "":
c.Config = getClientAuth(ctx, c.Config, endpoint)
default:
c.Config, err = getUserAuth(ctx, c.Config, endpoint)
if err != nil {
return err
}
}
// make sure original Timeout value will be used
if c.Config.HttpClient.Timeout != timeout {
c.Config.HttpClient.Timeout = timeout
}
c.Endpoint = *endpoint
return nil
}
// getUserTokenAuth initializes client credentials from existing bearer token.
func getUserTokenAuth(ctx context.Context, config Config, endpoints *Endpoints) Config {
authConfig := &oauth2.Config{
ClientID: "cf",
Scopes: []string{""},
Endpoint: oauth2.Endpoint{
AuthURL: endpoints.Links.AuthEndpoint.URL + "/oauth/auth",
TokenURL: endpoints.Links.TokenEndpoint.URL + "/oauth/token",
},
}
// Token is expected to have no "bearer" prefix
token := &oauth2.Token{
AccessToken: config.Token,
TokenType: "Bearer"}
config.TokenSource = authConfig.TokenSource(ctx, token)
config.HttpClient = oauth2.NewClient(ctx, config.TokenSource)
return config
}
func getClientAuth(ctx context.Context, config Config, endpoints *Endpoints) Config {
authConfig := &clientcredentials.Config{
ClientID: config.ClientID,
ClientSecret: config.ClientSecret,
TokenURL: endpoints.Links.TokenEndpoint.URL + "/oauth/token",
}
config.TokenSource = authConfig.TokenSource(ctx)
config.HttpClient = authConfig.Client(ctx)
return config
}
func getUserAuth(ctx context.Context, config Config, endpoints *Endpoints) (Config, error) {
authConfig := &oauth2.Config{
ClientID: "cf",
Scopes: []string{""},
Endpoint: oauth2.Endpoint{
AuthURL: endpoints.Links.AuthEndpoint.URL + "/oauth/auth",
TokenURL: endpoints.Links.TokenEndpoint.URL + "/oauth/token",
},
}
if config.Origin != "" {
loginHint := LoginHint{config.Origin}
origin, err := json.Marshal(loginHint)
if err != nil {
return config, errors.Wrap(err, "Error creating login_hint")
}
val := url.Values{}
val.Set("login_hint", string(origin))
authConfig.Endpoint.TokenURL = fmt.Sprintf("%s?%s", authConfig.Endpoint.TokenURL, val.Encode())
}
token, err := authConfig.PasswordCredentialsToken(ctx, config.Username, config.Password)
if err != nil {
return config, errors.Wrap(err, "Error getting token")
}
config.tokenSourceDeadline = &token.Expiry
config.TokenSource = authConfig.TokenSource(ctx, token)
config.HttpClient = oauth2.NewClient(ctx, config.TokenSource)
return config, err
}
func getInfo(api string, httpClient *http.Client) (*Endpoints, error) {
var endpoints Endpoints
if api == "" {
return nil, fmt.Errorf("CF ApiAddress cannot be empty")
}
resp, err := httpClient.Get(api + "/")
if err != nil {
return nil, err
}
defer resp.Body.Close()
err = decodeBody(resp, &endpoints)
if err != nil {
return nil, err
}
return &endpoints, err
}
func shallowDefaultTransport() *http.Transport {
defaultTransport := http.DefaultTransport.(*http.Transport)
return &http.Transport{
Proxy: defaultTransport.Proxy,
TLSHandshakeTimeout: defaultTransport.TLSHandshakeTimeout,
ExpectContinueTimeout: defaultTransport.ExpectContinueTimeout,
}
}
// decodeBody is used to JSON decode a body
func decodeBody(resp *http.Response, out interface{}) error {
defer resp.Body.Close()
dec := json.NewDecoder(resp.Body)
return dec.Decode(out)
}
// encodeBody is used to encode a request body
func encodeBody(obj interface{}) (io.Reader, error) {
buf := bytes.NewBuffer(nil)
enc := json.NewEncoder(buf)
if err := enc.Encode(obj); err != nil {
return nil, err
}
return buf, nil
}
// toHTTP converts the request to an HTTP Request
func (r *Request) toHTTP() (*http.Request, error) {
// Check if we should encode the body
if r.body == nil && r.obj != nil {
b, err := encodeBody(r.obj)
if err != nil {
return nil, err
}
r.body = b | return http.NewRequest(r.method, r.url, r.body)
}
func (c *Client) handleError(resp *http.Response) (*http.Response, error) {
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
return resp, CloudFoundryHTTPError{
StatusCode: resp.StatusCode,
Status: resp.Status,
Body: body,
}
}
defer resp.Body.Close()
var cfErrors CloudFoundryErrors
if err := json.Unmarshal(body, &cfErrors); err != nil {
return resp, CloudFoundryHTTPError{
StatusCode: resp.StatusCode,
Status: resp.Status,
Body: body,
}
}
return nil, CloudFoundryToHttpError(cfErrors)
} | }
// Create the HTTP Request | random_line_split |
client.go | /*
* The MIT License
*
* Copyright (c) 2017 Long Nguyen
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
package cfclient
import (
"bytes"
"crypto/tls"
"encoding/json"
"fmt"
"golang.org/x/oauth2"
"golang.org/x/oauth2/clientcredentials"
"io"
"io/ioutil"
"net/http"
"net/url"
"strings"
"time"
"github.com/pkg/errors"
"golang.org/x/net/context"
)
// Client used to communicate with Cloud Foundry
type Client struct {
Config Config
Endpoint Endpoints
}
type Endpoints struct {
Links Links `json:"links"`
}
type Links struct {
AuthEndpoint EndpointUrl `json:"login"`
TokenEndpoint EndpointUrl `json:"uaa"`
}
type EndpointUrl struct {
URL string `json:"href"`
}
// Config is used to configure the creation of a client
type Config struct {
ApiAddress string `json:"api_url"`
Username string `json:"user"`
Password string `json:"password"`
ClientID string `json:"client_id"`
ClientSecret string `json:"client_secret"`
SkipSslValidation bool `json:"skip_ssl_validation"`
HttpClient *http.Client
Token string `json:"auth_token"`
TokenSource oauth2.TokenSource
tokenSourceDeadline *time.Time
UserAgent string `json:"user_agent"`
Origin string `json:"-"`
}
type LoginHint struct {
Origin string `json:"origin"`
}
// Request is used to help build up a request
type Request struct {
method string
url string
params url.Values
body io.Reader
obj interface{}
}
// NewClient returns a new client
func NewClient(config *Config) (client *Client, err error) {
// bootstrap the config
defConfig := DefaultConfig()
if len(config.ApiAddress) == 0 {
config.ApiAddress = defConfig.ApiAddress
}
if len(config.Username) == 0 {
config.Username = defConfig.Username
}
if len(config.Password) == 0 {
config.Password = defConfig.Password
}
if len(config.Token) == 0 {
config.Token = defConfig.Token
}
if len(config.UserAgent) == 0 {
config.UserAgent = defConfig.UserAgent
}
if config.HttpClient == nil {
config.HttpClient = defConfig.HttpClient
}
if config.HttpClient.Transport == nil {
config.HttpClient.Transport = shallowDefaultTransport()
}
var tp *http.Transport
switch t := config.HttpClient.Transport.(type) {
case *http.Transport:
tp = t
case *oauth2.Transport:
if bt, ok := t.Base.(*http.Transport); ok {
tp = bt
}
}
if tp != nil {
if tp.TLSClientConfig == nil {
tp.TLSClientConfig = &tls.Config{}
}
tp.TLSClientConfig.InsecureSkipVerify = config.SkipSslValidation
}
config.ApiAddress = strings.TrimRight(config.ApiAddress, "/")
client = &Client{
Config: *config,
}
if err := client.refreshEndpoint(); err != nil {
return nil, err
}
return client, nil
}
// DefaultConfig creates a default config object used by CF client
func DefaultConfig() *Config {
return &Config{
ApiAddress: "http://api.bosh-lite.com",
Username: "admin",
Password: "admin",
Token: "",
SkipSslValidation: false,
HttpClient: http.DefaultClient,
UserAgent: "SM-CF-client/1.0",
}
}
// NewRequest is used to create a new Request
func (c *Client) NewRequest(method, path string) *Request {
requestUrl := path
if !strings.HasPrefix(path, "http") {
requestUrl = c.Config.ApiAddress + path
}
r := &Request{
method: method,
url: requestUrl,
params: make(map[string][]string),
}
return r
}
// NewRequestWithBody is used to create a new request with
func (c *Client) NewRequestWithBody(method, path string, body io.Reader) *Request {
r := c.NewRequest(method, path)
r.body = body
return r
}
// DoRequest runs a request with our client
func (c *Client) DoRequest(r *Request) (*http.Response, error) {
req, err := r.toHTTP()
if err != nil {
return nil, err
}
req.Header.Set("User-Agent", c.Config.UserAgent)
if req.Body != nil && req.Header.Get("Content-type") == "" {
req.Header.Set("Content-type", "application/json")
}
resp, err := c.Config.HttpClient.Do(req)
if err != nil {
return nil, err
}
if resp.StatusCode >= http.StatusBadRequest {
return c.handleError(resp)
}
return resp, nil
}
func (c *Client) refreshEndpoint() error {
// we want to keep the Timeout value from config.HttpClient
timeout := c.Config.HttpClient.Timeout
ctx := context.Background()
ctx = context.WithValue(ctx, oauth2.HTTPClient, c.Config.HttpClient)
endpoint, err := getInfo(c.Config.ApiAddress, oauth2.NewClient(ctx, nil))
if err != nil {
return errors.Wrap(err, "Could not get endpoints from the root call")
}
switch {
case c.Config.Token != "":
c.Config = getUserTokenAuth(ctx, c.Config, endpoint)
case c.Config.ClientID != "":
c.Config = getClientAuth(ctx, c.Config, endpoint)
default:
c.Config, err = getUserAuth(ctx, c.Config, endpoint)
if err != nil {
return err
}
}
// make sure original Timeout value will be used
if c.Config.HttpClient.Timeout != timeout {
c.Config.HttpClient.Timeout = timeout
}
c.Endpoint = *endpoint
return nil
}
// getUserTokenAuth initializes client credentials from existing bearer token.
func getUserTokenAuth(ctx context.Context, config Config, endpoints *Endpoints) Config |
func getClientAuth(ctx context.Context, config Config, endpoints *Endpoints) Config {
authConfig := &clientcredentials.Config{
ClientID: config.ClientID,
ClientSecret: config.ClientSecret,
TokenURL: endpoints.Links.TokenEndpoint.URL + "/oauth/token",
}
config.TokenSource = authConfig.TokenSource(ctx)
config.HttpClient = authConfig.Client(ctx)
return config
}
func getUserAuth(ctx context.Context, config Config, endpoints *Endpoints) (Config, error) {
authConfig := &oauth2.Config{
ClientID: "cf",
Scopes: []string{""},
Endpoint: oauth2.Endpoint{
AuthURL: endpoints.Links.AuthEndpoint.URL + "/oauth/auth",
TokenURL: endpoints.Links.TokenEndpoint.URL + "/oauth/token",
},
}
if config.Origin != "" {
loginHint := LoginHint{config.Origin}
origin, err := json.Marshal(loginHint)
if err != nil {
return config, errors.Wrap(err, "Error creating login_hint")
}
val := url.Values{}
val.Set("login_hint", string(origin))
authConfig.Endpoint.TokenURL = fmt.Sprintf("%s?%s", authConfig.Endpoint.TokenURL, val.Encode())
}
token, err := authConfig.PasswordCredentialsToken(ctx, config.Username, config.Password)
if err != nil {
return config, errors.Wrap(err, "Error getting token")
}
config.tokenSourceDeadline = &token.Expiry
config.TokenSource = authConfig.TokenSource(ctx, token)
config.HttpClient = oauth2.NewClient(ctx, config.TokenSource)
return config, err
}
func getInfo(api string, httpClient *http.Client) (*Endpoints, error) {
var endpoints Endpoints
if api == "" {
return nil, fmt.Errorf("CF ApiAddress cannot be empty")
}
resp, err := httpClient.Get(api + "/")
if err != nil {
return nil, err
}
defer resp.Body.Close()
err = decodeBody(resp, &endpoints)
if err != nil {
return nil, err
}
return &endpoints, err
}
func shallowDefaultTransport() *http.Transport {
defaultTransport := http.DefaultTransport.(*http.Transport)
return &http.Transport{
Proxy: defaultTransport.Proxy,
TLSHandshakeTimeout: defaultTransport.TLSHandshakeTimeout,
ExpectContinueTimeout: defaultTransport.ExpectContinueTimeout,
}
}
// decodeBody is used to JSON decode a body
func decodeBody(resp *http.Response, out interface{}) error {
defer resp.Body.Close()
dec := json.NewDecoder(resp.Body)
return dec.Decode(out)
}
// encodeBody is used to encode a request body
func encodeBody(obj interface{}) (io.Reader, error) {
buf := bytes.NewBuffer(nil)
enc := json.NewEncoder(buf)
if err := enc.Encode(obj); err != nil {
return nil, err
}
return buf, nil
}
// toHTTP converts the request to an HTTP Request
func (r *Request) toHTTP() (*http.Request, error) {
// Check if we should encode the body
if r.body == nil && r.obj != nil {
b, err := encodeBody(r.obj)
if err != nil {
return nil, err
}
r.body = b
}
// Create the HTTP Request
return http.NewRequest(r.method, r.url, r.body)
}
func (c *Client) handleError(resp *http.Response) (*http.Response, error) {
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
return resp, CloudFoundryHTTPError{
StatusCode: resp.StatusCode,
Status: resp.Status,
Body: body,
}
}
defer resp.Body.Close()
var cfErrors CloudFoundryErrors
if err := json.Unmarshal(body, &cfErrors); err != nil {
return resp, CloudFoundryHTTPError{
StatusCode: resp.StatusCode,
Status: resp.Status,
Body: body,
}
}
return nil, CloudFoundryToHttpError(cfErrors)
}
| {
authConfig := &oauth2.Config{
ClientID: "cf",
Scopes: []string{""},
Endpoint: oauth2.Endpoint{
AuthURL: endpoints.Links.AuthEndpoint.URL + "/oauth/auth",
TokenURL: endpoints.Links.TokenEndpoint.URL + "/oauth/token",
},
}
// Token is expected to have no "bearer" prefix
token := &oauth2.Token{
AccessToken: config.Token,
TokenType: "Bearer"}
config.TokenSource = authConfig.TokenSource(ctx, token)
config.HttpClient = oauth2.NewClient(ctx, config.TokenSource)
return config
} | identifier_body |
client.go | /*
* The MIT License
*
* Copyright (c) 2017 Long Nguyen
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
package cfclient
import (
"bytes"
"crypto/tls"
"encoding/json"
"fmt"
"golang.org/x/oauth2"
"golang.org/x/oauth2/clientcredentials"
"io"
"io/ioutil"
"net/http"
"net/url"
"strings"
"time"
"github.com/pkg/errors"
"golang.org/x/net/context"
)
// Client used to communicate with Cloud Foundry
type Client struct {
Config Config
Endpoint Endpoints
}
type Endpoints struct {
Links Links `json:"links"`
}
type Links struct {
AuthEndpoint EndpointUrl `json:"login"`
TokenEndpoint EndpointUrl `json:"uaa"`
}
type EndpointUrl struct {
URL string `json:"href"`
}
// Config is used to configure the creation of a client
type Config struct {
ApiAddress string `json:"api_url"`
Username string `json:"user"`
Password string `json:"password"`
ClientID string `json:"client_id"`
ClientSecret string `json:"client_secret"`
SkipSslValidation bool `json:"skip_ssl_validation"`
HttpClient *http.Client
Token string `json:"auth_token"`
TokenSource oauth2.TokenSource
tokenSourceDeadline *time.Time
UserAgent string `json:"user_agent"`
Origin string `json:"-"`
}
type LoginHint struct {
Origin string `json:"origin"`
}
// Request is used to help build up a request
type Request struct {
method string
url string
params url.Values
body io.Reader
obj interface{}
}
// NewClient returns a new client
func NewClient(config *Config) (client *Client, err error) {
// bootstrap the config
defConfig := DefaultConfig()
if len(config.ApiAddress) == 0 {
config.ApiAddress = defConfig.ApiAddress
}
if len(config.Username) == 0 {
config.Username = defConfig.Username
}
if len(config.Password) == 0 {
config.Password = defConfig.Password
}
if len(config.Token) == 0 {
config.Token = defConfig.Token
}
if len(config.UserAgent) == 0 {
config.UserAgent = defConfig.UserAgent
}
if config.HttpClient == nil {
config.HttpClient = defConfig.HttpClient
}
if config.HttpClient.Transport == nil {
config.HttpClient.Transport = shallowDefaultTransport()
}
var tp *http.Transport
switch t := config.HttpClient.Transport.(type) {
case *http.Transport:
tp = t
case *oauth2.Transport:
if bt, ok := t.Base.(*http.Transport); ok {
tp = bt
}
}
if tp != nil {
if tp.TLSClientConfig == nil {
tp.TLSClientConfig = &tls.Config{}
}
tp.TLSClientConfig.InsecureSkipVerify = config.SkipSslValidation
}
config.ApiAddress = strings.TrimRight(config.ApiAddress, "/")
client = &Client{
Config: *config,
}
if err := client.refreshEndpoint(); err != nil {
return nil, err
}
return client, nil
}
// DefaultConfig creates a default config object used by CF client
func DefaultConfig() *Config {
return &Config{
ApiAddress: "http://api.bosh-lite.com",
Username: "admin",
Password: "admin",
Token: "",
SkipSslValidation: false,
HttpClient: http.DefaultClient,
UserAgent: "SM-CF-client/1.0",
}
}
// NewRequest is used to create a new Request
func (c *Client) NewRequest(method, path string) *Request {
requestUrl := path
if !strings.HasPrefix(path, "http") {
requestUrl = c.Config.ApiAddress + path
}
r := &Request{
method: method,
url: requestUrl,
params: make(map[string][]string),
}
return r
}
// NewRequestWithBody is used to create a new request with
func (c *Client) NewRequestWithBody(method, path string, body io.Reader) *Request {
r := c.NewRequest(method, path)
r.body = body
return r
}
// DoRequest runs a request with our client
func (c *Client) DoRequest(r *Request) (*http.Response, error) {
req, err := r.toHTTP()
if err != nil {
return nil, err
}
req.Header.Set("User-Agent", c.Config.UserAgent)
if req.Body != nil && req.Header.Get("Content-type") == "" {
req.Header.Set("Content-type", "application/json")
}
resp, err := c.Config.HttpClient.Do(req)
if err != nil {
return nil, err
}
if resp.StatusCode >= http.StatusBadRequest {
return c.handleError(resp)
}
return resp, nil
}
func (c *Client) refreshEndpoint() error {
// we want to keep the Timeout value from config.HttpClient
timeout := c.Config.HttpClient.Timeout
ctx := context.Background()
ctx = context.WithValue(ctx, oauth2.HTTPClient, c.Config.HttpClient)
endpoint, err := getInfo(c.Config.ApiAddress, oauth2.NewClient(ctx, nil))
if err != nil {
return errors.Wrap(err, "Could not get endpoints from the root call")
}
switch {
case c.Config.Token != "":
c.Config = getUserTokenAuth(ctx, c.Config, endpoint)
case c.Config.ClientID != "":
c.Config = getClientAuth(ctx, c.Config, endpoint)
default:
c.Config, err = getUserAuth(ctx, c.Config, endpoint)
if err != nil {
return err
}
}
// make sure original Timeout value will be used
if c.Config.HttpClient.Timeout != timeout {
c.Config.HttpClient.Timeout = timeout
}
c.Endpoint = *endpoint
return nil
}
// getUserTokenAuth initializes client credentials from existing bearer token.
func getUserTokenAuth(ctx context.Context, config Config, endpoints *Endpoints) Config {
authConfig := &oauth2.Config{
ClientID: "cf",
Scopes: []string{""},
Endpoint: oauth2.Endpoint{
AuthURL: endpoints.Links.AuthEndpoint.URL + "/oauth/auth",
TokenURL: endpoints.Links.TokenEndpoint.URL + "/oauth/token",
},
}
// Token is expected to have no "bearer" prefix
token := &oauth2.Token{
AccessToken: config.Token,
TokenType: "Bearer"}
config.TokenSource = authConfig.TokenSource(ctx, token)
config.HttpClient = oauth2.NewClient(ctx, config.TokenSource)
return config
}
func getClientAuth(ctx context.Context, config Config, endpoints *Endpoints) Config {
authConfig := &clientcredentials.Config{
ClientID: config.ClientID,
ClientSecret: config.ClientSecret,
TokenURL: endpoints.Links.TokenEndpoint.URL + "/oauth/token",
}
config.TokenSource = authConfig.TokenSource(ctx)
config.HttpClient = authConfig.Client(ctx)
return config
}
func getUserAuth(ctx context.Context, config Config, endpoints *Endpoints) (Config, error) {
authConfig := &oauth2.Config{
ClientID: "cf",
Scopes: []string{""},
Endpoint: oauth2.Endpoint{
AuthURL: endpoints.Links.AuthEndpoint.URL + "/oauth/auth",
TokenURL: endpoints.Links.TokenEndpoint.URL + "/oauth/token",
},
}
if config.Origin != "" {
loginHint := LoginHint{config.Origin}
origin, err := json.Marshal(loginHint)
if err != nil {
return config, errors.Wrap(err, "Error creating login_hint")
}
val := url.Values{}
val.Set("login_hint", string(origin))
authConfig.Endpoint.TokenURL = fmt.Sprintf("%s?%s", authConfig.Endpoint.TokenURL, val.Encode())
}
token, err := authConfig.PasswordCredentialsToken(ctx, config.Username, config.Password)
if err != nil {
return config, errors.Wrap(err, "Error getting token")
}
config.tokenSourceDeadline = &token.Expiry
config.TokenSource = authConfig.TokenSource(ctx, token)
config.HttpClient = oauth2.NewClient(ctx, config.TokenSource)
return config, err
}
func getInfo(api string, httpClient *http.Client) (*Endpoints, error) {
var endpoints Endpoints
if api == "" {
return nil, fmt.Errorf("CF ApiAddress cannot be empty")
}
resp, err := httpClient.Get(api + "/")
if err != nil {
return nil, err
}
defer resp.Body.Close()
err = decodeBody(resp, &endpoints)
if err != nil {
return nil, err
}
return &endpoints, err
}
func shallowDefaultTransport() *http.Transport {
defaultTransport := http.DefaultTransport.(*http.Transport)
return &http.Transport{
Proxy: defaultTransport.Proxy,
TLSHandshakeTimeout: defaultTransport.TLSHandshakeTimeout,
ExpectContinueTimeout: defaultTransport.ExpectContinueTimeout,
}
}
// decodeBody is used to JSON decode a body
func decodeBody(resp *http.Response, out interface{}) error {
defer resp.Body.Close()
dec := json.NewDecoder(resp.Body)
return dec.Decode(out)
}
// encodeBody is used to encode a request body
func encodeBody(obj interface{}) (io.Reader, error) {
buf := bytes.NewBuffer(nil)
enc := json.NewEncoder(buf)
if err := enc.Encode(obj); err != nil {
return nil, err
}
return buf, nil
}
// toHTTP converts the request to an HTTP Request
func (r *Request) toHTTP() (*http.Request, error) {
// Check if we should encode the body
if r.body == nil && r.obj != nil {
b, err := encodeBody(r.obj)
if err != nil {
return nil, err
}
r.body = b
}
// Create the HTTP Request
return http.NewRequest(r.method, r.url, r.body)
}
func (c *Client) handleError(resp *http.Response) (*http.Response, error) {
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
return resp, CloudFoundryHTTPError{
StatusCode: resp.StatusCode,
Status: resp.Status,
Body: body,
}
}
defer resp.Body.Close()
var cfErrors CloudFoundryErrors
if err := json.Unmarshal(body, &cfErrors); err != nil |
return nil, CloudFoundryToHttpError(cfErrors)
}
| {
return resp, CloudFoundryHTTPError{
StatusCode: resp.StatusCode,
Status: resp.Status,
Body: body,
}
} | conditional_block |
client.go | /*
* The MIT License
*
* Copyright (c) 2017 Long Nguyen
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
package cfclient
import (
"bytes"
"crypto/tls"
"encoding/json"
"fmt"
"golang.org/x/oauth2"
"golang.org/x/oauth2/clientcredentials"
"io"
"io/ioutil"
"net/http"
"net/url"
"strings"
"time"
"github.com/pkg/errors"
"golang.org/x/net/context"
)
// Client used to communicate with Cloud Foundry
type Client struct {
Config Config
Endpoint Endpoints
}
type Endpoints struct {
Links Links `json:"links"`
}
type Links struct {
AuthEndpoint EndpointUrl `json:"login"`
TokenEndpoint EndpointUrl `json:"uaa"`
}
type EndpointUrl struct {
URL string `json:"href"`
}
// Config is used to configure the creation of a client
type Config struct {
ApiAddress string `json:"api_url"`
Username string `json:"user"`
Password string `json:"password"`
ClientID string `json:"client_id"`
ClientSecret string `json:"client_secret"`
SkipSslValidation bool `json:"skip_ssl_validation"`
HttpClient *http.Client
Token string `json:"auth_token"`
TokenSource oauth2.TokenSource
tokenSourceDeadline *time.Time
UserAgent string `json:"user_agent"`
Origin string `json:"-"`
}
type LoginHint struct {
Origin string `json:"origin"`
}
// Request is used to help build up a request
type Request struct {
method string
url string
params url.Values
body io.Reader
obj interface{}
}
// NewClient returns a new client
func NewClient(config *Config) (client *Client, err error) {
// bootstrap the config
defConfig := DefaultConfig()
if len(config.ApiAddress) == 0 {
config.ApiAddress = defConfig.ApiAddress
}
if len(config.Username) == 0 {
config.Username = defConfig.Username
}
if len(config.Password) == 0 {
config.Password = defConfig.Password
}
if len(config.Token) == 0 {
config.Token = defConfig.Token
}
if len(config.UserAgent) == 0 {
config.UserAgent = defConfig.UserAgent
}
if config.HttpClient == nil {
config.HttpClient = defConfig.HttpClient
}
if config.HttpClient.Transport == nil {
config.HttpClient.Transport = shallowDefaultTransport()
}
var tp *http.Transport
switch t := config.HttpClient.Transport.(type) {
case *http.Transport:
tp = t
case *oauth2.Transport:
if bt, ok := t.Base.(*http.Transport); ok {
tp = bt
}
}
if tp != nil {
if tp.TLSClientConfig == nil {
tp.TLSClientConfig = &tls.Config{}
}
tp.TLSClientConfig.InsecureSkipVerify = config.SkipSslValidation
}
config.ApiAddress = strings.TrimRight(config.ApiAddress, "/")
client = &Client{
Config: *config,
}
if err := client.refreshEndpoint(); err != nil {
return nil, err
}
return client, nil
}
// DefaultConfig creates a default config object used by CF client
func DefaultConfig() *Config {
return &Config{
ApiAddress: "http://api.bosh-lite.com",
Username: "admin",
Password: "admin",
Token: "",
SkipSslValidation: false,
HttpClient: http.DefaultClient,
UserAgent: "SM-CF-client/1.0",
}
}
// NewRequest is used to create a new Request
func (c *Client) NewRequest(method, path string) *Request {
requestUrl := path
if !strings.HasPrefix(path, "http") {
requestUrl = c.Config.ApiAddress + path
}
r := &Request{
method: method,
url: requestUrl,
params: make(map[string][]string),
}
return r
}
// NewRequestWithBody is used to create a new request with
func (c *Client) NewRequestWithBody(method, path string, body io.Reader) *Request {
r := c.NewRequest(method, path)
r.body = body
return r
}
// DoRequest runs a request with our client
func (c *Client) DoRequest(r *Request) (*http.Response, error) {
req, err := r.toHTTP()
if err != nil {
return nil, err
}
req.Header.Set("User-Agent", c.Config.UserAgent)
if req.Body != nil && req.Header.Get("Content-type") == "" {
req.Header.Set("Content-type", "application/json")
}
resp, err := c.Config.HttpClient.Do(req)
if err != nil {
return nil, err
}
if resp.StatusCode >= http.StatusBadRequest {
return c.handleError(resp)
}
return resp, nil
}
func (c *Client) refreshEndpoint() error {
// we want to keep the Timeout value from config.HttpClient
timeout := c.Config.HttpClient.Timeout
ctx := context.Background()
ctx = context.WithValue(ctx, oauth2.HTTPClient, c.Config.HttpClient)
endpoint, err := getInfo(c.Config.ApiAddress, oauth2.NewClient(ctx, nil))
if err != nil {
return errors.Wrap(err, "Could not get endpoints from the root call")
}
switch {
case c.Config.Token != "":
c.Config = getUserTokenAuth(ctx, c.Config, endpoint)
case c.Config.ClientID != "":
c.Config = getClientAuth(ctx, c.Config, endpoint)
default:
c.Config, err = getUserAuth(ctx, c.Config, endpoint)
if err != nil {
return err
}
}
// make sure original Timeout value will be used
if c.Config.HttpClient.Timeout != timeout {
c.Config.HttpClient.Timeout = timeout
}
c.Endpoint = *endpoint
return nil
}
// getUserTokenAuth initializes client credentials from existing bearer token.
func getUserTokenAuth(ctx context.Context, config Config, endpoints *Endpoints) Config {
authConfig := &oauth2.Config{
ClientID: "cf",
Scopes: []string{""},
Endpoint: oauth2.Endpoint{
AuthURL: endpoints.Links.AuthEndpoint.URL + "/oauth/auth",
TokenURL: endpoints.Links.TokenEndpoint.URL + "/oauth/token",
},
}
// Token is expected to have no "bearer" prefix
token := &oauth2.Token{
AccessToken: config.Token,
TokenType: "Bearer"}
config.TokenSource = authConfig.TokenSource(ctx, token)
config.HttpClient = oauth2.NewClient(ctx, config.TokenSource)
return config
}
func getClientAuth(ctx context.Context, config Config, endpoints *Endpoints) Config {
authConfig := &clientcredentials.Config{
ClientID: config.ClientID,
ClientSecret: config.ClientSecret,
TokenURL: endpoints.Links.TokenEndpoint.URL + "/oauth/token",
}
config.TokenSource = authConfig.TokenSource(ctx)
config.HttpClient = authConfig.Client(ctx)
return config
}
func getUserAuth(ctx context.Context, config Config, endpoints *Endpoints) (Config, error) {
authConfig := &oauth2.Config{
ClientID: "cf",
Scopes: []string{""},
Endpoint: oauth2.Endpoint{
AuthURL: endpoints.Links.AuthEndpoint.URL + "/oauth/auth",
TokenURL: endpoints.Links.TokenEndpoint.URL + "/oauth/token",
},
}
if config.Origin != "" {
loginHint := LoginHint{config.Origin}
origin, err := json.Marshal(loginHint)
if err != nil {
return config, errors.Wrap(err, "Error creating login_hint")
}
val := url.Values{}
val.Set("login_hint", string(origin))
authConfig.Endpoint.TokenURL = fmt.Sprintf("%s?%s", authConfig.Endpoint.TokenURL, val.Encode())
}
token, err := authConfig.PasswordCredentialsToken(ctx, config.Username, config.Password)
if err != nil {
return config, errors.Wrap(err, "Error getting token")
}
config.tokenSourceDeadline = &token.Expiry
config.TokenSource = authConfig.TokenSource(ctx, token)
config.HttpClient = oauth2.NewClient(ctx, config.TokenSource)
return config, err
}
func getInfo(api string, httpClient *http.Client) (*Endpoints, error) {
var endpoints Endpoints
if api == "" {
return nil, fmt.Errorf("CF ApiAddress cannot be empty")
}
resp, err := httpClient.Get(api + "/")
if err != nil {
return nil, err
}
defer resp.Body.Close()
err = decodeBody(resp, &endpoints)
if err != nil {
return nil, err
}
return &endpoints, err
}
func | () *http.Transport {
defaultTransport := http.DefaultTransport.(*http.Transport)
return &http.Transport{
Proxy: defaultTransport.Proxy,
TLSHandshakeTimeout: defaultTransport.TLSHandshakeTimeout,
ExpectContinueTimeout: defaultTransport.ExpectContinueTimeout,
}
}
// decodeBody is used to JSON decode a body
func decodeBody(resp *http.Response, out interface{}) error {
defer resp.Body.Close()
dec := json.NewDecoder(resp.Body)
return dec.Decode(out)
}
// encodeBody is used to encode a request body
func encodeBody(obj interface{}) (io.Reader, error) {
buf := bytes.NewBuffer(nil)
enc := json.NewEncoder(buf)
if err := enc.Encode(obj); err != nil {
return nil, err
}
return buf, nil
}
// toHTTP converts the request to an HTTP Request
func (r *Request) toHTTP() (*http.Request, error) {
// Check if we should encode the body
if r.body == nil && r.obj != nil {
b, err := encodeBody(r.obj)
if err != nil {
return nil, err
}
r.body = b
}
// Create the HTTP Request
return http.NewRequest(r.method, r.url, r.body)
}
func (c *Client) handleError(resp *http.Response) (*http.Response, error) {
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
return resp, CloudFoundryHTTPError{
StatusCode: resp.StatusCode,
Status: resp.Status,
Body: body,
}
}
defer resp.Body.Close()
var cfErrors CloudFoundryErrors
if err := json.Unmarshal(body, &cfErrors); err != nil {
return resp, CloudFoundryHTTPError{
StatusCode: resp.StatusCode,
Status: resp.Status,
Body: body,
}
}
return nil, CloudFoundryToHttpError(cfErrors)
}
| shallowDefaultTransport | identifier_name |
room_model.rs | use crate::room::member::Member;
use crate::room::room::{MemberLeaveNoticeType, RoomState};
use crate::room::room::{Room, MEMBER_MAX};
use crate::task_timer::{Task, TaskCmd};
use crate::TEMPLATES;
use log::{error, info, warn};
use num_enum::IntoPrimitive;
use num_enum::TryFromPrimitive;
use protobuf::Message;
use rayon::slice::ParallelSliceMut;
use serde_json::{Map, Value};
use std::borrow::BorrowMut;
use std::collections::hash_map::RandomState;
use std::collections::HashMap;
use std::convert::TryFrom;
use std::str::FromStr;
use tools::cmd_code::ClientCode;
use tools::protos::base::RoomSettingPt;
use tools::protos::room::S_LEAVE_ROOM;
use tools::tcp::TcpSender;
use tools::templates::template::TemplateMgrTrait;
use tools::templates::tile_map_temp::TileMapTempMgr;
///teamID枚举
#[derive(Debug, Clone, Copy, Eq, PartialEq, TryFromPrimitive, IntoPrimitive)]
#[repr(u8)]
pub enum TeamId {
Min = 1, //最小teamid
Max = 4, //最大teamid
}
///房间类型
#[derive(Debug, Clone, Copy, Eq, PartialEq, TryFromPrimitive, IntoPrimitive)]
#[repr(u8)]
pub enum RoomType {
None = 0, //无效
Custom = 1, //自定义房间
Match = 2, //匹配房间
SeasonPve = 3, //赛季PVE房间
WorldBossPve = 4, //世界boss房间
}
impl RoomType {
pub fn into_u8(self) -> u8 {
let res: u8 = self.into();
res
}
pub fn into_u32(self) -> u32 {
let res: u8 = self.into();
res as u32
}
}
///战斗模式类型
#[derive(Debug, Clone, Copy, Eq, PartialEq, TryFromPrimitive, IntoPrimitive)]
#[repr(u8)]
pub enum BattleType {
None = 0, //无效初始值
OneVOneVOneVOne = 1, //1v1v1v1
TwoVTwo = 2, //2v2
OneVOne = 3, //1v1
}
impl Default for BattleType {
fn default() -> Self {
BattleType::OneVOneVOneVOne
}
}
impl BattleType {
pub fn into_u8(self) -> u8 {
let res: u8 = self.into();
res
}
pub fn into_u32(self) -> u32 {
let res: u8 = self.into();
res as u32
}
}
///房间设置
#[derive(Debug, Copy, Clone, Default)]
pub struct RoomSetting {
pub battle_type: BattleType, //战斗类型
pub turn_limit_time: u32, //回合限制时间
pub season_id: u32, //赛季id
pub is_open_ai: bool, //是否开启ai
pub victory_condition: u32, //胜利条件
}
impl From<RoomSettingPt> for RoomSetting {
fn from(rs_pt: RoomSettingPt) -> Self {
let battle_type = BattleType::try_from(rs_pt.battle_type as u8).unwrap();
let is_open_ai = rs_pt.is_open_ai;
let victory_condition = rs_pt.victory_condition;
let turn_limit_time = rs_pt.turn_limit_time;
let season_id = rs_pt.season_id;
let rs = RoomSetting {
battle_type,
turn_limit_time,
season_id,
is_open_ai,
victory_condition,
};
rs
}
}
impl From<RoomSetting> for RoomSettingPt {
fn from(r: RoomSetting) -> Self {
let mut rsp = RoomSettingPt::new();
rsp.set_victory_condition(r.victory_condition);
rsp.set_battle_type(r.battle_type as u32);
rsp.set_season_id(r.season_id);
rsp.set_turn_limit_time(r.turn_limit_time);
rsp.set_is_open_ai(r.is_open_ai);
rsp
}
}
///房间缓存
#[derive(Debug, Copy, Clone, Default)]
pub struct RoomCache {
room_id: u32,
count: u8,
}
pub trait RoomModel {
fn get_room_type(&self) -> RoomType;
fn get_room_mut(&mut self, room_id: &u32) -> Option<&mut Room>;
fn create_room(
&mut self,
battle_type: BattleType,
owner: Member,
sender: TcpSender,
task_sender: crossbeam::Sender<Task>,
) -> anyhow::Result<u32>;
fn leave_room(&mut self, notice_type: u8, room_id: &u32, user_id: &u32) -> anyhow::Result<u32>;
fn rm_room(&mut self, room_id: &u32);
fn get_rooms_mut(&mut self) -> &mut HashMap<u32, Room>;
///根据房间id获得房间的可变指针
fn get_mut_room_by_room_id(&mut self, room_id: &u32) -> anyhow::Result<&mut Room> {
let res = self.get_rooms_mut().get_mut(room_id);
if res.is_none() {
let s = format!("this room is not exit!room_id:{}", room_id);
anyhow::bail!(s)
}
Ok(res.unwrap())
}
///根据房间id获得房间的只读指针
fn get_ref_room_by_room_id(&mut self, room_id: &u32) -> anyhow::Result<&Room> {
let res = self.get_rooms_mut().get(room_id);
if res.is_none() {
anyhow::bail!("this room is not exit,room_id:{}", room_id)
}
Ok(res.unwrap())
}
}
///好友房结构体
#[derive(Clone, Default)]
pub struct CustomRoom {
pub rooms: HashMap<u32, Room>, //封装房间房间id->房间结构体实例
}
impl RoomModel for CustomRoom {
fn get_room_type(&self) -> RoomType {
RoomType::Custom
}
fn get_room_mut(&mut self, room_id: &u32) -> Option<&mut Room> {
let res = self.rooms.get_mut(room_id);
res
}
///创建房间
fn create_room(
&mut self,
battle_type: BattleType,
owner: Member,
sender: TcpSender,
task_sender: crossbeam::Sender<Task>,
) -> anyhow::Result<u32> {
let user_id = owner.user_id;
let mut room = Room::new(owner.clone(), RoomType::Custom, sender, task_sender)?;
room.setting.battle_type = battle_type;
let room_id = room.get_room_id();
self.rooms.insert(room_id, room);
let room = self.rooms.get_mut(&room_id).unwrap();
//同志房间其他成员
room.room_add_member_notice(&user_id);
Ok(room_id)
}
///离开房间
fn leave_room(&mut self, notice_type: u8, room_id: &u32, user_id: &u32) -> anyhow::Result<u32> {
let room = self.get_mut_room_by_room_id(room_id)?;
room.remove_member(notice_type, user_id);
let mut slr = S_LEAVE_ROOM::new();
slr.set_is_succ(true);
room.send_2_client(
ClientCode::LeaveRoom,
*user_id,
slr.write_to_bytes().unwrap(),
);
let room_id = room.get_room_id();
Ok(room_id)
}
fn rm_room(&mut self, room_id: &u32) {
self.rooms.remove(room_id);
info!(
"删除房间,释放内存!room_type:{:?},room_id:{}",
self.get_room_type(),
room_id
);
}
fn get_rooms_mut(&mut self) -> &mut HashMap<u32, Room, RandomState> {
self.rooms.borrow_mut()
}
}
///匹配房数组结构封装体
#[derive(Default, Clone)]
pub struct MatchRooms {
pub match_rooms: HashMap<u8, MatchRoom>,
}
impl MatchRooms {
pub fn get_room_mut(&mut self, room_id: &u32) -> Option<&mut Room> {
for i in self.match_rooms.iter_mut() {
let res = i.1.rooms.get_mut(&room_id);
if res.is_some() {
return Some(res.unwrap());
}
}
None
}
pub fn rm_room(&mut self, battle_type: u8, room_id: u32) {
let match_room = self.match_rooms.get_mut(&battle_type);
if let Some(match_room) = match_room {
match_room.rm_room(&room_id);
}
}
///离开房间,离线也好,主动离开也好
pub fn leave(
&mut self,
battle_type: BattleType,
room_id: u32,
user_id: &u32,
) -> anyhow::Result<u32> {
let match_room = self.match_rooms.get_mut(&battle_type.into_u8());
if match_room.is_none() {
let str = format!("there is no battle_type:{:?}!", battle_type);
warn!("{:?}", str.as_str());
anyhow::bail!("{:?}", str)
}
let match_room = match_room.unwrap();
let res = match_room.leave_room(MemberLeaveNoticeType::Leave as u8, &room_id, user_id);
res
}
pub fn get_match_room_mut(&mut self, battle_type: BattleType) -> &mut MatchRoom {
let res = self.match_rooms.get_mut(&battle_type.into_u8());
if res.is_none() {
let mr = MatchRoom {
battle_type: BattleType::OneVOneVOneVOne,
rooms: HashMap::new(),
room_cache: Vec::new(),
};
self.match_rooms.insert(battle_type.into_u8(), mr);
}
let res = self.match_rooms.get_mut(&battle_type.into_u8());
res.unwrap()
}
}
///匹配房结构体
#[derive(Clone)]
pub struct MatchRoom {
pub battle_type: BattleType, //战斗模式类型
pub rooms: HashMap<u32, Room>, //key:房间id value:房间结构体
pub room_cache: Vec<RoomCache>, //key:房间id value:房间人数
}
impl RoomModel for MatchRoom {
fn get_room_type(&self) -> RoomType {
RoomType::Match
}
fn get_room_mut(&mut self, room_id: &u32) -> Option<&mut Room> {
let res = self.rooms.get_mut(room_id);
if res.is_none() {
return None;
}
let room = res.unwrap();
Some(room)
}
///创建房间
fn create_room(
&mut self,
battle_type: BattleType,
owner: Member,
sender: TcpSender,
task_sender: crossbeam::Sender<Task>,
) -> anyhow::Result<u32> {
let mut room = Room::new(owner, RoomType::Match, sender, task_sender)?;
room.setting.battle_type = battle_type;
let room_id = room.get_room_id();
self.rooms.insert(room_id, room);
let mut rc = RoomCache::default();
rc.room_id = room_id;
rc.count = 1;
self.room_cache.push(rc);
Ok(room_id)
}
///离开房间
fn leave_room(&mut self, notice_type: u8, room_id: &u32, user_id: &u32) -> anyhow::Result<u32> {
let room = self.get_mut_room_by_room_id(room_id)?;
let room_id = *room_id;
let member_count = room.get_member_count();
room.remove_member(notice_type, user_id);
let need_remove = room.is_empty();
let now_count = room.get_member_count();
let mut need_add_cache = false;
//如果房间之前是满都,就给所有人取消准备
if room.get_state() == RoomState::Await
&& member_count == MEMBER_MAX as usize
&& now_count < member_count
{
let map = room.members.clone();
for id in map.keys() {
room.prepare_cancel(id, false);
}
if room.get_state() == RoomState::Await {
need_add_cache = true;
}
}
if need_remove {
return Ok(room_id);
}
let room_cache = self.get_room_cache_mut(&room_id);
if room_cache.is_some() {
let rc = room_cache.unwrap();
rc.count -= 1;
//重新排序
self.room_cache.par_sort_by(|a, b| b.count.cmp(&a.count));
} else if room_cache.is_none() && need_add_cache {
let mut rc = RoomCache::default();
rc.room_id = room_id;
rc.count = now_count as u8;
self.room_cache.push(rc);
//重新排序
self.room_cache.par_sort_by(|a, b| b.count.cmp(&a.count));
info!(
"玩家离开房间匹配房间,满足条件,将放进重新放入匹配队列,room_id:{}",
room_id
);
}
Ok(room_id)
}
///删除房间
fn rm_room(&mut self, room_id: &u32) {
self.rooms.remove(room_id);
self.remove_room_cache(room_id);
info!(
"删除房间,释放内存!room_type:{:?},room_id:{}",
self.get_room_type(),
room_id
);
}
fn get_rooms_mut(&mut self) -> &mut HashMap<u32, Room, RandomState> {
self.rooms.borrow_mut()
}
}
impl MatchRoom {
pub fn get_room_cache_mut(&mut self, room_id: &u32) -> Option<&mut RoomCache> {
let res = self.room_cache.iter_mut().find(|x| x.room_id == *room_id);
res
}
///删除缓存房间
pub fn remove_room_cache(&mut self, room_id: &u32) {
let mut index = -1_isize;
for i in self.room_cache.iter() {
index += 1;
if i.room_id != *room_id {
continue;
}
break;
}
if ind | self.room_cache.remove(index as usize);
}
///快速加入
pub fn quickly_start(
&mut self,
member: Member,
sender: TcpSender,
task_sender: crossbeam::Sender<Task>,
) -> anyhow::Result<u32> {
let room_id: u32;
let user_id = member.user_id;
//如果房间缓存里没有,则创建新房间
if self.room_cache.is_empty() {
//校验地图配置
let room_tmp_ref: &TileMapTempMgr = TEMPLATES.get_tile_map_temp_mgr_ref();
if room_tmp_ref.is_empty() {
anyhow::bail!("TileMapTempMgr is None")
}
//创建房间
room_id = self.create_room(BattleType::OneVOneVOneVOne, member, sender, task_sender)?;
info!("创建匹配房间,room_id:{},user_id:{}", room_id, user_id);
} else {
//如果有,则往房间里塞
room_id = self.get_room_cache_last_room_id()?;
//将成员加进房间
let room_mut = self.get_mut_room_by_room_id(&room_id)?;
if room_mut.get_member_count() >= MEMBER_MAX as usize {
anyhow::bail!("room is None,room_id:{}", room_id)
}
//将成员加入到房间中
room_mut.add_member(member)?;
//解决房间队列缓存
let room_cache_array: &mut Vec<RoomCache> = self.room_cache.as_mut();
let room_cache = room_cache_array.last_mut().unwrap();
//cache人数加1
room_cache.count += 1;
//如果人满里,则从缓存房间列表中弹出
if room_cache.count >= MEMBER_MAX {
room_cache_array.pop();
info!("匹配房人满,将房间从匹配队列移除!room_id:{}", room_id);
//创建延迟任务,并发送给定时器接收方执行
let mut task = Task::default();
let time_limit = TEMPLATES
.get_constant_temp_mgr_ref()
.temps
.get("kick_not_prepare_time");
if let Some(time) = time_limit {
let time = u64::from_str(time.value.as_str())?;
task.delay = time + 500;
} else {
task.delay = 60000_u64;
warn!("the Constant kick_not_prepare_time is None!pls check!");
}
task.cmd = TaskCmd::MatchRoomStart as u16;
let mut map = Map::new();
map.insert(
"battle_type".to_owned(),
Value::from(self.battle_type.into_u8()),
);
map.insert("room_id".to_owned(), Value::from(room_id));
task.data = Value::from(map);
let res = task_sender.send(task);
if let Err(e) = res {
error!("{:?}", e);
}
}
//重新排序
room_cache_array.par_sort_by(|a, b| b.count.cmp(&a.count));
}
Ok(room_id)
}
fn get_room_cache_last_room_id(&self) -> anyhow::Result<u32> {
let room_cache = self.room_cache.last();
if room_cache.is_none() {
let str = "room_cache is empty!".to_owned();
error!("{:?}", str.as_str());
anyhow::bail!("{:?}", str)
}
let room_id = room_cache.unwrap().room_id;
Ok(room_id)
}
}
| ex < 0 {
return;
}
| identifier_body |
room_model.rs | use crate::room::member::Member;
use crate::room::room::{MemberLeaveNoticeType, RoomState};
use crate::room::room::{Room, MEMBER_MAX};
use crate::task_timer::{Task, TaskCmd};
use crate::TEMPLATES;
use log::{error, info, warn};
use num_enum::IntoPrimitive;
use num_enum::TryFromPrimitive;
use protobuf::Message;
use rayon::slice::ParallelSliceMut;
use serde_json::{Map, Value};
use std::borrow::BorrowMut;
use std::collections::hash_map::RandomState;
use std::collections::HashMap;
use std::convert::TryFrom;
use std::str::FromStr;
use tools::cmd_code::ClientCode;
use tools::protos::base::RoomSettingPt;
use tools::protos::room::S_LEAVE_ROOM;
use tools::tcp::TcpSender;
use tools::templates::template::TemplateMgrTrait;
use tools::templates::tile_map_temp::TileMapTempMgr;
///teamID枚举
#[derive(Debug, Clone, Copy, Eq, PartialEq, TryFromPrimitive, IntoPrimitive)]
#[repr(u8)]
pub enum TeamId {
Min = 1, //最小teamid
Max = 4, //最大teamid
}
///房间类型
#[derive(Debug, Clone, Copy, Eq, PartialEq, TryFromPrimitive, IntoPrimitive)]
#[repr(u8)]
pub enum RoomType {
None = 0, //无效
Custom = 1, //自定义房间
Match = 2, //匹配房间
SeasonPve = 3, //赛季PVE房间
WorldBossPve = 4, //世界boss房间
}
impl RoomType {
pub fn into_u8(self) -> u8 {
let res: u8 = self.into();
res
}
pub fn into_u32(self) -> u32 {
let res: u8 = self.into();
res as u32
}
}
///战斗模式类型
#[derive(Debug, Clone, Copy, Eq, PartialEq, TryFromPrimitive, IntoPrimitive)]
#[repr(u8)]
pub enum BattleType {
None = 0, //无效初始值
OneVOneVOneVOne = 1, //1v1v1v1
TwoVTwo = 2, //2v2
OneVOne = 3, //1v1
}
impl Default for BattleType {
fn default() -> Self {
BattleType::OneVOneVOneVOne
}
}
impl BattleType {
pub fn into_u8(self) -> u8 {
let res: u8 = self.into();
res
}
pub fn into_u32(self) -> u32 {
let res: u8 = self.into();
res as u32
}
}
///房间设置
#[derive(Debug, Copy, Clone, Default)]
pub struct RoomSetting {
pub battle_type: BattleType, //战斗类型
pub turn_limit_time: u32, //回合限制时间
pub season_id: u32, //赛季id
pub is_open_ai: bool, //是否开启ai
pub victory_condition: u32, //胜利条件
}
impl From<RoomSettingPt> for RoomSetting {
fn from(rs_pt: RoomSettingPt) -> Self {
let battle_type = BattleType::try_from(rs_pt.battle_type as u8).unwrap();
let is_open_ai = rs_pt.is_open_ai;
let victory_condition = rs_pt.victory_condition;
let turn_limit_time = rs_pt.turn_limit_time;
let season_id = rs_pt.season_id;
let rs = RoomSetting {
battle_type,
turn_limit_time,
season_id,
is_open_ai,
victory_condition,
};
rs
}
}
impl From<RoomSetting> for RoomSettingPt {
fn from(r: RoomSetting) -> Self {
let mut rsp = RoomSettingPt::new();
rsp.set_victory_condition(r.victory_condition);
rsp.set_battle_type(r.battle_type as u32);
rsp.set_season_id(r.season_id);
rsp.set_turn_limit_time(r.turn_limit_time);
rsp.set_is_open_ai(r.is_open_ai);
rsp
}
}
///房间缓存
#[derive(Debug, Copy, Clone, Default)]
pub struct RoomCache {
room_id: u32,
count: u8,
}
pub trait RoomModel {
fn get_room_type(&self) -> RoomType;
fn get_room_mut(&mut self, room_id: &u32) -> Option<&mut Room>;
fn create_room(
&mut self,
battle_type: BattleType,
owner: Member,
sender: TcpSender,
task_sender: crossbeam::Sender<Task>,
) -> anyhow::Result<u32>;
fn leave_room(&mut self, notice_type: u8, room_id: &u32, user_id: &u32) -> anyhow::Result<u32>;
fn rm_room(&mut self, room_id: &u32);
fn get_rooms_mut(&mut self) -> &mut HashMap<u32, Room>;
///根据房间id获得房间的可变指针
fn get_mut_room_by_room_id(&mut self, room_id: &u32) -> anyhow::Result<&mut Room> {
let res = self.get_rooms_mut().get_mut(room_id);
if res.is_none() {
let s = format!("this room is not exit!room_id:{}", room_id);
anyhow::bail!(s)
}
Ok(res.unwrap())
}
///根据房间id获得房间的只读指针
fn get_ref_room_by_room_id(&mut self, room_id: &u32) -> anyhow::Result<&Room> {
let res = self.get_rooms_mut().get(room_id);
if res.is_none() {
anyhow::bail!("this room is not exit,room_id:{}", room_id)
}
Ok(res.unwrap())
}
}
///好友房结构体
#[derive(Clone, Default)]
pub struct CustomRoom {
pub rooms: HashMap<u32, Room>, //封装房间房间id->房间结构体实例
}
impl RoomModel for CustomRoom {
fn get_room_type(&self) -> RoomType {
RoomType::Custom
}
fn get_room_mut(&mut self, room_id: &u32) -> Option<&mut Room> {
let res = self.rooms.get_mut(room_id);
res
}
///创建房间
fn create_room(
&mut self,
battle_type: BattleType,
owner: Member,
sender: TcpSender,
task_sender: crossbeam::Sender<Task>,
) -> anyhow::Result<u32> {
let user_id = owner.user_id;
let mut room = Room::new(owner.clone(), RoomType::Custom, sender, task_sender)?;
room.setting.battle_type = battle_type;
let room_id = room.get_room_id();
self.rooms.insert(room_id, room);
let room = self.rooms.get_mut(&room_id).unwrap();
//同志房间其他成员
room.room_add_member_notice(&user_id);
Ok(room_id)
}
///离开房间
fn leave_room(&mut self, notice_type: u8, room_id: &u32, user_id: &u32) -> anyhow::Result<u32> {
let room = self.get_mut_room_by_room_id(room_id)?;
room.remove_member(notice_type, user_id);
let mut slr = S_LEAVE_ROOM::new();
slr.set_is_succ(true);
room.send_2_client(
ClientCode::LeaveRoom,
*user_id,
slr.write_to_bytes().unwrap(),
);
let room_id = room.get_room_id();
Ok(room_id)
}
fn rm_room(&mut self, room_id: &u32) {
self.rooms.remove(room_id);
info!(
"删除房间,释放内存!room_type:{:?},room_id:{}",
self.get_room_type(),
room_id
);
}
fn get_rooms_mut(&mut self) -> &mut HashMap<u32, Room, RandomState> {
self.rooms.borrow_mut()
}
}
///匹配房数组结构封装体
#[derive(Default, Clone)]
pub struct MatchRooms {
pub match_rooms: HashMap<u8, MatchRoom>,
}
impl MatchRooms {
pub fn get_room_mut(&mut self, room_id: &u32) -> Option<&mut Room> {
for i in self.match_rooms.iter_mut() {
let res = i.1.rooms.get_mut(&room_id);
if res.is_some() {
return Some(res.unwrap());
}
}
None
}
pub fn rm_room(&mut self, battle_type: u8, room_id: u32) {
let match_room = self.match_rooms.get_mut(&battle_type);
if let Some(match_room) = match_room {
match_room.rm_room(&room_id);
}
}
///离开房间,离线也好,主动离开也好
pub fn leave(
&mut self,
battle_type: BattleType,
room_id: u32,
user_id: &u32,
) -> anyhow::Result<u32> {
let match_room = self.match_rooms.get_mut(&battle_type.into_u8());
if match_room.is_none() {
let str = format!("there is no battle_type:{:?}!", battle_type);
warn!("{:?}", str.as_str());
anyhow::bail!("{:?}", str)
}
let match_room = match_room.unwrap();
let res = match_room.leave_room(MemberLeaveNoticeType::Leave as u8, &room_id, user_id);
res
}
pub fn get_match_room_mut(&mut self, battle_type: BattleType) -> &mut MatchRoom {
let res = self.match_rooms.get_mut(&battle_type.into_u8());
if res.is_none() {
let mr = MatchRoom {
battle_type: BattleType::OneVOneVOneVOne,
rooms: HashMap::new(),
room_cache: Vec::new(),
};
self.match_rooms.insert(battle_type.into_u8(), mr);
}
let res = self.match_rooms.get_mut(&battle_type.into_u8());
res.unwrap()
}
}
///匹配房结构体
#[derive(Clone)]
pub struct MatchRoom {
pub battle_type: BattleType, //战斗模式类型
pub rooms: HashMap<u32, Room>, //key:房间id value:房间结构体
pub room_cache: Vec<RoomCache>, //key:房间id value:房间人数
}
impl RoomModel for MatchRoom {
fn get_room_type(&self) -> RoomType {
RoomType::Match
}
fn get_room_mut(&mut self, room_id: &u32) -> Option<&mut Room> {
let res = self.rooms.get_mut(room_id);
if res.is_none() {
return None;
}
let room = res.unwrap();
Some(room)
}
///创建房间
fn create_room(
&mut self,
battle_type: BattleType,
owner: Member,
sender: TcpSender,
task_sender: crossbeam::Sender<Task>,
) -> anyhow::Result<u32> {
let mut room = Room::new(owner, RoomType::Match, sender, task_sender)?;
room.setting.battle_type = battle_type;
let room_id = room.get_room_id();
self.rooms.insert(room_id, room);
let mut rc = RoomCache::default();
rc.room_id = room_id;
rc.count = 1;
self.room_cache.push(rc);
Ok(room_id)
}
///离开房间
fn leave_room(&mut self, notice_type: u8, room_id: &u32, user_id: &u32) -> anyhow::Result<u32> {
let room = self.get_mut_room_by_room_id(room_id)?;
let room_id = *room_id;
let member_count = room.get_member_count();
room.remove_member(notice_type, user_id);
let need_remove = room.is_empty();
let now_count = room.get_member_count();
let mut need_add_cache = false;
//如果房间之前是满都,就给所有人取消准备
if room.get_state() == RoomState::Await
&& member_count == MEMBER_MAX as usize
&& now_count < member_count
{
let map = room.members.clone();
for id in map.keys() {
room.prepare_cancel(id, false);
}
if room.get_state() == RoomState::Await {
need_add_cache = true;
}
}
if need_remove {
return Ok(room_id);
}
let room_cache = self.get_room_cache_mut(&room_id);
if room_cache.is_some() {
let rc = room_cache.unwrap();
rc.count -= 1;
//重新排序
self.room_cache.par_sort_by(|a, b| b.count.cmp(&a.count));
} else if room_cache.is_none() && need_add_cache {
let mut rc = RoomCache::default();
rc.room_id = room_id;
rc.count = now_count as u8;
self.room_cache.push(rc);
//重新排序
self.room_cache.par_sort_by(|a, b| b.count.cmp(&a.count));
info!(
"玩家离开房间匹配房间,满足条件,将放进重新放入匹配队列,room_id:{}",
room_id
);
}
Ok(room_id)
}
///删除房间
fn rm_room(&mut self, room_id: &u32) {
self.rooms.remove(room_id);
self.remove_room_cache(room_id);
info!(
"删除房间,释放内存!room_type:{:?},room_id:{}",
self.get_room_type(),
room_id
);
}
fn get_rooms_mut(&mut self) -> &mut HashMap<u32, Room, RandomState> {
self.rooms.borrow_mut()
}
}
impl MatchRoom {
pub fn get_room_cache_mut(&mut self, room_id: &u32) -> Option<&mut RoomCache> {
let res = self.room_cache.iter_mut().find(|x| x.room_id == *room_id);
res
}
///删除缓存房间
pub fn remove_room_cache(&mut self, room_id: &u32) {
let mut index = -1_isize;
for i in self.room_cache.iter() {
index += 1;
if i.room_id != *room_id {
continue;
}
break;
}
if index < 0 {
return;
}
self.room_cache.remove(index as usize);
}
///快速加入
pub fn quickly_start(
&mut self,
member: Member,
sender: TcpSender,
task_sender: crossbeam::Sender<Task>,
) -> anyhow::Result<u32> { | _id: u32;
let user_id = member.user_id;
//如果房间缓存里没有,则创建新房间
if self.room_cache.is_empty() {
//校验地图配置
let room_tmp_ref: &TileMapTempMgr = TEMPLATES.get_tile_map_temp_mgr_ref();
if room_tmp_ref.is_empty() {
anyhow::bail!("TileMapTempMgr is None")
}
//创建房间
room_id = self.create_room(BattleType::OneVOneVOneVOne, member, sender, task_sender)?;
info!("创建匹配房间,room_id:{},user_id:{}", room_id, user_id);
} else {
//如果有,则往房间里塞
room_id = self.get_room_cache_last_room_id()?;
//将成员加进房间
let room_mut = self.get_mut_room_by_room_id(&room_id)?;
if room_mut.get_member_count() >= MEMBER_MAX as usize {
anyhow::bail!("room is None,room_id:{}", room_id)
}
//将成员加入到房间中
room_mut.add_member(member)?;
//解决房间队列缓存
let room_cache_array: &mut Vec<RoomCache> = self.room_cache.as_mut();
let room_cache = room_cache_array.last_mut().unwrap();
//cache人数加1
room_cache.count += 1;
//如果人满里,则从缓存房间列表中弹出
if room_cache.count >= MEMBER_MAX {
room_cache_array.pop();
info!("匹配房人满,将房间从匹配队列移除!room_id:{}", room_id);
//创建延迟任务,并发送给定时器接收方执行
let mut task = Task::default();
let time_limit = TEMPLATES
.get_constant_temp_mgr_ref()
.temps
.get("kick_not_prepare_time");
if let Some(time) = time_limit {
let time = u64::from_str(time.value.as_str())?;
task.delay = time + 500;
} else {
task.delay = 60000_u64;
warn!("the Constant kick_not_prepare_time is None!pls check!");
}
task.cmd = TaskCmd::MatchRoomStart as u16;
let mut map = Map::new();
map.insert(
"battle_type".to_owned(),
Value::from(self.battle_type.into_u8()),
);
map.insert("room_id".to_owned(), Value::from(room_id));
task.data = Value::from(map);
let res = task_sender.send(task);
if let Err(e) = res {
error!("{:?}", e);
}
}
//重新排序
room_cache_array.par_sort_by(|a, b| b.count.cmp(&a.count));
}
Ok(room_id)
}
fn get_room_cache_last_room_id(&self) -> anyhow::Result<u32> {
let room_cache = self.room_cache.last();
if room_cache.is_none() {
let str = "room_cache is empty!".to_owned();
error!("{:?}", str.as_str());
anyhow::bail!("{:?}", str)
}
let room_id = room_cache.unwrap().room_id;
Ok(room_id)
}
}
|
let room | identifier_name |
room_model.rs | use crate::room::member::Member;
use crate::room::room::{MemberLeaveNoticeType, RoomState};
use crate::room::room::{Room, MEMBER_MAX};
use crate::task_timer::{Task, TaskCmd};
use crate::TEMPLATES;
use log::{error, info, warn};
use num_enum::IntoPrimitive;
use num_enum::TryFromPrimitive;
use protobuf::Message;
use rayon::slice::ParallelSliceMut;
use serde_json::{Map, Value};
use std::borrow::BorrowMut;
use std::collections::hash_map::RandomState;
use std::collections::HashMap;
use std::convert::TryFrom;
use std::str::FromStr;
use tools::cmd_code::ClientCode;
use tools::protos::base::RoomSettingPt;
use tools::protos::room::S_LEAVE_ROOM;
use tools::tcp::TcpSender;
use tools::templates::template::TemplateMgrTrait;
use tools::templates::tile_map_temp::TileMapTempMgr;
///teamID枚举
#[derive(Debug, Clone, Copy, Eq, PartialEq, TryFromPrimitive, IntoPrimitive)]
#[repr(u8)]
pub enum TeamId {
Min = 1, //最小teamid
Max = 4, //最大teamid
}
///房间类型
#[derive(Debug, Clone, Copy, Eq, PartialEq, TryFromPrimitive, IntoPrimitive)]
#[repr(u8)]
pub enum RoomType {
None = 0, //无效
Custom = 1, //自定义房间
Match = 2, //匹配房间
SeasonPve = 3, //赛季PVE房间
WorldBossPve = 4, //世界boss房间
}
impl RoomType {
pub fn into_u8(self) -> u8 {
let res: u8 = self.into();
res
}
pub fn into_u32(self) -> u32 {
let res: u8 = self.into();
res as u32
}
}
///战斗模式类型
#[derive(Debug, Clone, Copy, Eq, PartialEq, TryFromPrimitive, IntoPrimitive)]
#[repr(u8)]
pub enum BattleType {
None = 0, //无效初始值
OneVOneVOneVOne = 1, //1v1v1v1
TwoVTwo = 2, //2v2
OneVOne = 3, //1v1
}
impl Default for BattleType {
fn default() -> Self {
BattleType::OneVOneVOneVOne
}
}
impl BattleType {
pub fn into_u8(self) -> u8 {
let res: u8 = self.into();
res
}
pub fn into_u32(self) -> u32 {
let res: u8 = self.into();
res as u32
}
}
///房间设置
#[derive(Debug, Copy, Clone, Default)]
pub struct RoomSetting {
pub battle_type: BattleType, //战斗类型
pub turn_limit_time: u32, //回合限制时间
pub season_id: u32, //赛季id
pub is_open_ai: bool, //是否开启ai
pub victory_condition: u32, //胜利条件
}
impl From<RoomSettingPt> for RoomSetting {
fn from(rs_pt: RoomSettingPt) -> Self {
let battle_type = BattleType::try_from(rs_pt.battle_type as u8).unwrap();
let is_open_ai = rs_pt.is_open_ai;
let victory_condition = rs_pt.victory_condition;
let turn_limit_time = rs_pt.turn_limit_time;
let season_id = rs_pt.season_id;
let rs = RoomSetting {
battle_type,
turn_limit_time,
season_id,
is_open_ai,
victory_condition,
};
rs
}
}
impl From<RoomSetting> for RoomSettingPt {
fn from(r: RoomSetting) -> Self {
let mut rsp = RoomSettingPt::new();
rsp.set_victory_condition(r.victory_condition);
rsp.set_battle_type(r.battle_type as u32);
rsp.set_season_id(r.season_id);
rsp.set_turn_limit_time(r.turn_limit_time);
rsp.set_is_open_ai(r.is_open_ai);
rsp
}
}
///房间缓存
#[derive(Debug, Copy, Clone, Default)]
pub struct RoomCache {
room_id: u32,
count: u8,
}
pub trait RoomModel {
fn get_room_type(&self) -> RoomType;
fn get_room_mut(&mut self, room_id: &u32) -> Option<&mut Room>;
fn create_room(
&mut self,
battle_type: BattleType,
owner: Member,
sender: TcpSender,
task_sender: crossbeam::Sender<Task>,
) -> anyhow::Result<u32>;
fn leave_room(&mut self, notice_type: u8, room_id: &u32, user_id: &u32) -> anyhow::Result<u32>;
fn rm_room(&mut self, room_id: &u32);
fn get_rooms_mut(&mut self) -> &mut HashMap<u32, Room>;
///根据房间id获得房间的可变指针
fn get_mut_room_by_room_id(&mut self, room_id: &u32) -> anyhow::Result<&mut Room> {
let res = self.get_rooms_mut().get_mut(room_id);
if res.is_none() {
let s = format!("this room is not exit!room_id:{}", room_id);
anyhow::bail!(s)
}
Ok(res.unwrap())
}
///根据房间id获得房间的只读指针
fn get_ref_room_by_room_id(&mut self, room_id: &u32) -> anyhow::Result<&Room> {
let res = self.get_rooms_mut().get(room_id);
if res.is_none() {
anyhow::bail!("this room is not exit,room_id:{}", room_id)
}
Ok(res.unwrap())
}
}
///好友房结构体
#[derive(Clone, Default)]
pub struct CustomRoom {
pub rooms: HashMap<u32, Room>, //封装房间房间id->房间结构体实例
}
impl RoomModel for CustomRoom {
fn get_room_type(&self) -> RoomType {
RoomType::Custom
}
fn get_room_mut(&mut self, room_id: &u32) -> Option<&mut Room> {
let res = self.rooms.get_mut(room_id);
res
}
///创建房间
fn create_room(
&mut self,
battle_type: BattleType,
owner: Member,
sender: TcpSender,
task_sender: crossbeam::Sender<Task>,
) -> anyhow::Result<u32> {
let user_id = owner.user_id;
let mut room = Room::new(owner.clone(), RoomType::Custom, sender, task_sender)?;
room.setting.battle_type = battle_type;
let room_id = room.get_room_id();
self.rooms.insert(room_id, room);
let room = self.rooms.get_mut(&room_id).unwrap();
//同志房间其他成员
room.room_add_member_notice(&user_id);
Ok(room_id)
}
///离开房间
fn leave_room(&mut self, notice_type: u8, room_id: &u32, user_id: &u32) -> anyhow::Result<u32> {
let room = self.get_mut_room_by_room_id(room_id)?;
room.remove_member(notice_type, user_id);
let mut slr = S_LEAVE_ROOM::new();
slr.set_is_succ(true);
room.send_2_client(
ClientCode::LeaveRoom,
*user_id,
slr.write_to_bytes().unwrap(),
);
let room_id = room.get_room_id();
Ok(room_id)
}
fn rm_room(&mut self, room_id: &u32) {
self.rooms.remove(room_id);
info!(
"删除房间,释放内存!room_type:{:?},room_id:{}",
self.get_room_type(),
room_id
);
}
fn get_rooms_mut(&mut self) -> &mut HashMap<u32, Room, RandomState> {
self.rooms.borrow_mut()
}
}
///匹配房数组结构封装体
#[derive(Default, Clone)]
pub struct MatchRooms {
pub match_rooms: HashMap<u8, MatchRoom>,
}
impl MatchRooms {
pub fn get_room_mut(&mut self, room_id: &u32) -> Option<&mut Room> {
for i in self.match_rooms.iter_mut() {
let res = i.1.rooms.get_mut(&room_id);
if res.is_some() {
return Some(res.unwrap());
}
}
None
}
pub fn rm_room(&mut self, battle_type: u8, room_id: u32) {
let match_room = self.match_rooms.get_mut(&battle_type);
if let Some(match_room) = match_room {
match_room.rm_room(&room_id);
}
}
///离开房间,离线也好,主动离开也好
pub fn leave(
&mut self,
battle_type: BattleType,
room_id: u32,
user_id: &u32,
) -> anyhow::Result<u32> {
let match_room = self.match_rooms.get_mut(&battle_type.into_u8());
if match_room.is_none() {
let str = format!("there is no battle_type:{:?}!", battle_type);
warn!("{:?}", str.as_str());
anyhow::bail!("{:?}", str)
}
let match_room = match_room.unwrap();
let res = match_room.leave_room(MemberLeaveNoticeType::Leave as u8, &room_id, user_id);
res
}
pub fn get_match_room_mut(&mut self, battle_type: BattleType) -> &mut MatchRoom {
let res = self.match_rooms.get_mut(&battle_type.into_u8());
if res.is_none() {
let mr = MatchRoom {
battle_type: BattleType::OneVOneVOneVOne,
rooms: HashMap::new(),
room_cache: Vec::new(),
};
self.match_rooms.insert(battle_type.into_u8(), mr);
}
let res = self.match_rooms.get_mut(&battle_type.into_u8());
res.unwrap()
}
}
///匹配房结构体
#[derive(Clone)]
pub struct MatchRoom {
pub battle_type: BattleType, //战斗模式类型
pub rooms: HashMap<u32, Room>, //key:房间id value:房间结构体
pub room_cache: Vec<RoomCache>, //key:房间id value:房间人数
}
impl RoomModel for MatchRoom {
fn get_room_type(&self) -> RoomType {
RoomType::Match
}
fn get_room_mut(&mut self, room_id: &u32) -> Option<&mut Room> {
let res = self.rooms.get_mut(room_id);
if res.is_none() {
return None;
}
let room = res.unwrap();
Some(room)
}
///创建房间
fn create_room(
&mut self,
battle_type: BattleType,
owner: Member,
sender: TcpSender,
task_sender: crossbeam::Sender<Task>,
) -> anyhow::Result<u32> {
let mut room = Room::new(owner, RoomType::Match, sender, task_sender)?;
room.setting.battle_type = battle_type;
let room_id = room.get_room_id();
self.rooms.insert(room_id, room);
let mut rc = RoomCache::default();
rc.room_id = room_id;
rc.count = 1;
self.room_cache.push(rc);
Ok(room_id)
}
///离开房间
fn leave_room(&mut self, notice_type: u8, room_id: &u32, user_id: &u32) -> anyhow::Result<u32> {
let room = self.get_mut_room_by_room_id(room_id)?;
let room_id = *room_id;
let member_count = room.get_member_count();
room.remove_member(notice_type, user_id);
let need_remove = room.is_empty();
let now_count = room.get_member_count();
let mut need_add_cache = false;
//如果房间之前是满都,就给所有人取消准备
if room.get_state() == RoomState::Await
&& member_count == MEMBER_MAX as usize
&& now_count < member_count
{
let map = room.members.clone();
for id in map.keys() {
room.prepare_cancel(id, false);
}
if room.get_state() == RoomState::Await {
need_add_cache = true;
}
}
if need_remove {
return Ok(room_id); |
let room_cache = self.get_room_cache_mut(&room_id);
if room_cache.is_some() {
let rc = room_cache.unwrap();
rc.count -= 1;
//重新排序
self.room_cache.par_sort_by(|a, b| b.count.cmp(&a.count));
} else if room_cache.is_none() && need_add_cache {
let mut rc = RoomCache::default();
rc.room_id = room_id;
rc.count = now_count as u8;
self.room_cache.push(rc);
//重新排序
self.room_cache.par_sort_by(|a, b| b.count.cmp(&a.count));
info!(
"玩家离开房间匹配房间,满足条件,将放进重新放入匹配队列,room_id:{}",
room_id
);
}
Ok(room_id)
}
///删除房间
fn rm_room(&mut self, room_id: &u32) {
self.rooms.remove(room_id);
self.remove_room_cache(room_id);
info!(
"删除房间,释放内存!room_type:{:?},room_id:{}",
self.get_room_type(),
room_id
);
}
fn get_rooms_mut(&mut self) -> &mut HashMap<u32, Room, RandomState> {
self.rooms.borrow_mut()
}
}
impl MatchRoom {
pub fn get_room_cache_mut(&mut self, room_id: &u32) -> Option<&mut RoomCache> {
let res = self.room_cache.iter_mut().find(|x| x.room_id == *room_id);
res
}
///删除缓存房间
pub fn remove_room_cache(&mut self, room_id: &u32) {
let mut index = -1_isize;
for i in self.room_cache.iter() {
index += 1;
if i.room_id != *room_id {
continue;
}
break;
}
if index < 0 {
return;
}
self.room_cache.remove(index as usize);
}
///快速加入
pub fn quickly_start(
&mut self,
member: Member,
sender: TcpSender,
task_sender: crossbeam::Sender<Task>,
) -> anyhow::Result<u32> {
let room_id: u32;
let user_id = member.user_id;
//如果房间缓存里没有,则创建新房间
if self.room_cache.is_empty() {
//校验地图配置
let room_tmp_ref: &TileMapTempMgr = TEMPLATES.get_tile_map_temp_mgr_ref();
if room_tmp_ref.is_empty() {
anyhow::bail!("TileMapTempMgr is None")
}
//创建房间
room_id = self.create_room(BattleType::OneVOneVOneVOne, member, sender, task_sender)?;
info!("创建匹配房间,room_id:{},user_id:{}", room_id, user_id);
} else {
//如果有,则往房间里塞
room_id = self.get_room_cache_last_room_id()?;
//将成员加进房间
let room_mut = self.get_mut_room_by_room_id(&room_id)?;
if room_mut.get_member_count() >= MEMBER_MAX as usize {
anyhow::bail!("room is None,room_id:{}", room_id)
}
//将成员加入到房间中
room_mut.add_member(member)?;
//解决房间队列缓存
let room_cache_array: &mut Vec<RoomCache> = self.room_cache.as_mut();
let room_cache = room_cache_array.last_mut().unwrap();
//cache人数加1
room_cache.count += 1;
//如果人满里,则从缓存房间列表中弹出
if room_cache.count >= MEMBER_MAX {
room_cache_array.pop();
info!("匹配房人满,将房间从匹配队列移除!room_id:{}", room_id);
//创建延迟任务,并发送给定时器接收方执行
let mut task = Task::default();
let time_limit = TEMPLATES
.get_constant_temp_mgr_ref()
.temps
.get("kick_not_prepare_time");
if let Some(time) = time_limit {
let time = u64::from_str(time.value.as_str())?;
task.delay = time + 500;
} else {
task.delay = 60000_u64;
warn!("the Constant kick_not_prepare_time is None!pls check!");
}
task.cmd = TaskCmd::MatchRoomStart as u16;
let mut map = Map::new();
map.insert(
"battle_type".to_owned(),
Value::from(self.battle_type.into_u8()),
);
map.insert("room_id".to_owned(), Value::from(room_id));
task.data = Value::from(map);
let res = task_sender.send(task);
if let Err(e) = res {
error!("{:?}", e);
}
}
//重新排序
room_cache_array.par_sort_by(|a, b| b.count.cmp(&a.count));
}
Ok(room_id)
}
fn get_room_cache_last_room_id(&self) -> anyhow::Result<u32> {
let room_cache = self.room_cache.last();
if room_cache.is_none() {
let str = "room_cache is empty!".to_owned();
error!("{:?}", str.as_str());
anyhow::bail!("{:?}", str)
}
let room_id = room_cache.unwrap().room_id;
Ok(room_id)
}
} | } | random_line_split |
room_model.rs | use crate::room::member::Member;
use crate::room::room::{MemberLeaveNoticeType, RoomState};
use crate::room::room::{Room, MEMBER_MAX};
use crate::task_timer::{Task, TaskCmd};
use crate::TEMPLATES;
use log::{error, info, warn};
use num_enum::IntoPrimitive;
use num_enum::TryFromPrimitive;
use protobuf::Message;
use rayon::slice::ParallelSliceMut;
use serde_json::{Map, Value};
use std::borrow::BorrowMut;
use std::collections::hash_map::RandomState;
use std::collections::HashMap;
use std::convert::TryFrom;
use std::str::FromStr;
use tools::cmd_code::ClientCode;
use tools::protos::base::RoomSettingPt;
use tools::protos::room::S_LEAVE_ROOM;
use tools::tcp::TcpSender;
use tools::templates::template::TemplateMgrTrait;
use tools::templates::tile_map_temp::TileMapTempMgr;
///teamID枚举
#[derive(Debug, Clone, Copy, Eq, PartialEq, TryFromPrimitive, IntoPrimitive)]
#[repr(u8)]
pub enum TeamId {
Min = 1, //最小teamid
Max = 4, //最大teamid
}
///房间类型
#[derive(Debug, Clone, Copy, Eq, PartialEq, TryFromPrimitive, IntoPrimitive)]
#[repr(u8)]
pub enum RoomType {
None = 0, //无效
Custom = 1, //自定义房间
Match = 2, //匹配房间
SeasonPve = 3, //赛季PVE房间
WorldBossPve = 4, //世界boss房间
}
impl RoomType {
pub fn into_u8(self) -> u8 {
let res: u8 = self.into();
res
}
pub fn into_u32(self) -> u32 {
let res: u8 = self.into();
res as u32
}
}
///战斗模式类型
#[derive(Debug, Clone, Copy, Eq, PartialEq, TryFromPrimitive, IntoPrimitive)]
#[repr(u8)]
pub enum BattleType {
None = 0, //无效初始值
OneVOneVOneVOne = 1, //1v1v1v1
TwoVTwo = 2, //2v2
OneVOne = 3, //1v1
}
impl Default for BattleType {
fn default() -> Self {
BattleType::OneVOneVOneVOne
}
}
impl BattleType {
pub fn into_u8(self) -> u8 {
let res: u8 = self.into();
res
}
pub fn into_u32(self) -> u32 {
let res: u8 = self.into();
res as u32
}
}
///房间设置
#[derive(Debug, Copy, Clone, Default)]
pub struct RoomSetting {
pub battle_type: BattleType, //战斗类型
pub turn_limit_time: u32, //回合限制时间
pub season_id: u32, //赛季id
pub is_open_ai: bool, //是否开启ai
pub victory_condition: u32, //胜利条件
}
impl From<RoomSettingPt> for RoomSetting {
fn from(rs_pt: RoomSettingPt) -> Self {
let battle_type = BattleType::try_from(rs_pt.battle_type as u8).unwrap();
let is_open_ai = rs_pt.is_open_ai;
let victory_condition = rs_pt.victory_condition;
let turn_limit_time = rs_pt.turn_limit_time;
let season_id = rs_pt.season_id;
let rs = RoomSetting {
battle_type,
turn_limit_time,
season_id,
is_open_ai,
victory_condition,
};
rs
}
}
impl From<RoomSetting> for RoomSettingPt {
fn from(r: RoomSetting) -> Self {
let mut rsp = RoomSettingPt::new();
rsp.set_victory_condition(r.victory_condition);
rsp.set_battle_type(r.battle_type as u32);
rsp.set_season_id(r.season_id);
rsp.set_turn_limit_time(r.turn_limit_time);
rsp.set_is_open_ai(r.is_open_ai);
rsp
}
}
///房间缓存
#[derive(Debug, Copy, Clone, Default)]
pub struct RoomCache {
room_id: u32,
count: u8,
}
pub trait RoomModel {
fn get_room_type(&self) -> RoomType;
fn get_room_mut(&mut self, room_id: &u32) -> Option<&mut Room>;
fn create_room(
&mut self,
battle_type: BattleType,
owner: Member,
sender: TcpSender,
task_sender: crossbeam::Sender<Task>,
) -> anyhow::Result<u32>;
fn leave_room(&mut self, notice_type: u8, room_id: &u32, user_id: &u32) -> anyhow::Result<u32>;
fn rm_room(&mut self, room_id: &u32);
fn get_rooms_mut(&mut self) -> &mut HashMap<u32, Room>;
///根据房间id获得房间的可变指针
fn get_mut_room_by_room_id(&mut self, room_id: &u32) -> anyhow::Result<&mut Room> {
let res = self.get_rooms_mut().get_mut(room_id);
if res.is_none() {
let s = format!("this room is not exit!room_id:{}", room_id);
anyhow::bail!(s)
}
Ok(res.unwrap())
}
///根据房间id获得房间的只读指针
fn get_ref_room_by_room_id(&mut self, room_id: &u32) -> anyhow::Result<&Room> {
let res = self.get_rooms_mut().get(room_id);
if res.is_none() {
anyhow::bail!("this room is not exit,room_id:{}", room_id)
}
Ok(res.unwrap())
}
}
///好友房结构体
#[derive(Clone, Default)]
pub struct CustomRoom {
pub rooms: HashMap<u32, Room>, //封装房间房间id->房间结构体实例
}
impl RoomModel for CustomRoom {
fn get_room_type(&self) -> RoomType {
RoomType::Custom
}
fn get_room_mut(&mut self, room_id: &u32) -> Option<&mut Room> {
let res = self.rooms.get_mut(room_id);
res
}
///创建房间
fn create_room(
&mut self,
battle_type: BattleType,
owner: Member,
sender: TcpSender,
task_sender: crossbeam::Sender<Task>,
) -> anyhow::Result<u32> {
let user_id = owner.user_id;
let mut room = Room::new(owner.clone(), RoomType::Custom, sender, task_sender)?;
room.setting.battle_type = battle_type;
let room_id = room.get_room_id();
self.rooms.insert(room_id, room);
let room = self.rooms.get_mut(&room_id).unwrap();
//同志房间其他成员
room.room_add_member_notice(&user_id);
Ok(room_id)
}
///离开房间
fn leave_room(&mut self, notice_type: u8, room_id: &u32, user_id: &u32) -> anyhow::Result<u32> {
let room = self.get_mut_room_by_room_id(room_id)?;
room.remove_member(notice_type, user_id);
let mut slr = S_LEAVE_ROOM::new();
slr.set_is_succ(true);
room.send_2_client(
ClientCode::LeaveRoom,
*user_id,
slr.write_to_bytes().unwrap(),
);
let room_id = room.get_room_id();
Ok(room_id)
}
fn rm_room(&mut self, room_id: &u32) {
self.rooms.remove(room_id);
info!(
"删除房间,释放内存!room_type:{:?},room_id:{}",
self.get_room_type(),
room_id
);
}
fn get_rooms_mut(&mut self) -> &mut HashMap<u32, Room, RandomState> {
self.rooms.borrow_mut()
}
}
///匹配房数组结构封装体
#[derive(Default, Clone)]
pub struct MatchRooms {
pub match_rooms: HashMap<u8, MatchRoom>,
}
impl MatchRooms {
pub fn get_room_mut(&mut self, room_id: &u32) -> Option<&mut Room> {
for i in self.match_rooms.iter_mut() {
let res = i.1.rooms.get_mut(&room_id);
if res.is_some() {
return Some(res.unwrap());
}
}
None
}
pub fn rm_room(&mut self, battle_type: u8, room_id: u32) {
let match_room = self.match_rooms.get_mut(&battle_type);
if let Some(match_room) = match_room {
match_room.rm_room(&room | leave(
&mut self,
battle_type: BattleType,
room_id: u32,
user_id: &u32,
) -> anyhow::Result<u32> {
let match_room = self.match_rooms.get_mut(&battle_type.into_u8());
if match_room.is_none() {
let str = format!("there is no battle_type:{:?}!", battle_type);
warn!("{:?}", str.as_str());
anyhow::bail!("{:?}", str)
}
let match_room = match_room.unwrap();
let res = match_room.leave_room(MemberLeaveNoticeType::Leave as u8, &room_id, user_id);
res
}
pub fn get_match_room_mut(&mut self, battle_type: BattleType) -> &mut MatchRoom {
let res = self.match_rooms.get_mut(&battle_type.into_u8());
if res.is_none() {
let mr = MatchRoom {
battle_type: BattleType::OneVOneVOneVOne,
rooms: HashMap::new(),
room_cache: Vec::new(),
};
self.match_rooms.insert(battle_type.into_u8(), mr);
}
let res = self.match_rooms.get_mut(&battle_type.into_u8());
res.unwrap()
}
}
///匹配房结构体
#[derive(Clone)]
pub struct MatchRoom {
pub battle_type: BattleType, //战斗模式类型
pub rooms: HashMap<u32, Room>, //key:房间id value:房间结构体
pub room_cache: Vec<RoomCache>, //key:房间id value:房间人数
}
impl RoomModel for MatchRoom {
fn get_room_type(&self) -> RoomType {
RoomType::Match
}
fn get_room_mut(&mut self, room_id: &u32) -> Option<&mut Room> {
let res = self.rooms.get_mut(room_id);
if res.is_none() {
return None;
}
let room = res.unwrap();
Some(room)
}
///创建房间
fn create_room(
&mut self,
battle_type: BattleType,
owner: Member,
sender: TcpSender,
task_sender: crossbeam::Sender<Task>,
) -> anyhow::Result<u32> {
let mut room = Room::new(owner, RoomType::Match, sender, task_sender)?;
room.setting.battle_type = battle_type;
let room_id = room.get_room_id();
self.rooms.insert(room_id, room);
let mut rc = RoomCache::default();
rc.room_id = room_id;
rc.count = 1;
self.room_cache.push(rc);
Ok(room_id)
}
///离开房间
fn leave_room(&mut self, notice_type: u8, room_id: &u32, user_id: &u32) -> anyhow::Result<u32> {
let room = self.get_mut_room_by_room_id(room_id)?;
let room_id = *room_id;
let member_count = room.get_member_count();
room.remove_member(notice_type, user_id);
let need_remove = room.is_empty();
let now_count = room.get_member_count();
let mut need_add_cache = false;
//如果房间之前是满都,就给所有人取消准备
if room.get_state() == RoomState::Await
&& member_count == MEMBER_MAX as usize
&& now_count < member_count
{
let map = room.members.clone();
for id in map.keys() {
room.prepare_cancel(id, false);
}
if room.get_state() == RoomState::Await {
need_add_cache = true;
}
}
if need_remove {
return Ok(room_id);
}
let room_cache = self.get_room_cache_mut(&room_id);
if room_cache.is_some() {
let rc = room_cache.unwrap();
rc.count -= 1;
//重新排序
self.room_cache.par_sort_by(|a, b| b.count.cmp(&a.count));
} else if room_cache.is_none() && need_add_cache {
let mut rc = RoomCache::default();
rc.room_id = room_id;
rc.count = now_count as u8;
self.room_cache.push(rc);
//重新排序
self.room_cache.par_sort_by(|a, b| b.count.cmp(&a.count));
info!(
"玩家离开房间匹配房间,满足条件,将放进重新放入匹配队列,room_id:{}",
room_id
);
}
Ok(room_id)
}
///删除房间
fn rm_room(&mut self, room_id: &u32) {
self.rooms.remove(room_id);
self.remove_room_cache(room_id);
info!(
"删除房间,释放内存!room_type:{:?},room_id:{}",
self.get_room_type(),
room_id
);
}
fn get_rooms_mut(&mut self) -> &mut HashMap<u32, Room, RandomState> {
self.rooms.borrow_mut()
}
}
impl MatchRoom {
pub fn get_room_cache_mut(&mut self, room_id: &u32) -> Option<&mut RoomCache> {
let res = self.room_cache.iter_mut().find(|x| x.room_id == *room_id);
res
}
///删除缓存房间
pub fn remove_room_cache(&mut self, room_id: &u32) {
let mut index = -1_isize;
for i in self.room_cache.iter() {
index += 1;
if i.room_id != *room_id {
continue;
}
break;
}
if index < 0 {
return;
}
self.room_cache.remove(index as usize);
}
///快速加入
pub fn quickly_start(
&mut self,
member: Member,
sender: TcpSender,
task_sender: crossbeam::Sender<Task>,
) -> anyhow::Result<u32> {
let room_id: u32;
let user_id = member.user_id;
//如果房间缓存里没有,则创建新房间
if self.room_cache.is_empty() {
//校验地图配置
let room_tmp_ref: &TileMapTempMgr = TEMPLATES.get_tile_map_temp_mgr_ref();
if room_tmp_ref.is_empty() {
anyhow::bail!("TileMapTempMgr is None")
}
//创建房间
room_id = self.create_room(BattleType::OneVOneVOneVOne, member, sender, task_sender)?;
info!("创建匹配房间,room_id:{},user_id:{}", room_id, user_id);
} else {
//如果有,则往房间里塞
room_id = self.get_room_cache_last_room_id()?;
//将成员加进房间
let room_mut = self.get_mut_room_by_room_id(&room_id)?;
if room_mut.get_member_count() >= MEMBER_MAX as usize {
anyhow::bail!("room is None,room_id:{}", room_id)
}
//将成员加入到房间中
room_mut.add_member(member)?;
//解决房间队列缓存
let room_cache_array: &mut Vec<RoomCache> = self.room_cache.as_mut();
let room_cache = room_cache_array.last_mut().unwrap();
//cache人数加1
room_cache.count += 1;
//如果人满里,则从缓存房间列表中弹出
if room_cache.count >= MEMBER_MAX {
room_cache_array.pop();
info!("匹配房人满,将房间从匹配队列移除!room_id:{}", room_id);
//创建延迟任务,并发送给定时器接收方执行
let mut task = Task::default();
let time_limit = TEMPLATES
.get_constant_temp_mgr_ref()
.temps
.get("kick_not_prepare_time");
if let Some(time) = time_limit {
let time = u64::from_str(time.value.as_str())?;
task.delay = time + 500;
} else {
task.delay = 60000_u64;
warn!("the Constant kick_not_prepare_time is None!pls check!");
}
task.cmd = TaskCmd::MatchRoomStart as u16;
let mut map = Map::new();
map.insert(
"battle_type".to_owned(),
Value::from(self.battle_type.into_u8()),
);
map.insert("room_id".to_owned(), Value::from(room_id));
task.data = Value::from(map);
let res = task_sender.send(task);
if let Err(e) = res {
error!("{:?}", e);
}
}
//重新排序
room_cache_array.par_sort_by(|a, b| b.count.cmp(&a.count));
}
Ok(room_id)
}
fn get_room_cache_last_room_id(&self) -> anyhow::Result<u32> {
let room_cache = self.room_cache.last();
if room_cache.is_none() {
let str = "room_cache is empty!".to_owned();
error!("{:?}", str.as_str());
anyhow::bail!("{:?}", str)
}
let room_id = room_cache.unwrap().room_id;
Ok(room_id)
}
}
| _id);
}
}
///离开房间,离线也好,主动离开也好
pub fn | conditional_block |
Console.py | #!/usr/bin/env python
# coding=utf-8
'''
Created on Jun 18, 2012
@author: gf
'''
import wx
import os
import subprocess
import threading
import base64
import urllib2
import Setting
import Session
import Util
import VM
import Logger
import consoleInfo
import havclient
import MainFrame
import Wfile
import Main
from SendRequests import SpicePorxyRequests,RestartDeviceRequests,SaveVmLoginInfo
from Setting import AdminShadow,FirstUser
#from ovirtsdk.xml.params import Action, Ticket
#IP_TRANSLATE_FILE = os.curdir + '/ip.conf'
IP_TRANSLATE_TABLE = {}
SPICE_ERR_TABLE = (
u"成功",
u"错误",
u"获取服务器地址失败",
u"连接失败\n\n可能的原因:\n1.网络故障,请检查网络。\n2.其他用户登录到这台桌面云。\n3.桌面云出现错误。",
u"套接字失败",
u"发送失败",
u"接收失败",
u"SSL过程失败",
u"内存不足",
u"代理超时",
u"代理错误",
u"版本匹配失败",
u"权限不足",
u"无效的参数",
u"命令行错误",
)
CA_DOWNLOAD_CACHE = []
RDP_PORT = 3389
def loadIPConfig():
if not os.access(IP_TRANSLATE_FILE, os.F_OK):
with open(IP_TRANSLATE_FILE, "w") as file:
file.write('# IP Translate File\n')
file.write('# Source IP Destination IP\n')
file.write('# \n')
file.write('# eg.\n')
file.write('# 10.10.10.1 100.10.10.1\n')
return
with open(IP_TRANSLATE_FILE, "r") as file:
lines = file.readlines()
for line in lines:
# skip comment line
if line.startswith('#') :
continue
splits = line.strip().split()
if len(splits) >= 2:
IP_TRANSLATE_TABLE[splits[0]] = splits[1]
class LaunchThread(threading.Thread):
def __init__(self, p_id, vm, Type, window):
threading.Thread.__init__(self)
self.window = window
self.p_id = p_id
self.vm = vm
self.Type = Type
self.cancel = False
self.ret = -1
self.msg = ""
def get_argument(self):
try:
control = havclient.get_control(AdminShadow['admins'], self.vm, self.p_id)
except :
Logger.error("Get control failed!")
try:
usb = control.get('usb', None)
Logger.info("The %s USB_policy was allowde by server." , self.vm.name)
except :
usb = False
Logger.error('The usb_policy has not been provided!')
try:
broadcast = control.get("allow_screen_broadcast", None)
Logger.info("The %s was allowed screen_broadcast by server", self.vm.name)
except:
Logger.error("The %s was not allowed screen_broadcast by server", self.vm.name)
if usb:
arg_usb = " --spice-usbredir-auto-redirect-filter='-1,-1,-1-1,1' --spice-usbredir-redirect-on-connect='-1,-1,-1,-1,1' "
else:
arg_usb = "--spice-disable-usbredir"
if broadcast:
arg_bro = " --teacher "
else:
arg_bro = ""
try:
if len(Main.get_device_connected()) == 1:
arg_screen = ""
else :
w = os.popen("ps -ef | grep remote*", 'r')
ww = w.readlines()
w.close()
if len(ww) == 2:
arg_screen = "--extend"
else:
arg_screen = ""
except Exception as e:
Logger.error("extend screen failed: %s",e)
'''
try:
spice_secure = havclient.get_spice_secure(AdminShadow['admins'], self.vm, self.p_id)
if spice_secure:
try:
filename = '/tmp/ca-cert.pem'
addrip= Setting.getServer()
url = ''.join(["http://", addrip, ":5009/ca-cert.pem"])
info = urllib2.urlopen(url, timeout = 1)
f = open(filename, 'w')
while True:
buf = info.read(4096)
f.write(buf)
if buf == '':
break
Logger.info("Download ca-cert.pem succeed.")
finally:
info.close()
f.close()
arg_sec = '--spice-ca-file=/tmp/ca-cert.pem --spice-secure-channels=main,inputs --spice-host-subject="C=IL, L=Raanana, O=Red Hat, CN=my server"'
else:
arg_sec = ""
except Exception as e:
Logger.error('Get spice secure failed: %s' % str(e))
'''
arg = ' '.join([arg_usb, arg_bro, arg_screen])
return arg
def get_rdpsetting(self):
if Setting.getAllow_device().lower() == 'true':
arg_device = '/drive:USB,/run/media/root '
else:
arg_device = ''
#if Setting.getAuto_connect().lower() == 'true':
# arg_auto = '+auto-reconnect '
#else:
# arg_auto = ''
#if Setting.getHeadset_micro().lower() == 'true':
# arg_head_mic = '/sound:sys:pulse /microphone:sys:pulse '
#else:
# arg_head_mic = ''
if Setting.getRemotefx().lower() == 'true':
arg_rem = '/codec-cache:jpeg /gfx '
else:
arg_rem = ''
if Setting.getPublic().lower() == 'true':
arg_net = '/network:wan '
else:
arg_net = '/network:lan '
arg = ' '.join([arg_device, arg_rem, arg_net])
return arg
def stop(self):
self.cancel = True
def run(self):
import pdb
#pdb.set_trace()
try:
count = 0
win = self.window
wx.CallAfter(win.Update, 1, u'更新桌面云状态...')
if self.cancel:
return
if self.Type == 'JSP' or self.Type == 'UNKNOWN':
#import pdb
#pdb.set_trace()
vminfo = havclient.server_port(AdminShadow['admins'], self.p_id, self.vm, search_opts=None, all_tenants=None)
passwd = havclient.get_cipher(FirstUser['firstuser'], self.p_id, self.vm)
#vmcipher = havclient.get_cipher(FirstUser['firstuser'], self.p_id, self.vm)
#passwd = vmcipher.cipher
vmconsole = consoleInfo.ConsoleInfo(vminfo['console'])
host = vmconsole.host
port = vmconsole.port
dsport = vmconsole.tlsport
public_ip = Setting.getPublic().lower()
Logger.info("The public_ip status is %s", public_ip)
dport=port
if public_ip == "false":
dhost=Wfile.Parsing_hosts(host)
try:
os.environ["SPICE_PROXY"] = ""
except:
Logger.error("Configure the environment variables failed!")
pass
elif public_ip == "true":
dhost=host
proxy_status, proxy_port = SpicePorxyRequests()
if proxy_status:
try:
os.environ["SPICE_PROXY"] = "http://%s:%s" % (Setting.getServer(),proxy_port)
except:
Logger.error("Configure the environment variables failed!")
else:
Util.MessageBox(self, '连接失败!\n\n系统管理员未设置公网代理!', u'错误', wx.OK | wx.ICON_ERROR)
return
wx.CallAfter(win.WorkFinished, u'获取桌面云信息... 成功')
wx.CallAfter(win.Update, 1, u'打开桌面云...')
argument = self.get_argument()
#cmdline = 'scdaemon spice://%s/?port=%s\&tls-port=%s\&password=%s --hotkeys=exit-app=shift+f12,release-cursor=shift+f11 -f --title="%s"' % (dhost,dport,dsport,passwd,self.vm.name) + argument
cmdline = 'scdaemon spice://%s/?port=%s\&password=%s --hotkeys=exit-app=shift+f12,release-cursor=shift+f11 -f --title="%s"' % (dhost,dport,passwd,self.vm.name) + argument
Logger.info("Spice Cmd:%s",cmdline)
elif self.Type == 'RDP':
user = Setting.getUser()
Logger.info("The user of VM is %s",user)
password = Setting.getCipher()
Logger.info("The password of user is %s",password)
ip = havclient.get_vm_ip(self.vm)
public_ip = Setting.getPublic().lower()
Logger.info("The public_ip status is %s" , public_ip)
argu = self.get_rdpsetting()
if public_ip == 'false':
ipaddress = ip
Logger.info("The ipaddress of user is %s", ipaddress)
cmdline = 'xfreerdp /u:%s /p:%s /v:%s /cert-ignore -sec-tls /bpp:32 /gdi:hw /multimedia:decoder:gstreamer +auto-reconnect /sound:sys:pulse /microphone:sys:pulse -f ' % (user, password, ipaddress) + argu
elif public_ip == 'true':
ip, port = Wfile.getmapping_prot(ip, '3389')
ipaddress = ip + ':' + port
Logger.info("The ipaddress of user is %s", ipaddress)
cmdline = 'xfreerdp /u:%s /p:%s /v:%s /cert-ignore -sec-tls /bpp:32 /gdi:hw /multimedia:decoder:gstreamer +auto-reconnect /sound:sys:pulse /microphone:sys:pulse -f ' % (user, password, ipaddress) + argu
Logger.info("Rdp Cmd:%s",cmdline)
body={}
vmlogintime = Setting.getVMLoginTime()
clientip = Setting.getClientIP()
body['client_time'] = vmlogintime
body['client_ip'] = clientip
# try:
# havclient.connect_client_info(AdminShadow['admins'], self.p_id, self.vm.id, body)
# except:
# Logger.error("Send vmlogininfo failed!")
Logger.debug(cmdline)
ret = Util.RunConnectGuestWithLog(cmdline)
if self.cancel:
return
wx.CallAfter(win.WorkFinished, u'打开桌面云... 成功')
wx.CallAfter(win.Finish)
name=self.vm.name
desktop=name.encode('utf-8')
Setting.setDesktop(desktop)
Setting.save()
RestartDeviceRequests()
SaveVmLoginInfo()
self.stop()
self.ret = ret
except Exception, e:
self.ret = 1
self.msg = "%s : %s" %(SPICE_ERR_TABLE[1], e)
Logger.debug("Console Thread Ended!")
#loadIPConfig()
if __name__ == '__main__':
app = wx.PySimpleApp()
auth_url = 'http://192.168.8.150:5000/v2.0'
user = 'admin'
password = '123'
Session.login(auth_url, user, password, tenant=None, otp=None)
vms = havclient.vm_list(Session.User, search_opts=None, all_tenants=None)
vm = havclient.server_port(Session.User, vms[0], search_opts=None, all_tenants=None)
print '**************8 %s ' % vms
class Test(object):
def Update(self, value, msg):
print value, msg
def WorkFinished(self, msg):
print msg
def Finish(self):
pass
dlg = Test()
thread = LaunchThread(vm, dlg)
thread.start()
app.MainLoop()
thread.join()
for key in IP_TRANSLATE_TABLE.keys():
print key, '->', IP_TRANSLATE_TABLE[key]
| identifier_body | ||
Console.py | #!/usr/bin/env python
# coding=utf-8
'''
Created on Jun 18, 2012
@author: gf
'''
import wx
import os
import subprocess
import threading
import base64
import urllib2
import Setting
import Session
import Util
import VM
import Logger
import consoleInfo
import havclient
import MainFrame
import Wfile
import Main
from SendRequests import SpicePorxyRequests,RestartDeviceRequests,SaveVmLoginInfo
from Setting import AdminShadow,FirstUser
#from ovirtsdk.xml.params import Action, Ticket
#IP_TRANSLATE_FILE = os.curdir + '/ip.conf'
IP_TRANSLATE_TABLE = {}
SPICE_ERR_TABLE = (
u"成功",
u"错误",
u"获取服务器地址失败",
u"连接失败\n\n可能的原因:\n1.网络故障,请检查网络。\n2.其他用户登录到这台桌面云。\n3.桌面云出现错误。",
u"套接字失败",
u"发送失败",
u"接收失败",
u"SSL过程失败",
u"内存不足",
u"代理超时",
u"代理错误",
u"版本匹配失败",
u"权限不足",
u"无效的参数",
u"命令行错误",
)
CA_DOWNLOAD_CACHE = []
RDP_PORT = 3389
def loadIPConfig():
if not os.access(IP_TRANSLATE_FILE, os.F_OK):
with open(IP_TRANSLATE_FILE, "w") as file:
file.write('# IP Translate File\n')
file.write('# Source IP Destination IP\n')
file.write('# \n')
file.write('# eg.\n')
file.write('# 10.10.10.1 100.10.10.1\n')
return
with open(IP_TRANSLATE_FILE, "r") as file:
lines = file.readlines()
for line in lines:
# skip comment line
if line.startswith('#') :
continue
splits = line.strip().split()
if len(splits) >= 2:
IP_TRANSLATE_TABLE[splits[0]] = splits[1]
class LaunchThread(threading.Thread):
def __init__(self, p_id, vm, Type, window):
threading.Thread.__init__(self)
self.window = window
self.p_id = p_id
self.vm = vm
self.Type = Type
self.cancel = False
self.ret = -1
self.msg = ""
def get_argument(self):
try:
control = havclient.get_control(AdminShadow['admins'], self.vm, self.p_id)
except :
Logger.error("Get control failed!")
try:
usb = control.get('usb', None)
Logger.info("The %s USB_policy was allowde by server." , self.vm.name)
except :
usb = False
Logger.error('The usb_policy has not been provided!')
try:
broadcast = control.get("allow_screen_broadcast", None)
Logger.info("The %s was allowed screen_broadcast by server", self.vm.name)
except:
Logger.error("The %s was not allowed screen_broadcast by server", self.vm.name)
if usb:
arg_usb = " --spice-usbredir-auto-redirect-filter='-1,-1,-1-1,1' --spice-usbredir-redirect-on-connect='-1,-1,-1,-1,1' "
else:
arg_usb = "--spice-disable-usbredir"
if broadcast:
arg_bro = " --teacher "
else:
arg_bro = ""
try:
if len(Main.get_device_connected()) == 1:
arg_screen = ""
else :
w = os.popen("ps -ef | grep remote*", 'r')
ww = w.readlines()
w.close()
if len(ww) == 2:
arg_screen = "--extend"
else:
arg_screen = ""
except Exception as e:
Logger.error("extend screen failed: %s",e)
'''
try:
spice_secure = havclient.get_spice_secure(AdminShadow['admins'], self.vm, self.p_id)
if spice_secure:
try:
filename = '/tmp/ca-cert.pem'
addrip= Setting.getServer()
url = ''.join(["http://", addrip, ":5009/ca-cert.pem"])
info = urllib2.urlopen(url, timeout = 1)
f = open(filename, 'w')
while True:
buf = info.read(4096)
f.write(buf)
if buf == '':
break
Logger.info("Download ca-cert.pem succeed.")
finally:
info.close()
f.close()
arg_sec = '--spice-ca-file=/tmp/ca-cert.pem --spice-secure-channels=main,inputs --spice-host-subject="C=IL, L=Raanana, O=Red Hat, CN=my server"'
else:
arg_sec = ""
except Exception as e:
Logger.error('Get spice secure failed: %s' % str(e))
'''
arg = ' '.join([arg_usb, arg_bro, arg_screen])
return arg
def get_rdpsetting(self):
if Setting.getAllow_device().lower() == 'true':
arg_device = '/drive:USB,/run/media/root '
else:
arg_device = ''
#if Setting.getAuto_connect().lower() == 'true':
# arg_auto = '+auto-reconnect '
#else:
# arg_auto = ''
#if Setting.getHeadset_micro().lower() == 'true':
# arg_head_mic = '/sound:sys:pulse /microphone:sys:pulse '
#else:
# arg_head_mic = ''
if Setting.getRemotefx().lower() == 'true':
arg_rem = '/codec-cache:jpeg /gfx '
else:
arg_rem = ''
if Setting.getPublic().lower() == 'true':
arg_net = '/network:wan '
else:
arg_net = '/network:lan '
arg = ' '.join([arg_device, arg_rem, arg_net])
return arg
def stop(self):
self.cancel = True
def run(self):
import pdb
#pdb.set_trace()
try:
count = 0
win = self.window
wx.CallAfter(win.Update, 1, u'更新桌面云状态...')
if self.cancel:
return
if self.Type == 'JSP' or self.Type == 'UNKNOWN':
#import pdb
#pdb.set_trace()
vminfo = havclient.server_port(AdminShadow['admins'], self.p_id, self.vm, search_opts=None, all_tenants=None)
passwd = havclient.get_cipher(FirstUser['firstuser'], self.p_id, self.vm)
#vmcipher = havclient.get_cipher(FirstUser['firstuser'], self.p_id, self.vm)
#passwd = vmcipher.cipher
vmconsole = consoleInfo.ConsoleInfo(vminfo['console'])
host = vmconsole.host
port = vmconsole.port
dsport = vmconsole.tlsport
public_ip = Setting.getPublic().lower()
Logger.info("The public_ip status is %s", public_ip)
dport=port
if public_ip == "false":
dhost=Wfile.Parsing_hosts(host)
try:
os.environ["SPICE_PROXY"] = ""
except:
Logger.error("Configure the environment variables failed!")
pass
elif public_ip == "true":
dhost=host
proxy_status, proxy_port = SpicePorxyRequests()
if proxy_status:
try:
os.environ["SPICE_PROXY"] = "http://%s:%s" % (Setting.getServer(),proxy_port)
except:
Logger.error("Configure the environment variables failed!")
else:
Util.MessageBox(self, '连接失败!\n\n系统管理员未设置公网代理!', u'错误', wx.OK | wx.ICON_ERROR)
return
wx.CallAfter(win.WorkFinished, u'获取桌面云信息... 成功')
wx.CallAfter(win.Update, 1, u'打开桌面云...')
argument = self.get_argument()
#cmdline = 'scdaemon spice://%s/?port=%s\&tls-port=%s\&password=%s --hotkeys=exit-app=shift+f12,release-cursor=shift+f11 -f --title="%s"' % (dhost,dport,dsport,passwd,self.vm.name) + argument
cmdline = 'scdaemon spice://%s/?port=%s\&password=%s --hotkeys=exit-app=shift+f12,release-cursor=shift+f11 -f --title="%s"' % (dhost,dport,passwd,self.vm.name) + argument
Logger.info("Spice Cmd:%s",cmdline)
elif self.Type == 'RDP':
user = Setting.getUser()
Logger.info("The user of VM is %s",user)
password = Setting.getCipher()
Logger.info("The password of user is %s",password)
ip = havclient.get_vm_ip(self.vm)
public_ip = Setting.getPublic().lower()
Logger.info("The public_ip status is %s" , public_ip)
argu = self.get_rdpsetting()
if public_ip == 'false':
ipaddress = ip
Logger.info("The ipaddress of user is %s", ipaddress)
cmdline = 'xfreerdp /u:%s /p:%s /v:%s /cert-ignore -sec-tls /bpp:32 /gdi:hw /multimedia:decoder:gstreamer +auto-reconnect /sound:sys:pulse /microphone:sys:pulse -f ' % (user, password, ipaddress) + argu
elif public_ip == 'true':
ip, port = Wfile.getmapping_prot(ip, '3389')
ipaddress = ip + ':' + port
Logger.info("The ipaddress of user is %s", ipaddress)
cmdline = 'xfreerdp /u:%s /p:%s /v:%s /cert-ignore -sec-tls /bpp:32 /gdi:hw /multimedia:decoder:gstreamer +auto-reconnect /sound:sys:pulse /microphone:sys:pulse -f ' % (user, password, ipaddress) + argu
Logger.info("Rdp Cmd:%s",cmdline)
body={}
vmlogintime = Setting.getVMLoginTime()
clientip = Setting.getClientIP()
body['client_time'] = vmlogintime
body['client_ip'] = clientip
# try:
# havclient.connect_client_info(AdminShadow['admins'], self.p_id, self.vm.id, body)
# except:
# Logger.error("Send vmlogininfo failed!")
Logger.debug(cmdline)
ret = Util.RunConnectGuestWithLog(cmdline)
if self.cancel:
return
wx.CallAfter(win.WorkFinished, u'打开桌面云... 成功')
wx.CallAfter(win.Finish)
name=self.vm.name
desktop=name.encode('utf-8')
Setting.setDesktop(desktop)
Setting.save()
RestartDeviceRequests()
SaveVmLoginInfo()
self.stop()
self.ret = ret
except Exception, e:
self.ret = 1
self.msg = "%s : %s" %(SPICE_ERR_TABLE[1], e)
Logger.debug("Console Thread Ended!")
#loadIPConfig()
if __name__ == '__main__':
app = wx.PySimpleApp()
auth_url = 'http://192.168.8.150:5000/v2.0'
user = 'admin'
password = '123'
Session.login(auth_url, user, password, tenant=None, otp=None)
vms = havclient.vm_list(Session.User, search_opts=None, all_tenants=None)
vm = havclient.server_port(Session.User, vms[0], search_opts=None, all_tenants=None)
print '**************8 %s ' % vms
class Test(object):
def Update(self, value, msg):
print value, msg
def WorkFinished(self, msg):
print msg
def Finish(self):
pass
dlg = Test()
thread = LaunchThread(vm, dlg)
thread.start()
app.MainLoop()
thread.join()
for key in IP_TRANSLATE_TABLE.keys():
print key, '->', IP_TRANSLATE_TABLE[key]
| identifier_name | ||
Console.py | #!/usr/bin/env python
# coding=utf-8
'''
Created on Jun 18, 2012
@author: gf
'''
import wx
import os
import subprocess
import threading
import base64
import urllib2
import Setting
import Session
import Util
import VM
import Logger
import consoleInfo
import havclient
import MainFrame
import Wfile
import Main
from SendRequests import SpicePorxyRequests,RestartDeviceRequests,SaveVmLoginInfo
from Setting import AdminShadow,FirstUser
#from ovirtsdk.xml.params import Action, Ticket
#IP_TRANSLATE_FILE = os.curdir + '/ip.conf'
IP_TRANSLATE_TABLE = {}
SPICE_ERR_TABLE = (
u"成功",
u"错误",
u"获取服务器地址失败",
u"连接失败\n\n可能的原因:\n1.网络故障,请检查网络。\n2.其他用户登录到这台桌面云。\n3.桌面云出现错误。",
u"套接字失败",
u"发送失败",
u"接收失败",
u"SSL过程失败",
u"内存不足",
u"代理超时",
u"代理错误",
u"版本匹配失败",
u"权限不足",
u"无效的参数",
u"命令行错误",
)
CA_DOWNLOAD_CACHE = []
RDP_PORT = 3389
def loadIPConfig():
if not os.access(IP_TRANSLATE_FILE, os.F_OK):
with open(IP_TRANSLATE_FILE, "w") as file:
file.write('# IP Translate File\n')
file.write('# Source IP Destination IP\n')
file.write('# \n')
file.write('# eg.\n')
file.write('# 10.10.10.1 100.10.10.1\n')
return
with open(IP_TRANSLATE_FILE, "r") as file:
lines = file.readlines()
for line in lines:
# skip comment line
if line.startswith('#') :
continue
splits = line.strip().split()
if len(splits) >= 2:
IP_TRANSLATE_TABLE[splits[0]] = splits[1]
class LaunchThread(threading.Thread):
def __init__(self, p_id, vm, Type, window):
threading.Thread.__init__(self)
self.window = window
self.p_id = p_id
self.vm = vm
self.Type = Type
self.cancel = False
self.ret = -1
self.msg = ""
def get_argument(self):
try:
control = havclient.get_control(AdminShadow['admins'], self.vm, self.p_id)
except :
Logger.error("Get control failed!")
try:
usb = control.get('usb', None)
Logger.info("The %s USB_policy was allowde by server." , self.vm.name)
except :
usb = False
Logger.error('The usb_policy has not been provided!')
try:
broadcast = control.get("allow_screen_broadcast", None)
Logger.info("The %s was allowed screen_broadcast by server", self.vm.name)
except:
Logger.error("The %s was not allowed screen_broadcast by server", self.vm.name)
if usb:
arg_usb = " --spice-usbredir-auto-redirect-filter='-1,-1,-1-1,1' --spice-usbredir-redirect-on-connect='-1,-1,-1,-1,1' "
else:
arg_usb = "--spice-disable-usbredir"
if broadcast:
arg_bro = " --teacher "
else:
arg_bro = ""
try:
if len(Main.get_device_connected()) == 1:
arg_screen = ""
else :
w = os.popen("ps -ef | grep remote*", 'r')
ww = w.readlines()
w.close()
if len(ww) == 2:
arg_screen = "--extend"
else:
arg_screen = ""
except Exception as e:
Logger.error("extend screen failed: %s",e)
'''
try:
spice_secure = havclient.get_spice_secure(AdminShadow['admins'], self.vm, self.p_id)
if spice_secure:
try:
filename = '/tmp/ca-cert.pem'
addrip= Setting.getServer()
url = ''.join(["http://", addrip, ":5009/ca-cert.pem"])
info = urllib2.urlopen(url, timeout = 1)
f = open(filename, 'w')
while True:
buf = info.read(4096)
f.write(buf)
if buf == '':
break
Logger.info("Download ca-cert.pem succeed.")
finally:
info.close()
f.close()
arg_sec = '--spice-ca-file=/tmp/ca-cert.pem --spice-secure-channels=main,inputs --spice-host-subject="C=IL, L=Raanana, O=Red Hat, CN=my server"'
else:
arg_sec = ""
except Exception as e:
Logger.error('Get spice secure failed: %s' % str(e))
'''
arg = ' '.join([arg_usb, arg_bro, arg_screen])
return arg
def get_rdpsetting(self):
if Setting.getAllow_device().lower() == 'true':
arg_device = '/drive:USB,/run/media/root '
else:
arg_device = ''
#if Setting.getAuto_connect().lower() == 'true':
# arg_auto = '+auto-reconnect '
#else:
# arg_auto = ''
#if Setting.getHeadset_micro().lower() == 'true':
# arg_head_mic = '/sound:sys:pulse /microphone:sys:pulse '
#else:
# arg_head_mic = ''
if Setting.getRemotefx().lower() == 'true':
arg_rem = '/codec-cache:jpeg /gfx '
else:
arg_rem = ''
if Setting.getPublic().lower() == 'true':
arg_net = '/network:wan '
else:
arg_net = '/network:lan '
arg = ' '.join([arg_device, arg_rem, arg_net])
return arg
def stop(self):
self.cancel = True
def run(self):
import pdb
#pdb.set_trace()
try:
count = 0
win = self.window
wx.CallAfter(win.Update, 1, u'更新桌面云状态...')
if self.cancel:
return
if self.Type == 'JSP' or self.Type == 'UNKNOWN':
#import pdb
#pdb.set_trace()
vminfo = havclient.server_port(AdminShadow['admins'], self.p_id, self.vm, search_opts=None, all_tenants=None)
passwd = havclient.get_cipher(FirstUser['firstuser'], self.p_id, self.vm)
#vmcipher = havclient.get_cipher(FirstUser['firstuser'], self.p_id, self.vm)
#passwd = vmcipher.cipher
vmconsole = consoleInfo.ConsoleInfo(vminfo['console'])
host = vmconsole.host
port = vmconsole.port
dsport = vmconsole.tlsport
public_ip = Setting.getPublic().lower()
Logger.info("The public_ip status is %s", public_ip)
dport=port
if public_ip == "false":
dhost=Wfile.Parsing_hosts(host)
try:
os.environ["SPICE_PROXY"] = ""
except:
Logger.error("Configure the environment variables failed!")
pass
elif public_ip == "true":
dhost=host
proxy_status, proxy_port = SpicePorxyRequests()
if proxy_status:
try:
os.environ["SPICE_PROXY"] = "http://%s:%s" % (Setting.getServer(),proxy_port)
except:
Logger.error("Configure the environment variables failed!")
else:
Util.MessageBox(self, '连接失败!\n\n系统管理员未设置公网代理!', u'错误', wx.OK | wx.ICON_ERROR)
return
wx.CallAfter(win.WorkFinished, u'获取桌面云信息... 成功')
wx.CallAfter(win.Update, 1, u'打开桌面云...')
argument = self.get_argument()
#cmdline = 'scdaemon spice://%s/?port=%s\&tls-port=%s\&password=%s --hotkeys=exit-app=shift+f12,release-cursor=shift+f11 -f --title="%s"' % (dhost,dport,dsport,passwd,self.vm.name) + argument
cmdline = 'scdaemon spice://%s/?port=%s\&password=%s --hotkeys=exit-app=shift+f12,release-cursor=shift+f11 -f --title="%s"' % (dhost,dport,passwd,self.vm.name) + argument
Logger.info("Spice Cmd:%s",cmdline)
elif self.Type == 'RDP':
user = Setting.getUser()
Logger.info("The user of VM is %s",user)
password = Setting.getCipher()
Logger.info("The password of user is %s",password)
ip = havclient.get_vm_ip(self.vm)
public_ip = Setting.getPublic().lower()
Logger.info("The public_ip status is %s" , public_ip)
argu = self.get_rdpsetting()
if public_ip == 'false':
ipaddress = ip
Logger.info("The ipaddress of user is %s", ipaddress)
cmdline = 'xfreerdp /u:%s /p:%s /v:%s /cert-ignore -sec-tls /bpp:32 /gdi:hw /multimedia:decoder:gstreamer +auto-reconnect /sound:sys:pulse /microphone:sys:pulse -f ' % (user, password, ipaddress) + argu
elif public_ip == 'true':
ip, port = Wfile.getmapping_prot(ip, '3389')
ipaddress = ip + ':' + port
Logger.info("The ipaddress of user is %s", ipaddress)
cmdline = 'xfreerdp /u:%s /p:%s /v:%s /cert-ignore -sec-tls /bpp:32 /gdi:hw /multimedia:decoder:gstreamer +auto-reconnect /sound:sys:pulse /microphone:sys:pulse -f ' % (user, password, ipaddress) + argu
Logger.info("Rdp Cmd:%s",cmdline)
body={}
vmlogintime = Setting.getVMLoginTime()
clientip = Setting.getClientIP()
body['client_time'] = vmlogintime
body['client_ip'] = clientip
# try:
# havclient.connect_client_info(AdminShadow['admins'], self.p_id, self.vm.id, body)
# except:
# Logger.error("Send vmlogininfo failed!")
Logger.debug(cmdline)
ret = Util.RunConnectGuestWithLog(cmdline)
if self.cancel:
return
wx.CallAfter(win.WorkFinished, u'打开桌面云... 成功')
wx.CallAfter(win.Finish)
name=self.vm.name
desktop=name.encode('utf-8')
Setting.setDesktop(desktop)
Setting.save()
RestartDeviceRequests()
SaveVmLoginInfo()
self.stop()
self.ret = ret
except Exception, e:
self.ret = 1
self.msg = "%s : %s" %(SPICE_ERR_TABLE[1], e)
Logger.debug("Console Thread Ended!")
#loadIPConfig()
if __name__ == '__main__':
app = wx.PySimpleApp()
auth_url = 'http://192.168.8.150:5000/v2.0'
user = 'admin'
password = '123'
Session.login(auth_url, user, password, tenant=None, otp=None)
vms = havclient.vm_list(Session.User, search_opts=None, all_tenants=None)
vm = havclient.server_port(Session.User, vms[0], search_opts=None, all_tenants=None)
print '**************8 %s ' % vms
class Test(object):
def Update(self, value, msg):
print value, msg
def WorkFinished(self, msg):
print msg
def Finish(self):
pass
dlg = Test()
thread = LaunchThread(vm, dlg)
thread.start()
app.MainLoop()
thread.join() |
for key in IP_TRANSLATE_TABLE.keys():
print key, '->', IP_TRANSLATE_TABLE[key] | random_line_split | |
Console.py | #!/usr/bin/env python
# coding=utf-8
'''
Created on Jun 18, 2012
@author: gf
'''
import wx
import os
import subprocess
import threading
import base64
import urllib2
import Setting
import Session
import Util
import VM
import Logger
import consoleInfo
import havclient
import MainFrame
import Wfile
import Main
from SendRequests import SpicePorxyRequests,RestartDeviceRequests,SaveVmLoginInfo
from Setting import AdminShadow,FirstUser
#from ovirtsdk.xml.params import Action, Ticket
#IP_TRANSLATE_FILE = os.curdir + '/ip.conf'
IP_TRANSLATE_TABLE = {}
SPICE_ERR_TABLE = (
u"成功",
u"错误",
u"获取服务器地址失败",
u"连接失败\n\n可能的原因:\n1.网络故障,请检查网络。\n2.其他用户登录到这台桌面云。\n3.桌面云出现错误。",
u"套接字失败",
u"发送失败",
u"接收失败",
u"SSL过程失败",
u"内存不足",
u"代理超时",
u"代理错误",
u"版本匹配失败",
u"权限不足",
u"无效的参数",
u"命令行错误",
)
CA_DOWNLOAD_CACHE = []
RDP_PORT = 3389
def loadIPConfig():
if not os.access(IP_TRANSLATE_FILE, os.F_OK):
with open(IP_TRANSLATE_FILE, "w") as file:
file.write('# IP Translate File\n')
file.write('# Source IP Destination IP\n')
file.write('# \n')
file.write('# eg.\n')
file.write('# 10.10.10.1 100.10.10.1\n')
return
with open(IP_TRANSLATE_FILE, "r") as file:
lines = file.readlines()
for line in lines:
# skip comment line
if line.startswith('#') :
continue
splits = line.strip().split()
if len(splits) >= 2:
IP_TRANSLATE_TABLE[splits[0]] = splits[1]
class LaunchThread(threading.Thread):
def __init__(self, p_id, vm, Type, window):
threading.Thread.__init__(self)
self.window = window
self.p_id = p_id
self.vm = vm
self.Type = Type
self.cancel = False
self.ret = -1
self.msg = ""
def get_argument(self):
try:
control = havclient.get_control(AdminShadow['admins'], self.vm, self.p_id)
except :
Logger.error("Get control failed!")
try:
usb = control.get('usb', None)
Logger.info("The %s USB_policy was allowde by server." , self.vm.name)
except :
usb = False
Logger.error('The usb_policy has not been provided!')
try:
broadcast = control.get("allow_screen_broadcast", None)
Logger.info("The %s was allowed screen_broadcast by server", self.vm.name)
except:
Logger.error("The %s was not allowed screen_broadcast by server", self.vm.name)
if usb:
arg_usb = " --spice-usbredir-auto-redirect-filter='-1,-1,-1-1,1' --spice-usbredir-redirect-on-connect='-1,-1,-1,-1,1' "
else:
arg_usb = "--spice-disable-usbredir"
if broadcast:
arg_bro = " --teacher "
else:
arg_bro = ""
try:
if len(Main.get_device_connected()) == 1:
arg_screen = ""
else :
w = os.popen("ps -ef | grep remote*", 'r')
ww = w.readlines()
w.close()
if len(ww) == 2:
arg_screen = "--extend"
else:
arg_screen = ""
except Exception as e:
Logger.error("extend screen failed: %s",e)
'''
try:
spice_secure = havclient.get_spice_secure(AdminShadow['admins'], self.vm, self.p_id)
if spice_secure:
try:
filename = '/tmp/ca-cert.pem'
addrip= Setting.getServer()
url = ''.join(["http://", addrip, ":5009/ca-cert.pem"])
info = urllib2.urlopen(url, timeout = 1)
f = open(filename, 'w')
while True:
buf = info.read(4096)
f.write(buf)
if buf == '':
break
Logger.info("Download ca-cert.pem succeed.")
finally:
info.close()
f.close()
arg_sec = '--spice-ca-file=/tmp/ca-cert.pem --spice-secure-channels=main,inputs --spice-host-subject="C=IL, L=Raanana, O=Red Hat, CN=my server"'
else:
arg_sec = ""
except Exception as e:
Logger.error('Get spice secure failed: %s' % str(e))
'''
arg = ' '.join([arg_usb, arg_bro, arg_screen])
return arg
def get_rdpsetting(self):
if Setting.getAllow_device().lower() == 'true':
arg_device = '/drive:USB,/run/media/root '
else:
arg_device = ''
#if Setting.getAuto_connect().lower() == 'true':
# arg_auto = '+auto-reconnect '
#else:
# arg_auto = ''
#if Setting.getHeadset_micro().lower() == 'true':
# arg_head_mic = '/sound:sys:pulse /microphone:sys:pulse '
#else:
# arg_head_mic = ''
if Setting.getRemotefx().lower() == 'true':
arg_rem = '/codec-cache:jpeg /gfx '
else:
arg_rem = ''
if Setting.getPublic().lower() == 'true':
arg_net = '/network:wan '
else:
arg_net = '/network:lan '
arg = ' '.join([arg_device, arg_rem, arg_net])
return arg
def stop(self):
self.cancel = True
def run(self):
import pdb
| try:
count = 0
win = self.window
wx.CallAfter(win.Update, 1, u'更新桌面云状态...')
if self.cancel:
return
if self.Type == 'JSP' or self.Type == 'UNKNOWN':
#import pdb
#pdb.set_trace()
vminfo = havclient.server_port(AdminShadow['admins'], self.p_id, self.vm, search_opts=None, all_tenants=None)
passwd = havclient.get_cipher(FirstUser['firstuser'], self.p_id, self.vm)
#vmcipher = havclient.get_cipher(FirstUser['firstuser'], self.p_id, self.vm)
#passwd = vmcipher.cipher
vmconsole = consoleInfo.ConsoleInfo(vminfo['console'])
host = vmconsole.host
port = vmconsole.port
dsport = vmconsole.tlsport
public_ip = Setting.getPublic().lower()
Logger.info("The public_ip status is %s", public_ip)
dport=port
if public_ip == "false":
dhost=Wfile.Parsing_hosts(host)
try:
os.environ["SPICE_PROXY"] = ""
except:
Logger.error("Configure the environment variables failed!")
pass
elif public_ip == "true":
dhost=host
proxy_status, proxy_port = SpicePorxyRequests()
if proxy_status:
try:
os.environ["SPICE_PROXY"] = "http://%s:%s" % (Setting.getServer(),proxy_port)
except:
Logger.error("Configure the environment variables failed!")
else:
Util.MessageBox(self, '连接失败!\n\n系统管理员未设置公网代理!', u'错误', wx.OK | wx.ICON_ERROR)
return
wx.CallAfter(win.WorkFinished, u'获取桌面云信息... 成功')
wx.CallAfter(win.Update, 1, u'打开桌面云...')
argument = self.get_argument()
#cmdline = 'scdaemon spice://%s/?port=%s\&tls-port=%s\&password=%s --hotkeys=exit-app=shift+f12,release-cursor=shift+f11 -f --title="%s"' % (dhost,dport,dsport,passwd,self.vm.name) + argument
cmdline = 'scdaemon spice://%s/?port=%s\&password=%s --hotkeys=exit-app=shift+f12,release-cursor=shift+f11 -f --title="%s"' % (dhost,dport,passwd,self.vm.name) + argument
Logger.info("Spice Cmd:%s",cmdline)
elif self.Type == 'RDP':
user = Setting.getUser()
Logger.info("The user of VM is %s",user)
password = Setting.getCipher()
Logger.info("The password of user is %s",password)
ip = havclient.get_vm_ip(self.vm)
public_ip = Setting.getPublic().lower()
Logger.info("The public_ip status is %s" , public_ip)
argu = self.get_rdpsetting()
if public_ip == 'false':
ipaddress = ip
Logger.info("The ipaddress of user is %s", ipaddress)
cmdline = 'xfreerdp /u:%s /p:%s /v:%s /cert-ignore -sec-tls /bpp:32 /gdi:hw /multimedia:decoder:gstreamer +auto-reconnect /sound:sys:pulse /microphone:sys:pulse -f ' % (user, password, ipaddress) + argu
elif public_ip == 'true':
ip, port = Wfile.getmapping_prot(ip, '3389')
ipaddress = ip + ':' + port
Logger.info("The ipaddress of user is %s", ipaddress)
cmdline = 'xfreerdp /u:%s /p:%s /v:%s /cert-ignore -sec-tls /bpp:32 /gdi:hw /multimedia:decoder:gstreamer +auto-reconnect /sound:sys:pulse /microphone:sys:pulse -f ' % (user, password, ipaddress) + argu
Logger.info("Rdp Cmd:%s",cmdline)
body={}
vmlogintime = Setting.getVMLoginTime()
clientip = Setting.getClientIP()
body['client_time'] = vmlogintime
body['client_ip'] = clientip
# try:
# havclient.connect_client_info(AdminShadow['admins'], self.p_id, self.vm.id, body)
# except:
# Logger.error("Send vmlogininfo failed!")
Logger.debug(cmdline)
ret = Util.RunConnectGuestWithLog(cmdline)
if self.cancel:
return
wx.CallAfter(win.WorkFinished, u'打开桌面云... 成功')
wx.CallAfter(win.Finish)
name=self.vm.name
desktop=name.encode('utf-8')
Setting.setDesktop(desktop)
Setting.save()
RestartDeviceRequests()
SaveVmLoginInfo()
self.stop()
self.ret = ret
except Exception, e:
self.ret = 1
self.msg = "%s : %s" %(SPICE_ERR_TABLE[1], e)
Logger.debug("Console Thread Ended!")
#loadIPConfig()
if __name__ == '__main__':
app = wx.PySimpleApp()
auth_url = 'http://192.168.8.150:5000/v2.0'
user = 'admin'
password = '123'
Session.login(auth_url, user, password, tenant=None, otp=None)
vms = havclient.vm_list(Session.User, search_opts=None, all_tenants=None)
vm = havclient.server_port(Session.User, vms[0], search_opts=None, all_tenants=None)
print '**************8 %s ' % vms
class Test(object):
def Update(self, value, msg):
print value, msg
def WorkFinished(self, msg):
print msg
def Finish(self):
pass
dlg = Test()
thread = LaunchThread(vm, dlg)
thread.start()
app.MainLoop()
thread.join()
for key in IP_TRANSLATE_TABLE.keys():
print key, '->', IP_TRANSLATE_TABLE[key]
| #pdb.set_trace()
| conditional_block |
products.js | let x = 0;
let products = [];
let cartProducts = [];
let listOfTotal = [];
class Product {
constructor(name, price, image, description) {
this.id = x++;
this.name = name;
this.price = price;
this.image = image;
this.description = description;
this.inCart = false;
}
}
class CartItem {
constructor(product, qty) {
this.product = product;
this.qty = qty;
}
}
let p1 = new Product(
"Rolex",
86890,
"<img src='./../img/rolexsilver.jpg'/>",
"Rolex presents the new generation of its Oyster Perpetual watches and brings a new model to the range, the Oyster Perpetual 41, as well as versions of the Oyster Perpetual 36 displaying dials in vivid colours. The light reflections on the case sides highlight the elegant profile of the Oyster case, which is made from Oystersteel. Measuring 41 mm or 36 mm respectively, it is fitted with a domed bezel."
);
let p2 = new Product(
"Gant",
1145,
"<img src='./../img/gantsilver.png'/>",
"Classic Gant Time Mens watch W108411 . Colors > Dial: Black, Strap: Brown, Case: Rosé gold. Very comfortable Calf leather strap. The water resistance is 5 ATM. A scratch proove Mineral glass, hardened protects your watch from involuntary injuries."
);
let p3 = new Product(
"Timex",
1395,
"<img src='./../img/timexbrown.jpg'/>",
"Metropolitan R has all the functionality you need including up to 2 weeks of battery life. This sleek design boasts an impressive AMOLED display that you can customize with over 20+ dial designs. Plus, 24/7 activity and sleep tracking for your health and fitness goals, on-board GPS, optical heart rate sensor, notifications and much more. This is the perfect smartwatch to fit your busy lifestyle. "
);
let p4 = new Product(
"Longines",
10725,
"<img src='./../img/longinessilver.jpg'/>",
"Conquest 24 has a simple yet appealing design. It features a robust package, with a 41 mm wide round steel case (with a screw-in back and a screw-in crown with protection) and a three-link bracelet (with the thickest central row) of the same material. The dial is available in three classic colors: black, silvered and blue; and it is dominated with two Roman numerals, a 24-hour scale and its attached red hand. For improved visibility, hands, numerals and indices are coated with luminescent SuperLuminova material."
);
let p5 = new Product(
"Hunters Race",
2495,
"<img src='./../img/huntersbrown.jpg'/>",
"Hunters Race is a curated collection of the finest timepieces designed in Auckland, New Zealand by father and son team Scott and Jack Ramsay. Scott has over thirty years experience in the watch industry and ensures that their watches are of the highest quality, along with providing a very professional back up service. The watch has a 12 month guarantee and comes in a beautiful gift box along with a spare strap. "
);
let p6 = new Product(
"Tommy Hilfiger",
1595,
"<img src='./../img/tommyblack.jpg'/>",
"Make a premium mark of style on your outfit with a timeless Tommy Hilfiger watch. Whether you're looking for classic leather, smart stainless steel or dressy silver watch, why not indulge in our range of men's and ladies watches and discover luxurious high fashion, sophistication in enduring designs that will inevitably stand the test of time."
);
let p7 = new Product(
"Emporio Armani",
2745,
"<img src='./../img/armanisilver.jpg'/>",
"Emporio Armani, as a brand, is known for its ready-to-wear and runway collections. It focuses on current trends and modern traits."
);
let p8 = new Product(
"Fossil",
1295,
"<img src='./../img/fossilsilver.jpg'/>",
"This watch really does remind me of Omega’s moonwatch. It’s not a direct homage but is certainly very similar in its styling. And really, it’s the appearance that attracted me to this watch. It is a mid-sized, racing chronograph with real retro charm."
);
let p9 = new Product(
"Thomas Sabo",
2625,
"<img src='./../img/thomasblack.jpg'/>",
"Black and silver men's watch: The Statement chronograph from THOMAS SABO convinces with a rebellious design. The dial of the dial, the ring of the case, the crown and the buttons of the case and also the detailed bracelet in stainless steel attract the eye thanks to its pyramid design and typically THOMAS SABO DNA."
);
let p10 = new Product(
"Braun",
2495,
"<img src='./../img/braunblack.jpg'/>",
"A collaborative design project that partners German design with British fashion. Braun and Paul Smith have teamed up on a limited-edition clock and watch project. Paul Smith is one of Britain’s foremost designers, renowned for his creative spirit, which combines tradition and modernity."
);
let p11 = new Product(
"Omega",
16305,
"<img src='./../img/omega.jpg'/>",
" The OMEGA Speedmaster is one of OMEGA’s most iconic timepieces. Having been a part of all six lunar missions, the legendary Speedmaster is an impressive representation of the brand’s adventurous pioneering spirit."
);
let p12 = new Product(
"Oris",
52900,
"<img src='./../img/oris.jpg'/>",
"The Aquis collection of diver’s watches offers real-world functionality in an innovative package, through a combination of considered design, high-performance materials and fit-for-purpose functions."
);
$(function () {
addProduct();
createProduct();
getFromLocalStorage();
updateCartTotalPrice();
notice();
let select = $(".form-control");
$(select).on("change", function (e) {
console.log($(this).val());
if ($(this).val() == "l2h") {
products.sort((a, b) => {
if (a.price > b.price) {
return 1;
}
if (a.price < b.price) {
return -1;
}
return 0;
});
createProduct();
} else if ($(this).val() == "h2l") {
products.reverse();
}
createProduct();
});
$("#buyButton").on("click", function () {
if (cartProducts.length <= 0) {
alert("Shopping cart is empty");
} else {
for (let i = 0; i < cartProducts.length; i++) {
if (cartProducts.length >= 0) {
window.location.href = "./../html/checkout.html";
}
}
}
});
$("#dialog").dialog({
autoOpen: false,
position: { my: "right top", at: "right top", of: window },
show: {
effect: "blind",
duration: 1000,
},
hide: {
effect: "blind",
duration: 1000,
},
});
$("#opener").on("click", function () {
if (!$("#dialog").dialog("isOpen")) {
$("#dialog").dialog("open");
} else {
$("#dialog").dialog("close");
}
});
});
function addProduct() {
products.push(p1, p2, p3, p4, p5, p6, p7, p8, p9, p10, p11, p12);
}
function createProduct() {
let productlist = document.getElementById("product-container");
productlist.innerHTML = "";
$.each(products, (i, product) => {
let container = $("<div>").addClass("product").attr("id", product.id);
$("<div>").addClass("image").html(product.image).appendTo(container);
$("<h3>").html(product.name).appendTo(container);
$("<p>")
.addClass("price")
.html(product.price + " " + "SEK")
.appendTo(container);
$("<a>")
.attr("href", "javascript:;")
.addClass("show_hide")
.attr("data-content", "toggle-text")
.attr("id", "atag")
.text("Read More")
.appendTo(container);
$("<p>")
.addClass("description")
.html(product.description)
.appendTo(container);
let addToCartButtons = $("<button>Add to Cart</button>")
.addClass("AddToCartButton")
.appendTo(container);
addToCartButtons.on("click", () => {
clickedAddToCart(products[i]);
});
container.appendTo($("#product-container"));
});
$(".description").hide();
$(".show_hide").on("click", function () {
let txt = $(".description").is(":visible") ? "Read More" : "Read Less";
$(".show_hide").text(txt);
$(this).next(".description").slideToggle(200);
});
}
function createShoppingCart() {
let shoppingcart = document.getElementById("shoppingCart-container");
shoppingcart.innerHTML = "";
$.each(cartProducts, (i, cartProduct) => {
let shoppingCartContainer = $("<div>")
.addClass("cartproduct")
.attr("id", cartProducts[i].product.id);
$("<div>")
.addClass("image")
.html(cartProducts[i].product.image)
.appendTo(shoppingCartContainer);
$("<h3>")
.html(cartProducts[i].product.name)
.appendTo(shoppingCartContainer);
$("<p>")
.html(cartProducts[i].product.price + " " + "SEK")
.appendTo(shoppingCartContainer);
let deleteButton = $("<button>Delete</button>")
.addClass("deleteButton")
.appendTo(shoppingCartContainer);
deleteButton.on("click", () => {
deleteCartProduct(cartProducts[i]);
});
let counterdiv = $("<div>")
.addClass("counterdiv")
.appendTo(shoppingCartContainer);
let displayCounter = $("<div>").addClass("counter").appendTo(counterdiv);
$("<p>")
.addClass("activeCount")
.html(cartProducts[i].qty)
.appendTo(displayCounter);
let minus = $("<button>-</button>")
.html('<i class="fas fa-minus-circle"></i>')
.addClass("subbtn")
.on("click", () => {
subtractOneProduct(cartProducts[i]);
});
minus.appendTo(counterdiv);
let add = $("<button>+</button>")
.addClass("addbtn")
.html('<i class="fas fa-plus-circle"></i>')
.on("click", () => {
addOneProduct(cartProducts[i]);
});
add.appendTo(counterdiv);
shoppingCartContainer.appendTo($("#shoppingCart-container"));
});
}
function addOneProduct(cartProduct) {
for (let i = 0; i < cartProducts.length; i++) {
if (cartProducts[i].product.id == cartProduct.product.id) {
cartProducts[i].qty++;
createShoppingCart();
}
if (cartProducts[i].qty > 1) {
let tempsum = cartProducts[i].qty * 1;
let total = tempsum * parseInt(cartProducts[i].product.price);
listOfTotal.push(total);
updateCartTotalPrice();
addToLocalStorage(cartProducts);
notice();
} else {
updateCartTotalPrice();
notice();
addToLocalStorage(cartProducts);
}
}
}
function subtractOneProduct(cartProduct) {
for (let i = 0; i < cartProducts.length; i++) {
if (cartProducts[i].product.id == cartProduct.product.id) {
cartProducts[i].qty--;
}
if (cartProducts[i].qty < 1) {
cartProducts.splice(i, 1);
cartProduct.product.inCart = false;
}
createShoppingCart();
updateCartTotalPrice();
notice();
addToLocalStorage(cartProducts);
}
}
function deleteCartProduct(cartProduct) {
for (let i = 0; i < cartProducts.length; i++) {
if (cartProducts[i].product.id == cartProduct.product.id) {
cartProducts.splice(i, 1);
cartProduct.product.inCart = false;
}
createShoppingCart();
updateCartTotalPrice();
notice();
addToLocalStorage(cartProducts); | function clickedAddToCart(product) {
for (let i = 0; i < products.length; i++) {
if (product.id === products[i].id) {
if (product.inCart == false) {
cartProducts.push(new CartItem(product, 1));
product.inCart = true;
createShoppingCart();
updateCartTotalPrice();
addToLocalStorage(cartProducts);
notice();
} else if (product.inCart == true) {
for (let i = 0; i < cartProducts.length; i++) {
if (cartProducts[i].product.id === product.id) {
cartProducts[i].qty++;
createShoppingCart();
updateCartTotalPrice();
addToLocalStorage(cartProducts);
notice();
}
}
}
}
}
}
function updateCartTotalPrice() {
let sum = 0;
$.each(cartProducts, (i, cartProduct) => {
sum += cartProducts[i].qty * cartProducts[i].product.price;
});
$("#totalPrice").html("Total Price:" + " " + sum + " " + "SEK");
return sum;
}
function notice() {
let amount = 0;
if (cartProducts.length <= 0) {
let total = 0;
let totalamount = $(".notice");
totalamount.html("");
let noticeAmount = $("<p>").addClass("amount").html(total);
noticeAmount.appendTo(totalamount);
} else {
for (let i = 0; i < cartProducts.length; i++) {
let total = (amount += cartProducts[i].qty);
let totalamount = $(".notice");
totalamount.html("");
let noticeAmount = $("<p>").addClass("amount").html(total);
noticeAmount.appendTo(totalamount);
}
}
}
function addToLocalStorage(cartProducts) {
localStorage.setItem("cartProducts", JSON.stringify(cartProducts));
createShoppingCart(cartProducts);
}
function getFromLocalStorage() {
let cartProductFromLS = localStorage.getItem("cartProducts");
if (cartProductFromLS) {
cartProducts = JSON.parse(cartProductFromLS);
createShoppingCart(cartProducts);
}
} | }
}
| random_line_split |
products.js | let x = 0;
let products = [];
let cartProducts = [];
let listOfTotal = [];
class Product {
constructor(name, price, image, description) {
this.id = x++;
this.name = name;
this.price = price;
this.image = image;
this.description = description;
this.inCart = false;
}
}
class CartItem {
constructor(product, qty) {
this.product = product;
this.qty = qty;
}
}
let p1 = new Product(
"Rolex",
86890,
"<img src='./../img/rolexsilver.jpg'/>",
"Rolex presents the new generation of its Oyster Perpetual watches and brings a new model to the range, the Oyster Perpetual 41, as well as versions of the Oyster Perpetual 36 displaying dials in vivid colours. The light reflections on the case sides highlight the elegant profile of the Oyster case, which is made from Oystersteel. Measuring 41 mm or 36 mm respectively, it is fitted with a domed bezel."
);
let p2 = new Product(
"Gant",
1145,
"<img src='./../img/gantsilver.png'/>",
"Classic Gant Time Mens watch W108411 . Colors > Dial: Black, Strap: Brown, Case: Rosé gold. Very comfortable Calf leather strap. The water resistance is 5 ATM. A scratch proove Mineral glass, hardened protects your watch from involuntary injuries."
);
let p3 = new Product(
"Timex",
1395,
"<img src='./../img/timexbrown.jpg'/>",
"Metropolitan R has all the functionality you need including up to 2 weeks of battery life. This sleek design boasts an impressive AMOLED display that you can customize with over 20+ dial designs. Plus, 24/7 activity and sleep tracking for your health and fitness goals, on-board GPS, optical heart rate sensor, notifications and much more. This is the perfect smartwatch to fit your busy lifestyle. "
);
let p4 = new Product(
"Longines",
10725,
"<img src='./../img/longinessilver.jpg'/>",
"Conquest 24 has a simple yet appealing design. It features a robust package, with a 41 mm wide round steel case (with a screw-in back and a screw-in crown with protection) and a three-link bracelet (with the thickest central row) of the same material. The dial is available in three classic colors: black, silvered and blue; and it is dominated with two Roman numerals, a 24-hour scale and its attached red hand. For improved visibility, hands, numerals and indices are coated with luminescent SuperLuminova material."
);
let p5 = new Product(
"Hunters Race",
2495,
"<img src='./../img/huntersbrown.jpg'/>",
"Hunters Race is a curated collection of the finest timepieces designed in Auckland, New Zealand by father and son team Scott and Jack Ramsay. Scott has over thirty years experience in the watch industry and ensures that their watches are of the highest quality, along with providing a very professional back up service. The watch has a 12 month guarantee and comes in a beautiful gift box along with a spare strap. "
);
let p6 = new Product(
"Tommy Hilfiger",
1595,
"<img src='./../img/tommyblack.jpg'/>",
"Make a premium mark of style on your outfit with a timeless Tommy Hilfiger watch. Whether you're looking for classic leather, smart stainless steel or dressy silver watch, why not indulge in our range of men's and ladies watches and discover luxurious high fashion, sophistication in enduring designs that will inevitably stand the test of time."
);
let p7 = new Product(
"Emporio Armani",
2745,
"<img src='./../img/armanisilver.jpg'/>",
"Emporio Armani, as a brand, is known for its ready-to-wear and runway collections. It focuses on current trends and modern traits."
);
let p8 = new Product(
"Fossil",
1295,
"<img src='./../img/fossilsilver.jpg'/>",
"This watch really does remind me of Omega’s moonwatch. It’s not a direct homage but is certainly very similar in its styling. And really, it’s the appearance that attracted me to this watch. It is a mid-sized, racing chronograph with real retro charm."
);
let p9 = new Product(
"Thomas Sabo",
2625,
"<img src='./../img/thomasblack.jpg'/>",
"Black and silver men's watch: The Statement chronograph from THOMAS SABO convinces with a rebellious design. The dial of the dial, the ring of the case, the crown and the buttons of the case and also the detailed bracelet in stainless steel attract the eye thanks to its pyramid design and typically THOMAS SABO DNA."
);
let p10 = new Product(
"Braun",
2495,
"<img src='./../img/braunblack.jpg'/>",
"A collaborative design project that partners German design with British fashion. Braun and Paul Smith have teamed up on a limited-edition clock and watch project. Paul Smith is one of Britain’s foremost designers, renowned for his creative spirit, which combines tradition and modernity."
);
let p11 = new Product(
"Omega",
16305,
"<img src='./../img/omega.jpg'/>",
" The OMEGA Speedmaster is one of OMEGA’s most iconic timepieces. Having been a part of all six lunar missions, the legendary Speedmaster is an impressive representation of the brand’s adventurous pioneering spirit."
);
let p12 = new Product(
"Oris",
52900,
"<img src='./../img/oris.jpg'/>",
"The Aquis collection of diver’s watches offers real-world functionality in an innovative package, through a combination of considered design, high-performance materials and fit-for-purpose functions."
);
$(function () {
addProduct();
createProduct();
getFromLocalStorage();
updateCartTotalPrice();
notice();
let select = $(".form-control");
$(select).on("change", function (e) {
console.log($(this).val());
if ($(this).val() == "l2h") {
products.sort((a, b) => {
if (a.price > b.price) {
return 1;
}
if (a.price < b.price) {
return -1;
}
return 0;
});
createProduct();
} else if ($(this).val() == "h2l") {
products.reverse();
}
createProduct();
});
$("#buyButton").on("click", function () {
if (cartProducts.length <= 0) {
alert("Shopping cart is empty");
} else {
for (let i = 0; i < cartProducts.length; i++) {
if (cartProducts.length >= 0) {
window.location.href = "./../html/checkout.html";
}
}
}
});
$("#dialog").dialog({
autoOpen: false,
position: { my: "right top", at: "right top", of: window },
show: {
effect: "blind",
duration: 1000,
},
hide: {
effect: "blind",
duration: 1000,
},
});
$("#opener").on("click", function () {
if (!$("#dialog").dialog("isOpen")) {
$("#dialog").dialog("open");
} else {
$("#dialog").dialog("close");
}
});
});
function addProduct() {
products.push(p1, p2, p3, p4, p5, p6, p7, p8, p9, p10, p11, p12);
}
function createProduct() {
let product | teShoppingCart() {
let shoppingcart = document.getElementById("shoppingCart-container");
shoppingcart.innerHTML = "";
$.each(cartProducts, (i, cartProduct) => {
let shoppingCartContainer = $("<div>")
.addClass("cartproduct")
.attr("id", cartProducts[i].product.id);
$("<div>")
.addClass("image")
.html(cartProducts[i].product.image)
.appendTo(shoppingCartContainer);
$("<h3>")
.html(cartProducts[i].product.name)
.appendTo(shoppingCartContainer);
$("<p>")
.html(cartProducts[i].product.price + " " + "SEK")
.appendTo(shoppingCartContainer);
let deleteButton = $("<button>Delete</button>")
.addClass("deleteButton")
.appendTo(shoppingCartContainer);
deleteButton.on("click", () => {
deleteCartProduct(cartProducts[i]);
});
let counterdiv = $("<div>")
.addClass("counterdiv")
.appendTo(shoppingCartContainer);
let displayCounter = $("<div>").addClass("counter").appendTo(counterdiv);
$("<p>")
.addClass("activeCount")
.html(cartProducts[i].qty)
.appendTo(displayCounter);
let minus = $("<button>-</button>")
.html('<i class="fas fa-minus-circle"></i>')
.addClass("subbtn")
.on("click", () => {
subtractOneProduct(cartProducts[i]);
});
minus.appendTo(counterdiv);
let add = $("<button>+</button>")
.addClass("addbtn")
.html('<i class="fas fa-plus-circle"></i>')
.on("click", () => {
addOneProduct(cartProducts[i]);
});
add.appendTo(counterdiv);
shoppingCartContainer.appendTo($("#shoppingCart-container"));
});
}
function addOneProduct(cartProduct) {
for (let i = 0; i < cartProducts.length; i++) {
if (cartProducts[i].product.id == cartProduct.product.id) {
cartProducts[i].qty++;
createShoppingCart();
}
if (cartProducts[i].qty > 1) {
let tempsum = cartProducts[i].qty * 1;
let total = tempsum * parseInt(cartProducts[i].product.price);
listOfTotal.push(total);
updateCartTotalPrice();
addToLocalStorage(cartProducts);
notice();
} else {
updateCartTotalPrice();
notice();
addToLocalStorage(cartProducts);
}
}
}
function subtractOneProduct(cartProduct) {
for (let i = 0; i < cartProducts.length; i++) {
if (cartProducts[i].product.id == cartProduct.product.id) {
cartProducts[i].qty--;
}
if (cartProducts[i].qty < 1) {
cartProducts.splice(i, 1);
cartProduct.product.inCart = false;
}
createShoppingCart();
updateCartTotalPrice();
notice();
addToLocalStorage(cartProducts);
}
}
function deleteCartProduct(cartProduct) {
for (let i = 0; i < cartProducts.length; i++) {
if (cartProducts[i].product.id == cartProduct.product.id) {
cartProducts.splice(i, 1);
cartProduct.product.inCart = false;
}
createShoppingCart();
updateCartTotalPrice();
notice();
addToLocalStorage(cartProducts);
}
}
function clickedAddToCart(product) {
for (let i = 0; i < products.length; i++) {
if (product.id === products[i].id) {
if (product.inCart == false) {
cartProducts.push(new CartItem(product, 1));
product.inCart = true;
createShoppingCart();
updateCartTotalPrice();
addToLocalStorage(cartProducts);
notice();
} else if (product.inCart == true) {
for (let i = 0; i < cartProducts.length; i++) {
if (cartProducts[i].product.id === product.id) {
cartProducts[i].qty++;
createShoppingCart();
updateCartTotalPrice();
addToLocalStorage(cartProducts);
notice();
}
}
}
}
}
}
function updateCartTotalPrice() {
let sum = 0;
$.each(cartProducts, (i, cartProduct) => {
sum += cartProducts[i].qty * cartProducts[i].product.price;
});
$("#totalPrice").html("Total Price:" + " " + sum + " " + "SEK");
return sum;
}
function notice() {
let amount = 0;
if (cartProducts.length <= 0) {
let total = 0;
let totalamount = $(".notice");
totalamount.html("");
let noticeAmount = $("<p>").addClass("amount").html(total);
noticeAmount.appendTo(totalamount);
} else {
for (let i = 0; i < cartProducts.length; i++) {
let total = (amount += cartProducts[i].qty);
let totalamount = $(".notice");
totalamount.html("");
let noticeAmount = $("<p>").addClass("amount").html(total);
noticeAmount.appendTo(totalamount);
}
}
}
function addToLocalStorage(cartProducts) {
localStorage.setItem("cartProducts", JSON.stringify(cartProducts));
createShoppingCart(cartProducts);
}
function getFromLocalStorage() {
let cartProductFromLS = localStorage.getItem("cartProducts");
if (cartProductFromLS) {
cartProducts = JSON.parse(cartProductFromLS);
createShoppingCart(cartProducts);
}
}
| list = document.getElementById("product-container");
productlist.innerHTML = "";
$.each(products, (i, product) => {
let container = $("<div>").addClass("product").attr("id", product.id);
$("<div>").addClass("image").html(product.image).appendTo(container);
$("<h3>").html(product.name).appendTo(container);
$("<p>")
.addClass("price")
.html(product.price + " " + "SEK")
.appendTo(container);
$("<a>")
.attr("href", "javascript:;")
.addClass("show_hide")
.attr("data-content", "toggle-text")
.attr("id", "atag")
.text("Read More")
.appendTo(container);
$("<p>")
.addClass("description")
.html(product.description)
.appendTo(container);
let addToCartButtons = $("<button>Add to Cart</button>")
.addClass("AddToCartButton")
.appendTo(container);
addToCartButtons.on("click", () => {
clickedAddToCart(products[i]);
});
container.appendTo($("#product-container"));
});
$(".description").hide();
$(".show_hide").on("click", function () {
let txt = $(".description").is(":visible") ? "Read More" : "Read Less";
$(".show_hide").text(txt);
$(this).next(".description").slideToggle(200);
});
}
function crea | identifier_body |
products.js | let x = 0;
let products = [];
let cartProducts = [];
let listOfTotal = [];
class Product {
constructor(name, price, image, description) {
this.id = x++;
this.name = name;
this.price = price;
this.image = image;
this.description = description;
this.inCart = false;
}
}
class CartItem {
constructor(product, qty) {
this.product = product;
this.qty = qty;
}
}
let p1 = new Product(
"Rolex",
86890,
"<img src='./../img/rolexsilver.jpg'/>",
"Rolex presents the new generation of its Oyster Perpetual watches and brings a new model to the range, the Oyster Perpetual 41, as well as versions of the Oyster Perpetual 36 displaying dials in vivid colours. The light reflections on the case sides highlight the elegant profile of the Oyster case, which is made from Oystersteel. Measuring 41 mm or 36 mm respectively, it is fitted with a domed bezel."
);
let p2 = new Product(
"Gant",
1145,
"<img src='./../img/gantsilver.png'/>",
"Classic Gant Time Mens watch W108411 . Colors > Dial: Black, Strap: Brown, Case: Rosé gold. Very comfortable Calf leather strap. The water resistance is 5 ATM. A scratch proove Mineral glass, hardened protects your watch from involuntary injuries."
);
let p3 = new Product(
"Timex",
1395,
"<img src='./../img/timexbrown.jpg'/>",
"Metropolitan R has all the functionality you need including up to 2 weeks of battery life. This sleek design boasts an impressive AMOLED display that you can customize with over 20+ dial designs. Plus, 24/7 activity and sleep tracking for your health and fitness goals, on-board GPS, optical heart rate sensor, notifications and much more. This is the perfect smartwatch to fit your busy lifestyle. "
);
let p4 = new Product(
"Longines",
10725,
"<img src='./../img/longinessilver.jpg'/>",
"Conquest 24 has a simple yet appealing design. It features a robust package, with a 41 mm wide round steel case (with a screw-in back and a screw-in crown with protection) and a three-link bracelet (with the thickest central row) of the same material. The dial is available in three classic colors: black, silvered and blue; and it is dominated with two Roman numerals, a 24-hour scale and its attached red hand. For improved visibility, hands, numerals and indices are coated with luminescent SuperLuminova material."
);
let p5 = new Product(
"Hunters Race",
2495,
"<img src='./../img/huntersbrown.jpg'/>",
"Hunters Race is a curated collection of the finest timepieces designed in Auckland, New Zealand by father and son team Scott and Jack Ramsay. Scott has over thirty years experience in the watch industry and ensures that their watches are of the highest quality, along with providing a very professional back up service. The watch has a 12 month guarantee and comes in a beautiful gift box along with a spare strap. "
);
let p6 = new Product(
"Tommy Hilfiger",
1595,
"<img src='./../img/tommyblack.jpg'/>",
"Make a premium mark of style on your outfit with a timeless Tommy Hilfiger watch. Whether you're looking for classic leather, smart stainless steel or dressy silver watch, why not indulge in our range of men's and ladies watches and discover luxurious high fashion, sophistication in enduring designs that will inevitably stand the test of time."
);
let p7 = new Product(
"Emporio Armani",
2745,
"<img src='./../img/armanisilver.jpg'/>",
"Emporio Armani, as a brand, is known for its ready-to-wear and runway collections. It focuses on current trends and modern traits."
);
let p8 = new Product(
"Fossil",
1295,
"<img src='./../img/fossilsilver.jpg'/>",
"This watch really does remind me of Omega’s moonwatch. It’s not a direct homage but is certainly very similar in its styling. And really, it’s the appearance that attracted me to this watch. It is a mid-sized, racing chronograph with real retro charm."
);
let p9 = new Product(
"Thomas Sabo",
2625,
"<img src='./../img/thomasblack.jpg'/>",
"Black and silver men's watch: The Statement chronograph from THOMAS SABO convinces with a rebellious design. The dial of the dial, the ring of the case, the crown and the buttons of the case and also the detailed bracelet in stainless steel attract the eye thanks to its pyramid design and typically THOMAS SABO DNA."
);
let p10 = new Product(
"Braun",
2495,
"<img src='./../img/braunblack.jpg'/>",
"A collaborative design project that partners German design with British fashion. Braun and Paul Smith have teamed up on a limited-edition clock and watch project. Paul Smith is one of Britain’s foremost designers, renowned for his creative spirit, which combines tradition and modernity."
);
let p11 = new Product(
"Omega",
16305,
"<img src='./../img/omega.jpg'/>",
" The OMEGA Speedmaster is one of OMEGA’s most iconic timepieces. Having been a part of all six lunar missions, the legendary Speedmaster is an impressive representation of the brand’s adventurous pioneering spirit."
);
let p12 = new Product(
"Oris",
52900,
"<img src='./../img/oris.jpg'/>",
"The Aquis collection of diver’s watches offers real-world functionality in an innovative package, through a combination of considered design, high-performance materials and fit-for-purpose functions."
);
$(function () {
addProduct();
createProduct();
getFromLocalStorage();
updateCartTotalPrice();
notice();
let select = $(".form-control");
$(select).on("change", function (e) {
console.log($(this).val());
if ($(this).val() == "l2h") {
products.sort((a, b) => {
if (a.price > b.price) {
return 1;
}
if (a.price < b.price) {
return -1;
}
return 0;
});
createProduct();
} else if ($(this).val() == "h2l") {
products.reverse();
}
createProduct();
});
$("#buyButton").on("click", function () {
if (cartProducts.length <= 0) {
alert("Shopping cart is empty");
} else {
for (let i = 0; i < cartProducts.length; i++) {
if (cartProducts.length >= 0) {
window.location.href = "./../html/checkout.html";
}
}
}
});
$("#dialog").dialog({
autoOpen: false,
position: { my: "right top", at: "right top", of: window },
show: {
effect: "blind",
duration: 1000,
},
hide: {
effect: "blind",
duration: 1000,
},
});
$("#opener").on("click", function () {
if (!$("#dialog").dialog("isOpen")) {
$("#dialog").dialog("open");
} else {
$("#dialog").dialog("close");
}
});
});
function addProduct() {
products.push(p1, p2, p3, p4, p5, p6, p7, p8, p9, p10, p11, p12);
}
function createProduct() {
let productlist = document.getElementById("product-container");
productlist.innerHTML = "";
$.each(products, (i, product) => {
let container = $("<div>").addClass("product").attr("id", product.id);
$("<div>").addClass("image").html(product.image).appendTo(container);
$("<h3>").html(product.name).appendTo(container);
$("<p>")
.addClass("price")
.html(product.price + " " + "SEK")
.appendTo(container);
$("<a>")
.attr("href", "javascript:;")
.addClass("show_hide")
.attr("data-content", "toggle-text")
.attr("id", "atag")
.text("Read More")
.appendTo(container);
$("<p>")
.addClass("description")
.html(product.description)
.appendTo(container);
let addToCartButtons = $("<button>Add to Cart</button>")
.addClass("AddToCartButton")
.appendTo(container);
addToCartButtons.on("click", () => {
clickedAddToCart(products[i]);
});
container.appendTo($("#product-container"));
});
$(".description").hide();
$(".show_hide").on("click", function () {
let txt = $(".description").is(":visible") ? "Read More" : "Read Less";
$(".show_hide").text(txt);
$(this).next(".description").slideToggle(200);
});
}
function createShoppingCart() {
let shoppingcart = document.getElementById("shoppingCart-container");
shoppingcart.innerHTML = "";
$.each(cartProducts, (i, cartProduct) => {
let shoppingCartContainer = $("<div>")
.addClass("cartproduct")
.attr("id", cartProducts[i].product.id);
$("<div>")
.addClass("image")
.html(cartProducts[i].product.image)
.appendTo(shoppingCartContainer);
$("<h3>")
.html(cartProducts[i].product.name)
.appendTo(shoppingCartContainer);
$("<p>")
.html(cartProducts[i].product.price + " " + "SEK")
.appendTo(shoppingCartContainer);
let deleteButton = $("<button>Delete</button>")
.addClass("deleteButton")
.appendTo(shoppingCartContainer);
deleteButton.on("click", () => {
deleteCartProduct(cartProducts[i]);
});
let counterdiv = $("<div>")
.addClass("counterdiv")
.appendTo(shoppingCartContainer);
let displayCounter = $("<div>").addClass("counter").appendTo(counterdiv);
$("<p>")
.addClass("activeCount")
.html(cartProducts[i].qty)
.appendTo(displayCounter);
let minus = $("<button>-</button>")
.html('<i class="fas fa-minus-circle"></i>')
.addClass("subbtn")
.on("click", () => {
subtractOneProduct(cartProducts[i]);
});
minus.appendTo(counterdiv);
let add = $("<button>+</button>")
.addClass("addbtn")
.html('<i class="fas fa-plus-circle"></i>')
.on("click", () => {
addOneProduct(cartProducts[i]);
});
add.appendTo(counterdiv);
shoppingCartContainer.appendTo($("#shoppingCart-container"));
});
}
function addOneProduct(cartProduct) {
for (let i = 0; i < cartProducts.length; i++) {
if (cartProducts[i].product.id == cartProduct.product.id) {
cartProducts[i].qty++;
createShoppingCart();
}
if (cartProducts[i].qty > 1) {
let tempsum = cartProducts[i].qty * 1;
let total = tempsum * parseInt(cartProducts[i].product.price);
listOfTotal.push(total);
updateCartTotalPrice();
addToLocalStorage(cartProducts);
notice();
} else {
updateCartTotalPrice();
notice();
addToLocalStorage(cartProducts);
}
}
}
function subtractOneProduct(cartProduct) {
for (let i = 0; i < cartProducts.length; i++) {
if (cartProducts[i].product.id == cartProduct.product.id) {
cartProducts[i].qty--;
}
if (cartProducts[i].qty < 1) {
cartProducts.splice(i, 1);
cartProduct.product.inCart = false;
}
createShoppingCart();
updateCartTotalPrice();
notice();
addToLocalStorage(cartProducts);
}
}
function deleteCartProdu |
for (let i = 0; i < cartProducts.length; i++) {
if (cartProducts[i].product.id == cartProduct.product.id) {
cartProducts.splice(i, 1);
cartProduct.product.inCart = false;
}
createShoppingCart();
updateCartTotalPrice();
notice();
addToLocalStorage(cartProducts);
}
}
function clickedAddToCart(product) {
for (let i = 0; i < products.length; i++) {
if (product.id === products[i].id) {
if (product.inCart == false) {
cartProducts.push(new CartItem(product, 1));
product.inCart = true;
createShoppingCart();
updateCartTotalPrice();
addToLocalStorage(cartProducts);
notice();
} else if (product.inCart == true) {
for (let i = 0; i < cartProducts.length; i++) {
if (cartProducts[i].product.id === product.id) {
cartProducts[i].qty++;
createShoppingCart();
updateCartTotalPrice();
addToLocalStorage(cartProducts);
notice();
}
}
}
}
}
}
function updateCartTotalPrice() {
let sum = 0;
$.each(cartProducts, (i, cartProduct) => {
sum += cartProducts[i].qty * cartProducts[i].product.price;
});
$("#totalPrice").html("Total Price:" + " " + sum + " " + "SEK");
return sum;
}
function notice() {
let amount = 0;
if (cartProducts.length <= 0) {
let total = 0;
let totalamount = $(".notice");
totalamount.html("");
let noticeAmount = $("<p>").addClass("amount").html(total);
noticeAmount.appendTo(totalamount);
} else {
for (let i = 0; i < cartProducts.length; i++) {
let total = (amount += cartProducts[i].qty);
let totalamount = $(".notice");
totalamount.html("");
let noticeAmount = $("<p>").addClass("amount").html(total);
noticeAmount.appendTo(totalamount);
}
}
}
function addToLocalStorage(cartProducts) {
localStorage.setItem("cartProducts", JSON.stringify(cartProducts));
createShoppingCart(cartProducts);
}
function getFromLocalStorage() {
let cartProductFromLS = localStorage.getItem("cartProducts");
if (cartProductFromLS) {
cartProducts = JSON.parse(cartProductFromLS);
createShoppingCart(cartProducts);
}
}
| ct(cartProduct) { | identifier_name |
products.js | let x = 0;
let products = [];
let cartProducts = [];
let listOfTotal = [];
class Product {
constructor(name, price, image, description) {
this.id = x++;
this.name = name;
this.price = price;
this.image = image;
this.description = description;
this.inCart = false;
}
}
class CartItem {
constructor(product, qty) {
this.product = product;
this.qty = qty;
}
}
let p1 = new Product(
"Rolex",
86890,
"<img src='./../img/rolexsilver.jpg'/>",
"Rolex presents the new generation of its Oyster Perpetual watches and brings a new model to the range, the Oyster Perpetual 41, as well as versions of the Oyster Perpetual 36 displaying dials in vivid colours. The light reflections on the case sides highlight the elegant profile of the Oyster case, which is made from Oystersteel. Measuring 41 mm or 36 mm respectively, it is fitted with a domed bezel."
);
let p2 = new Product(
"Gant",
1145,
"<img src='./../img/gantsilver.png'/>",
"Classic Gant Time Mens watch W108411 . Colors > Dial: Black, Strap: Brown, Case: Rosé gold. Very comfortable Calf leather strap. The water resistance is 5 ATM. A scratch proove Mineral glass, hardened protects your watch from involuntary injuries."
);
let p3 = new Product(
"Timex",
1395,
"<img src='./../img/timexbrown.jpg'/>",
"Metropolitan R has all the functionality you need including up to 2 weeks of battery life. This sleek design boasts an impressive AMOLED display that you can customize with over 20+ dial designs. Plus, 24/7 activity and sleep tracking for your health and fitness goals, on-board GPS, optical heart rate sensor, notifications and much more. This is the perfect smartwatch to fit your busy lifestyle. "
);
let p4 = new Product(
"Longines",
10725,
"<img src='./../img/longinessilver.jpg'/>",
"Conquest 24 has a simple yet appealing design. It features a robust package, with a 41 mm wide round steel case (with a screw-in back and a screw-in crown with protection) and a three-link bracelet (with the thickest central row) of the same material. The dial is available in three classic colors: black, silvered and blue; and it is dominated with two Roman numerals, a 24-hour scale and its attached red hand. For improved visibility, hands, numerals and indices are coated with luminescent SuperLuminova material."
);
let p5 = new Product(
"Hunters Race",
2495,
"<img src='./../img/huntersbrown.jpg'/>",
"Hunters Race is a curated collection of the finest timepieces designed in Auckland, New Zealand by father and son team Scott and Jack Ramsay. Scott has over thirty years experience in the watch industry and ensures that their watches are of the highest quality, along with providing a very professional back up service. The watch has a 12 month guarantee and comes in a beautiful gift box along with a spare strap. "
);
let p6 = new Product(
"Tommy Hilfiger",
1595,
"<img src='./../img/tommyblack.jpg'/>",
"Make a premium mark of style on your outfit with a timeless Tommy Hilfiger watch. Whether you're looking for classic leather, smart stainless steel or dressy silver watch, why not indulge in our range of men's and ladies watches and discover luxurious high fashion, sophistication in enduring designs that will inevitably stand the test of time."
);
let p7 = new Product(
"Emporio Armani",
2745,
"<img src='./../img/armanisilver.jpg'/>",
"Emporio Armani, as a brand, is known for its ready-to-wear and runway collections. It focuses on current trends and modern traits."
);
let p8 = new Product(
"Fossil",
1295,
"<img src='./../img/fossilsilver.jpg'/>",
"This watch really does remind me of Omega’s moonwatch. It’s not a direct homage but is certainly very similar in its styling. And really, it’s the appearance that attracted me to this watch. It is a mid-sized, racing chronograph with real retro charm."
);
let p9 = new Product(
"Thomas Sabo",
2625,
"<img src='./../img/thomasblack.jpg'/>",
"Black and silver men's watch: The Statement chronograph from THOMAS SABO convinces with a rebellious design. The dial of the dial, the ring of the case, the crown and the buttons of the case and also the detailed bracelet in stainless steel attract the eye thanks to its pyramid design and typically THOMAS SABO DNA."
);
let p10 = new Product(
"Braun",
2495,
"<img src='./../img/braunblack.jpg'/>",
"A collaborative design project that partners German design with British fashion. Braun and Paul Smith have teamed up on a limited-edition clock and watch project. Paul Smith is one of Britain’s foremost designers, renowned for his creative spirit, which combines tradition and modernity."
);
let p11 = new Product(
"Omega",
16305,
"<img src='./../img/omega.jpg'/>",
" The OMEGA Speedmaster is one of OMEGA’s most iconic timepieces. Having been a part of all six lunar missions, the legendary Speedmaster is an impressive representation of the brand’s adventurous pioneering spirit."
);
let p12 = new Product(
"Oris",
52900,
"<img src='./../img/oris.jpg'/>",
"The Aquis collection of diver’s watches offers real-world functionality in an innovative package, through a combination of considered design, high-performance materials and fit-for-purpose functions."
);
$(function () {
addProduct();
createProduct();
getFromLocalStorage();
updateCartTotalPrice();
notice();
let select = $(".form-control");
$(select).on("change", function (e) {
console.log($(this).val());
if ($(this).val() == "l2h") {
products.sort((a, b) => {
if (a.price > b.price) {
return 1;
}
if (a.price < b.price) {
return -1;
}
return 0;
});
createProduct();
} else if ($(this).val() == "h2l") {
products.reverse();
}
createProduct();
});
$("#buyButton").on("click", function () {
if (cartProducts.length <= 0) {
alert("Shopping cart is empty");
} else {
for (let i = 0; i < cartProducts.length; i++) {
if (cartProducts.length >= 0) {
window.location.href = "./../html/checkout.html";
}
}
}
});
$("#dialog").dialog({
autoOpen: false,
position: { my: "right top", at: "right top", of: window },
show: {
effect: "blind",
duration: 1000,
},
hide: {
effect: "blind",
duration: 1000,
},
});
$("#opener").on("click", function () {
if (!$("#dialog").dialog("isOpen")) {
$("#dialog").dialog("open");
} else {
$("#dialog").dialog("close");
}
});
});
function addProduct() {
products.push(p1, p2, p3, p4, p5, p6, p7, p8, p9, p10, p11, p12);
}
function createProduct() {
let productlist = document.getElementById("product-container");
productlist.innerHTML = "";
$.each(products, (i, product) => {
let container = $("<div>").addClass("product").attr("id", product.id);
$("<div>").addClass("image").html(product.image).appendTo(container);
$("<h3>").html(product.name).appendTo(container);
$("<p>")
.addClass("price")
.html(product.price + " " + "SEK")
.appendTo(container);
$("<a>")
.attr("href", "javascript:;")
.addClass("show_hide")
.attr("data-content", "toggle-text")
.attr("id", "atag")
.text("Read More")
.appendTo(container);
$("<p>")
.addClass("description")
.html(product.description)
.appendTo(container);
let addToCartButtons = $("<button>Add to Cart</button>")
.addClass("AddToCartButton")
.appendTo(container);
addToCartButtons.on("click", () => {
clickedAddToCart(products[i]);
});
container.appendTo($("#product-container"));
});
$(".description").hide();
$(".show_hide").on("click", function () {
let txt = $(".description").is(":visible") ? "Read More" : "Read Less";
$(".show_hide").text(txt);
$(this).next(".description").slideToggle(200);
});
}
function createShoppingCart() {
let shoppingcart = document.getElementById("shoppingCart-container");
shoppingcart.innerHTML = "";
$.each(cartProducts, (i, cartProduct) => {
let shoppingCartContainer = $("<div>")
.addClass("cartproduct")
.attr("id", cartProducts[i].product.id);
$("<div>")
.addClass("image")
.html(cartProducts[i].product.image)
.appendTo(shoppingCartContainer);
$("<h3>")
.html(cartProducts[i].product.name)
.appendTo(shoppingCartContainer);
$("<p>")
.html(cartProducts[i].product.price + " " + "SEK")
.appendTo(shoppingCartContainer);
let deleteButton = $("<button>Delete</button>")
.addClass("deleteButton")
.appendTo(shoppingCartContainer);
deleteButton.on("click", () => {
deleteCartProduct(cartProducts[i]);
});
let counterdiv = $("<div>")
.addClass("counterdiv")
.appendTo(shoppingCartContainer);
let displayCounter = $("<div>").addClass("counter").appendTo(counterdiv);
$("<p>")
.addClass("activeCount")
.html(cartProducts[i].qty)
.appendTo(displayCounter);
let minus = $("<button>-</button>")
.html('<i class="fas fa-minus-circle"></i>')
.addClass("subbtn")
.on("click", () => {
subtractOneProduct(cartProducts[i]);
});
minus.appendTo(counterdiv);
let add = $("<button>+</button>")
.addClass("addbtn")
.html('<i class="fas fa-plus-circle"></i>')
.on("click", () => {
addOneProduct(cartProducts[i]);
});
add.appendTo(counterdiv);
shoppingCartContainer.appendTo($("#shoppingCart-container"));
});
}
function addOneProduct(cartProduct) {
for (let i = 0; i < cartProducts.length; i++) {
if (cartProducts[i].product.id == cartProduct.product.id) {
cartProducts[i].qty++;
createShoppingCart();
}
if (cartProducts[i].qty > 1) {
let tempsum = cartProducts[i].qty * 1;
let total = tempsum * parseInt(cartProducts[i].product.price);
listOfTotal.push(total);
updateCartTotalPrice();
addToLocalStorage(cartProducts);
notice();
} else {
updateCartTotalPrice();
notice();
addToLocalStorage(cartProducts);
}
}
}
function subtractOneProduct(cartProduct) {
for (let i = 0; i < cartProducts.length; i++) {
if (cartProducts[i].product.id == cartProduct.product.id) {
cartProducts[i].qty--;
}
if (cartProducts[i].qty < 1) {
cartPro | pingCart();
updateCartTotalPrice();
notice();
addToLocalStorage(cartProducts);
}
}
function deleteCartProduct(cartProduct) {
for (let i = 0; i < cartProducts.length; i++) {
if (cartProducts[i].product.id == cartProduct.product.id) {
cartProducts.splice(i, 1);
cartProduct.product.inCart = false;
}
createShoppingCart();
updateCartTotalPrice();
notice();
addToLocalStorage(cartProducts);
}
}
function clickedAddToCart(product) {
for (let i = 0; i < products.length; i++) {
if (product.id === products[i].id) {
if (product.inCart == false) {
cartProducts.push(new CartItem(product, 1));
product.inCart = true;
createShoppingCart();
updateCartTotalPrice();
addToLocalStorage(cartProducts);
notice();
} else if (product.inCart == true) {
for (let i = 0; i < cartProducts.length; i++) {
if (cartProducts[i].product.id === product.id) {
cartProducts[i].qty++;
createShoppingCart();
updateCartTotalPrice();
addToLocalStorage(cartProducts);
notice();
}
}
}
}
}
}
function updateCartTotalPrice() {
let sum = 0;
$.each(cartProducts, (i, cartProduct) => {
sum += cartProducts[i].qty * cartProducts[i].product.price;
});
$("#totalPrice").html("Total Price:" + " " + sum + " " + "SEK");
return sum;
}
function notice() {
let amount = 0;
if (cartProducts.length <= 0) {
let total = 0;
let totalamount = $(".notice");
totalamount.html("");
let noticeAmount = $("<p>").addClass("amount").html(total);
noticeAmount.appendTo(totalamount);
} else {
for (let i = 0; i < cartProducts.length; i++) {
let total = (amount += cartProducts[i].qty);
let totalamount = $(".notice");
totalamount.html("");
let noticeAmount = $("<p>").addClass("amount").html(total);
noticeAmount.appendTo(totalamount);
}
}
}
function addToLocalStorage(cartProducts) {
localStorage.setItem("cartProducts", JSON.stringify(cartProducts));
createShoppingCart(cartProducts);
}
function getFromLocalStorage() {
let cartProductFromLS = localStorage.getItem("cartProducts");
if (cartProductFromLS) {
cartProducts = JSON.parse(cartProductFromLS);
createShoppingCart(cartProducts);
}
}
| ducts.splice(i, 1);
cartProduct.product.inCart = false;
}
createShop | conditional_block |
cli.py | """Command line interface to the OSF
These functions implement the functionality of the command-line interface.
"""
from __future__ import print_function
from functools import wraps
import getpass
import os
import sys
from six.moves import configparser
from six.moves import input
from tqdm import tqdm
from .api import OSF
from .exceptions import UnauthorizedException
from .utils import norm_remote_path, split_storage, makedirs, checksum
def config_from_file():
if os.path.exists(".osfcli.config"):
config_ = configparser.ConfigParser()
config_.read(".osfcli.config")
# for python2 compatibility
config = dict(config_.items('osf'))
else:
config = {}
return config
def config_from_env(config):
username = os.getenv("OSF_USERNAME")
if username is not None:
config['username'] = username
project = os.getenv("OSF_PROJECT")
if project is not None:
config['project'] = project
return config
def | (args, config):
if args.username is None:
username = config.get('username')
else:
username = args.username
return username
def _setup_osf(args):
# Command line options have precedence over environment variables,
# which have precedence over the config file.
config = config_from_env(config_from_file())
username = _get_username(args, config)
project = config.get('project')
if args.project is None:
args.project = project
# still None? We are in trouble
if args.project is None:
sys.exit('You have to specify a project ID via the command line,'
' configuration file or environment variable.')
password = None
if username is not None:
password = os.getenv("OSF_PASSWORD")
# Prompt user when password is not set
if password is None:
password = getpass.getpass('Please input your password: ')
return OSF(username=username, password=password)
def might_need_auth(f):
"""Decorate a CLI function that might require authentication.
Catches any UnauthorizedException raised, prints a helpful message and
then exits.
"""
@wraps(f)
def wrapper(cli_args):
try:
return_value = f(cli_args)
except UnauthorizedException as e:
config = config_from_env(config_from_file())
username = _get_username(cli_args, config)
if username is None:
sys.exit("Please set a username (run `osf -h` for details).")
else:
sys.exit("You are not authorized to access this project.")
return return_value
return wrapper
def init(args):
"""Initialize or edit an existing .osfcli.config file."""
# reading existing config file, convert to configparser object
config = config_from_file()
config_ = configparser.ConfigParser()
config_.add_section('osf')
if 'username' not in config.keys():
config_.set('osf', 'username', '')
else:
config_.set('osf', 'username', config['username'])
if 'project' not in config.keys():
config_.set('osf', 'project', '')
else:
config_.set('osf', 'project', config['project'])
# now we can start asking for new values
print('Provide a username for the config file [current username: {}]:'.format(
config_.get('osf', 'username')))
username = input()
if username:
config_.set('osf', 'username', username)
print('Provide a project for the config file [current project: {}]:'.format(
config_.get('osf', 'project')))
project = input()
if project:
config_.set('osf', 'project', project)
cfgfile = open(".osfcli.config", "w")
config_.write(cfgfile)
cfgfile.close()
@might_need_auth
def clone(args):
"""Copy all files from all storages of a project.
The output directory defaults to the current directory.
If the project is private you need to specify a username.
If args.update is True, overwrite any existing local files only if local and
remote files differ.
"""
osf = _setup_osf(args)
project = osf.project(args.project)
output_dir = args.project
if args.output is not None:
output_dir = args.output
with tqdm(unit='files') as pbar:
for store in project.storages:
prefix = os.path.join(output_dir, store.name)
for file_ in store.files:
path = file_.path
if path.startswith('/'):
path = path[1:]
path = os.path.join(prefix, path)
if os.path.exists(path) and args.update:
if checksum(path) == file_.hashes.get('md5'):
continue
directory, _ = os.path.split(path)
makedirs(directory, exist_ok=True)
with open(path, "wb") as f:
file_.write_to(f)
pbar.update()
@might_need_auth
def fetch(args):
"""Fetch an individual file from a project.
The first part of the remote path is interpreted as the name of the
storage provider. If there is no match the default (osfstorage) is
used.
The local path defaults to the name of the remote file.
If the project is private you need to specify a username.
If args.force is True, write local file even if that file already exists.
If args.force is False but args.update is True, overwrite an existing local
file only if local and remote files differ.
"""
storage, remote_path = split_storage(args.remote)
local_path = args.local
if local_path is None:
_, local_path = os.path.split(remote_path)
local_path_exists = os.path.exists(local_path)
if local_path_exists and not args.force and not args.update:
sys.exit("Local file %s already exists, not overwriting." % local_path)
directory, _ = os.path.split(local_path)
if directory:
makedirs(directory, exist_ok=True)
osf = _setup_osf(args)
project = osf.project(args.project)
store = project.storage(storage)
for file_ in store.files:
if norm_remote_path(file_.path) == remote_path:
if local_path_exists and not args.force and args.update:
if file_.hashes.get('md5') == checksum(local_path):
print("Local file %s already matches remote." % local_path)
break
with open(local_path, 'wb') as fp:
file_.write_to(fp)
# only fetching one file so we are done
break
@might_need_auth
def list_(args):
"""List all files from all storages for project.
If the project is private you need to specify a username.
"""
osf = _setup_osf(args)
project = osf.project(args.project)
for store in project.storages:
prefix = store.name
for file_ in store.files:
path = file_.path
if path.startswith('/'):
path = path[1:]
print(os.path.join(prefix, path))
@might_need_auth
def upload(args):
"""Upload a new file to an existing project.
The first part of the remote path is interpreted as the name of the
storage provider. If there is no match the default (osfstorage) is
used.
If the project is private you need to specify a username.
To upload a whole directory (and all its sub-directories) use the `-r`
command-line option. If your source directory name ends in a / then
files will be created directly in the remote directory. If it does not
end in a slash an extra sub-directory with the name of the local directory
will be created.
To place contents of local directory `foo` in remote directory `bar/foo`:
$ osf upload -r foo bar
To place contents of local directory `foo` in remote directory `bar`:
$ osf upload -r foo/ bar
"""
osf = _setup_osf(args)
if osf.username is None or osf.password is None:
sys.exit('To upload a file you need to provide a username and'
' password.')
project = osf.project(args.project)
storage, remote_path = split_storage(args.destination)
if remote_path == '':
remote_path = os.path.split(args.source)[-1]
store = project.storage(storage)
if args.recursive:
if not os.path.isdir(args.source):
raise RuntimeError("Expected source ({}) to be a directory when "
"using recursive mode.".format(args.source))
# local name of the directory that is being uploaded
_, dir_name = os.path.split(args.source)
for root, _, files in os.walk(args.source):
subdir_path = os.path.relpath(root, args.source)
for fname in files:
local_path = os.path.join(root, fname)
with open(local_path, 'rb') as fp:
# build the remote path + fname
name = os.path.join(remote_path, dir_name, subdir_path,
fname)
store.create_file(name, fp, force=args.force,
update=args.update)
else:
with open(args.source, 'rb') as fp:
store.create_file(remote_path, fp, force=args.force,
update=args.update)
@might_need_auth
def remove(args):
"""Remove a file from the project's storage.
The first part of the remote path is interpreted as the name of the
storage provider. If there is no match the default (osfstorage) is
used.
"""
osf = _setup_osf(args)
if osf.username is None or osf.password is None:
sys.exit('To remove a file you need to provide a username and'
' password.')
project = osf.project(args.project)
storage, remote_path = split_storage(args.target)
store = project.storage(storage)
for f in store.files:
if norm_remote_path(f.path) == remote_path:
f.remove()
| _get_username | identifier_name |
cli.py | """Command line interface to the OSF
These functions implement the functionality of the command-line interface.
"""
from __future__ import print_function
from functools import wraps
import getpass
import os
import sys
from six.moves import configparser
from six.moves import input
from tqdm import tqdm
from .api import OSF
from .exceptions import UnauthorizedException
from .utils import norm_remote_path, split_storage, makedirs, checksum
def config_from_file():
if os.path.exists(".osfcli.config"):
config_ = configparser.ConfigParser()
config_.read(".osfcli.config")
# for python2 compatibility
config = dict(config_.items('osf'))
else:
config = {}
return config
def config_from_env(config):
username = os.getenv("OSF_USERNAME")
if username is not None:
config['username'] = username
project = os.getenv("OSF_PROJECT")
if project is not None:
config['project'] = project
return config
def _get_username(args, config):
if args.username is None:
username = config.get('username')
else:
username = args.username
return username
def _setup_osf(args):
# Command line options have precedence over environment variables,
# which have precedence over the config file.
config = config_from_env(config_from_file())
username = _get_username(args, config)
project = config.get('project')
if args.project is None:
args.project = project
# still None? We are in trouble
if args.project is None:
sys.exit('You have to specify a project ID via the command line,'
' configuration file or environment variable.')
password = None
if username is not None:
password = os.getenv("OSF_PASSWORD")
# Prompt user when password is not set
if password is None:
password = getpass.getpass('Please input your password: ')
return OSF(username=username, password=password)
def might_need_auth(f):
"""Decorate a CLI function that might require authentication.
Catches any UnauthorizedException raised, prints a helpful message and
then exits.
"""
@wraps(f)
def wrapper(cli_args):
try:
return_value = f(cli_args)
except UnauthorizedException as e:
config = config_from_env(config_from_file())
username = _get_username(cli_args, config)
if username is None:
sys.exit("Please set a username (run `osf -h` for details).")
else:
sys.exit("You are not authorized to access this project.")
return return_value
return wrapper
def init(args):
"""Initialize or edit an existing .osfcli.config file."""
# reading existing config file, convert to configparser object
config = config_from_file()
config_ = configparser.ConfigParser()
config_.add_section('osf')
if 'username' not in config.keys():
config_.set('osf', 'username', '')
else:
config_.set('osf', 'username', config['username'])
if 'project' not in config.keys():
config_.set('osf', 'project', '')
else:
config_.set('osf', 'project', config['project'])
# now we can start asking for new values
print('Provide a username for the config file [current username: {}]:'.format(
config_.get('osf', 'username')))
username = input()
if username:
config_.set('osf', 'username', username)
print('Provide a project for the config file [current project: {}]:'.format(
config_.get('osf', 'project')))
project = input()
if project:
config_.set('osf', 'project', project)
cfgfile = open(".osfcli.config", "w")
config_.write(cfgfile)
cfgfile.close()
@might_need_auth
def clone(args):
"""Copy all files from all storages of a project.
The output directory defaults to the current directory.
If the project is private you need to specify a username.
If args.update is True, overwrite any existing local files only if local and
remote files differ.
"""
osf = _setup_osf(args)
project = osf.project(args.project)
output_dir = args.project
if args.output is not None:
output_dir = args.output
with tqdm(unit='files') as pbar:
for store in project.storages:
|
@might_need_auth
def fetch(args):
"""Fetch an individual file from a project.
The first part of the remote path is interpreted as the name of the
storage provider. If there is no match the default (osfstorage) is
used.
The local path defaults to the name of the remote file.
If the project is private you need to specify a username.
If args.force is True, write local file even if that file already exists.
If args.force is False but args.update is True, overwrite an existing local
file only if local and remote files differ.
"""
storage, remote_path = split_storage(args.remote)
local_path = args.local
if local_path is None:
_, local_path = os.path.split(remote_path)
local_path_exists = os.path.exists(local_path)
if local_path_exists and not args.force and not args.update:
sys.exit("Local file %s already exists, not overwriting." % local_path)
directory, _ = os.path.split(local_path)
if directory:
makedirs(directory, exist_ok=True)
osf = _setup_osf(args)
project = osf.project(args.project)
store = project.storage(storage)
for file_ in store.files:
if norm_remote_path(file_.path) == remote_path:
if local_path_exists and not args.force and args.update:
if file_.hashes.get('md5') == checksum(local_path):
print("Local file %s already matches remote." % local_path)
break
with open(local_path, 'wb') as fp:
file_.write_to(fp)
# only fetching one file so we are done
break
@might_need_auth
def list_(args):
"""List all files from all storages for project.
If the project is private you need to specify a username.
"""
osf = _setup_osf(args)
project = osf.project(args.project)
for store in project.storages:
prefix = store.name
for file_ in store.files:
path = file_.path
if path.startswith('/'):
path = path[1:]
print(os.path.join(prefix, path))
@might_need_auth
def upload(args):
"""Upload a new file to an existing project.
The first part of the remote path is interpreted as the name of the
storage provider. If there is no match the default (osfstorage) is
used.
If the project is private you need to specify a username.
To upload a whole directory (and all its sub-directories) use the `-r`
command-line option. If your source directory name ends in a / then
files will be created directly in the remote directory. If it does not
end in a slash an extra sub-directory with the name of the local directory
will be created.
To place contents of local directory `foo` in remote directory `bar/foo`:
$ osf upload -r foo bar
To place contents of local directory `foo` in remote directory `bar`:
$ osf upload -r foo/ bar
"""
osf = _setup_osf(args)
if osf.username is None or osf.password is None:
sys.exit('To upload a file you need to provide a username and'
' password.')
project = osf.project(args.project)
storage, remote_path = split_storage(args.destination)
if remote_path == '':
remote_path = os.path.split(args.source)[-1]
store = project.storage(storage)
if args.recursive:
if not os.path.isdir(args.source):
raise RuntimeError("Expected source ({}) to be a directory when "
"using recursive mode.".format(args.source))
# local name of the directory that is being uploaded
_, dir_name = os.path.split(args.source)
for root, _, files in os.walk(args.source):
subdir_path = os.path.relpath(root, args.source)
for fname in files:
local_path = os.path.join(root, fname)
with open(local_path, 'rb') as fp:
# build the remote path + fname
name = os.path.join(remote_path, dir_name, subdir_path,
fname)
store.create_file(name, fp, force=args.force,
update=args.update)
else:
with open(args.source, 'rb') as fp:
store.create_file(remote_path, fp, force=args.force,
update=args.update)
@might_need_auth
def remove(args):
"""Remove a file from the project's storage.
The first part of the remote path is interpreted as the name of the
storage provider. If there is no match the default (osfstorage) is
used.
"""
osf = _setup_osf(args)
if osf.username is None or osf.password is None:
sys.exit('To remove a file you need to provide a username and'
' password.')
project = osf.project(args.project)
storage, remote_path = split_storage(args.target)
store = project.storage(storage)
for f in store.files:
if norm_remote_path(f.path) == remote_path:
f.remove()
| prefix = os.path.join(output_dir, store.name)
for file_ in store.files:
path = file_.path
if path.startswith('/'):
path = path[1:]
path = os.path.join(prefix, path)
if os.path.exists(path) and args.update:
if checksum(path) == file_.hashes.get('md5'):
continue
directory, _ = os.path.split(path)
makedirs(directory, exist_ok=True)
with open(path, "wb") as f:
file_.write_to(f)
pbar.update() | conditional_block |
cli.py | """Command line interface to the OSF
These functions implement the functionality of the command-line interface.
"""
from __future__ import print_function
from functools import wraps
import getpass
import os
import sys
from six.moves import configparser
from six.moves import input
from tqdm import tqdm
from .api import OSF
from .exceptions import UnauthorizedException
from .utils import norm_remote_path, split_storage, makedirs, checksum
def config_from_file():
if os.path.exists(".osfcli.config"):
config_ = configparser.ConfigParser()
config_.read(".osfcli.config")
# for python2 compatibility
config = dict(config_.items('osf'))
else:
config = {}
return config
def config_from_env(config):
username = os.getenv("OSF_USERNAME")
if username is not None:
config['username'] = username
project = os.getenv("OSF_PROJECT")
if project is not None:
config['project'] = project
return config
def _get_username(args, config):
if args.username is None:
username = config.get('username')
else:
username = args.username
return username
def _setup_osf(args):
# Command line options have precedence over environment variables,
# which have precedence over the config file.
config = config_from_env(config_from_file())
username = _get_username(args, config)
project = config.get('project')
if args.project is None:
args.project = project
# still None? We are in trouble
if args.project is None:
sys.exit('You have to specify a project ID via the command line,'
' configuration file or environment variable.')
password = None
if username is not None:
password = os.getenv("OSF_PASSWORD")
# Prompt user when password is not set
if password is None:
password = getpass.getpass('Please input your password: ')
return OSF(username=username, password=password)
def might_need_auth(f):
"""Decorate a CLI function that might require authentication.
Catches any UnauthorizedException raised, prints a helpful message and
then exits.
"""
@wraps(f)
def wrapper(cli_args):
try:
return_value = f(cli_args)
except UnauthorizedException as e:
config = config_from_env(config_from_file())
username = _get_username(cli_args, config)
if username is None:
sys.exit("Please set a username (run `osf -h` for details).")
else:
sys.exit("You are not authorized to access this project.")
return return_value
return wrapper
def init(args):
"""Initialize or edit an existing .osfcli.config file."""
# reading existing config file, convert to configparser object
config = config_from_file()
config_ = configparser.ConfigParser()
config_.add_section('osf')
if 'username' not in config.keys():
config_.set('osf', 'username', '')
else:
config_.set('osf', 'username', config['username'])
if 'project' not in config.keys():
config_.set('osf', 'project', '')
else:
config_.set('osf', 'project', config['project'])
# now we can start asking for new values
print('Provide a username for the config file [current username: {}]:'.format(
config_.get('osf', 'username')))
username = input()
if username:
config_.set('osf', 'username', username)
print('Provide a project for the config file [current project: {}]:'.format(
config_.get('osf', 'project')))
project = input()
if project:
config_.set('osf', 'project', project)
cfgfile = open(".osfcli.config", "w")
config_.write(cfgfile)
cfgfile.close()
@might_need_auth
def clone(args):
"""Copy all files from all storages of a project.
The output directory defaults to the current directory.
If the project is private you need to specify a username.
If args.update is True, overwrite any existing local files only if local and
remote files differ.
"""
osf = _setup_osf(args)
project = osf.project(args.project)
output_dir = args.project
if args.output is not None:
output_dir = args.output
with tqdm(unit='files') as pbar:
for store in project.storages:
prefix = os.path.join(output_dir, store.name)
for file_ in store.files:
path = file_.path
if path.startswith('/'):
path = path[1:]
path = os.path.join(prefix, path)
if os.path.exists(path) and args.update:
if checksum(path) == file_.hashes.get('md5'):
continue
directory, _ = os.path.split(path)
makedirs(directory, exist_ok=True)
with open(path, "wb") as f:
file_.write_to(f)
pbar.update()
@might_need_auth
def fetch(args):
"""Fetch an individual file from a project.
The first part of the remote path is interpreted as the name of the
storage provider. If there is no match the default (osfstorage) is
used.
The local path defaults to the name of the remote file.
If the project is private you need to specify a username.
If args.force is True, write local file even if that file already exists.
If args.force is False but args.update is True, overwrite an existing local
file only if local and remote files differ.
"""
storage, remote_path = split_storage(args.remote)
local_path = args.local
if local_path is None:
_, local_path = os.path.split(remote_path)
local_path_exists = os.path.exists(local_path)
if local_path_exists and not args.force and not args.update:
sys.exit("Local file %s already exists, not overwriting." % local_path)
directory, _ = os.path.split(local_path)
if directory:
makedirs(directory, exist_ok=True)
osf = _setup_osf(args)
project = osf.project(args.project)
store = project.storage(storage)
for file_ in store.files:
if norm_remote_path(file_.path) == remote_path:
if local_path_exists and not args.force and args.update:
if file_.hashes.get('md5') == checksum(local_path):
print("Local file %s already matches remote." % local_path)
break
with open(local_path, 'wb') as fp:
file_.write_to(fp)
# only fetching one file so we are done
break
@might_need_auth
def list_(args):
|
@might_need_auth
def upload(args):
"""Upload a new file to an existing project.
The first part of the remote path is interpreted as the name of the
storage provider. If there is no match the default (osfstorage) is
used.
If the project is private you need to specify a username.
To upload a whole directory (and all its sub-directories) use the `-r`
command-line option. If your source directory name ends in a / then
files will be created directly in the remote directory. If it does not
end in a slash an extra sub-directory with the name of the local directory
will be created.
To place contents of local directory `foo` in remote directory `bar/foo`:
$ osf upload -r foo bar
To place contents of local directory `foo` in remote directory `bar`:
$ osf upload -r foo/ bar
"""
osf = _setup_osf(args)
if osf.username is None or osf.password is None:
sys.exit('To upload a file you need to provide a username and'
' password.')
project = osf.project(args.project)
storage, remote_path = split_storage(args.destination)
if remote_path == '':
remote_path = os.path.split(args.source)[-1]
store = project.storage(storage)
if args.recursive:
if not os.path.isdir(args.source):
raise RuntimeError("Expected source ({}) to be a directory when "
"using recursive mode.".format(args.source))
# local name of the directory that is being uploaded
_, dir_name = os.path.split(args.source)
for root, _, files in os.walk(args.source):
subdir_path = os.path.relpath(root, args.source)
for fname in files:
local_path = os.path.join(root, fname)
with open(local_path, 'rb') as fp:
# build the remote path + fname
name = os.path.join(remote_path, dir_name, subdir_path,
fname)
store.create_file(name, fp, force=args.force,
update=args.update)
else:
with open(args.source, 'rb') as fp:
store.create_file(remote_path, fp, force=args.force,
update=args.update)
@might_need_auth
def remove(args):
"""Remove a file from the project's storage.
The first part of the remote path is interpreted as the name of the
storage provider. If there is no match the default (osfstorage) is
used.
"""
osf = _setup_osf(args)
if osf.username is None or osf.password is None:
sys.exit('To remove a file you need to provide a username and'
' password.')
project = osf.project(args.project)
storage, remote_path = split_storage(args.target)
store = project.storage(storage)
for f in store.files:
if norm_remote_path(f.path) == remote_path:
f.remove()
| """List all files from all storages for project.
If the project is private you need to specify a username.
"""
osf = _setup_osf(args)
project = osf.project(args.project)
for store in project.storages:
prefix = store.name
for file_ in store.files:
path = file_.path
if path.startswith('/'):
path = path[1:]
print(os.path.join(prefix, path)) | identifier_body |
cli.py | """Command line interface to the OSF
These functions implement the functionality of the command-line interface.
"""
from __future__ import print_function
from functools import wraps
import getpass
import os
import sys
from six.moves import configparser
from six.moves import input
from tqdm import tqdm
from .api import OSF
from .exceptions import UnauthorizedException
from .utils import norm_remote_path, split_storage, makedirs, checksum
def config_from_file():
if os.path.exists(".osfcli.config"):
config_ = configparser.ConfigParser()
config_.read(".osfcli.config")
# for python2 compatibility
config = dict(config_.items('osf'))
else:
config = {}
return config
def config_from_env(config):
username = os.getenv("OSF_USERNAME")
if username is not None:
config['username'] = username
project = os.getenv("OSF_PROJECT")
if project is not None:
config['project'] = project
return config
def _get_username(args, config):
if args.username is None:
username = config.get('username')
else:
username = args.username
return username
def _setup_osf(args):
# Command line options have precedence over environment variables,
# which have precedence over the config file.
config = config_from_env(config_from_file())
username = _get_username(args, config)
project = config.get('project')
if args.project is None:
args.project = project
# still None? We are in trouble
if args.project is None:
sys.exit('You have to specify a project ID via the command line,'
' configuration file or environment variable.')
password = None
if username is not None:
password = os.getenv("OSF_PASSWORD")
# Prompt user when password is not set
if password is None:
password = getpass.getpass('Please input your password: ')
return OSF(username=username, password=password)
def might_need_auth(f):
"""Decorate a CLI function that might require authentication.
Catches any UnauthorizedException raised, prints a helpful message and
then exits.
"""
@wraps(f)
def wrapper(cli_args):
try:
return_value = f(cli_args)
except UnauthorizedException as e:
config = config_from_env(config_from_file())
username = _get_username(cli_args, config)
if username is None:
sys.exit("Please set a username (run `osf -h` for details).")
else:
sys.exit("You are not authorized to access this project.")
return return_value
return wrapper
def init(args):
"""Initialize or edit an existing .osfcli.config file."""
# reading existing config file, convert to configparser object
config = config_from_file()
config_ = configparser.ConfigParser()
config_.add_section('osf')
if 'username' not in config.keys():
config_.set('osf', 'username', '')
else:
config_.set('osf', 'username', config['username'])
if 'project' not in config.keys():
config_.set('osf', 'project', '')
else:
config_.set('osf', 'project', config['project'])
# now we can start asking for new values
print('Provide a username for the config file [current username: {}]:'.format(
config_.get('osf', 'username')))
username = input()
if username:
config_.set('osf', 'username', username)
print('Provide a project for the config file [current project: {}]:'.format(
config_.get('osf', 'project')))
project = input()
if project:
config_.set('osf', 'project', project)
cfgfile = open(".osfcli.config", "w")
config_.write(cfgfile)
cfgfile.close()
@might_need_auth
def clone(args):
"""Copy all files from all storages of a project. |
If args.update is True, overwrite any existing local files only if local and
remote files differ.
"""
osf = _setup_osf(args)
project = osf.project(args.project)
output_dir = args.project
if args.output is not None:
output_dir = args.output
with tqdm(unit='files') as pbar:
for store in project.storages:
prefix = os.path.join(output_dir, store.name)
for file_ in store.files:
path = file_.path
if path.startswith('/'):
path = path[1:]
path = os.path.join(prefix, path)
if os.path.exists(path) and args.update:
if checksum(path) == file_.hashes.get('md5'):
continue
directory, _ = os.path.split(path)
makedirs(directory, exist_ok=True)
with open(path, "wb") as f:
file_.write_to(f)
pbar.update()
@might_need_auth
def fetch(args):
"""Fetch an individual file from a project.
The first part of the remote path is interpreted as the name of the
storage provider. If there is no match the default (osfstorage) is
used.
The local path defaults to the name of the remote file.
If the project is private you need to specify a username.
If args.force is True, write local file even if that file already exists.
If args.force is False but args.update is True, overwrite an existing local
file only if local and remote files differ.
"""
storage, remote_path = split_storage(args.remote)
local_path = args.local
if local_path is None:
_, local_path = os.path.split(remote_path)
local_path_exists = os.path.exists(local_path)
if local_path_exists and not args.force and not args.update:
sys.exit("Local file %s already exists, not overwriting." % local_path)
directory, _ = os.path.split(local_path)
if directory:
makedirs(directory, exist_ok=True)
osf = _setup_osf(args)
project = osf.project(args.project)
store = project.storage(storage)
for file_ in store.files:
if norm_remote_path(file_.path) == remote_path:
if local_path_exists and not args.force and args.update:
if file_.hashes.get('md5') == checksum(local_path):
print("Local file %s already matches remote." % local_path)
break
with open(local_path, 'wb') as fp:
file_.write_to(fp)
# only fetching one file so we are done
break
@might_need_auth
def list_(args):
"""List all files from all storages for project.
If the project is private you need to specify a username.
"""
osf = _setup_osf(args)
project = osf.project(args.project)
for store in project.storages:
prefix = store.name
for file_ in store.files:
path = file_.path
if path.startswith('/'):
path = path[1:]
print(os.path.join(prefix, path))
@might_need_auth
def upload(args):
"""Upload a new file to an existing project.
The first part of the remote path is interpreted as the name of the
storage provider. If there is no match the default (osfstorage) is
used.
If the project is private you need to specify a username.
To upload a whole directory (and all its sub-directories) use the `-r`
command-line option. If your source directory name ends in a / then
files will be created directly in the remote directory. If it does not
end in a slash an extra sub-directory with the name of the local directory
will be created.
To place contents of local directory `foo` in remote directory `bar/foo`:
$ osf upload -r foo bar
To place contents of local directory `foo` in remote directory `bar`:
$ osf upload -r foo/ bar
"""
osf = _setup_osf(args)
if osf.username is None or osf.password is None:
sys.exit('To upload a file you need to provide a username and'
' password.')
project = osf.project(args.project)
storage, remote_path = split_storage(args.destination)
if remote_path == '':
remote_path = os.path.split(args.source)[-1]
store = project.storage(storage)
if args.recursive:
if not os.path.isdir(args.source):
raise RuntimeError("Expected source ({}) to be a directory when "
"using recursive mode.".format(args.source))
# local name of the directory that is being uploaded
_, dir_name = os.path.split(args.source)
for root, _, files in os.walk(args.source):
subdir_path = os.path.relpath(root, args.source)
for fname in files:
local_path = os.path.join(root, fname)
with open(local_path, 'rb') as fp:
# build the remote path + fname
name = os.path.join(remote_path, dir_name, subdir_path,
fname)
store.create_file(name, fp, force=args.force,
update=args.update)
else:
with open(args.source, 'rb') as fp:
store.create_file(remote_path, fp, force=args.force,
update=args.update)
@might_need_auth
def remove(args):
"""Remove a file from the project's storage.
The first part of the remote path is interpreted as the name of the
storage provider. If there is no match the default (osfstorage) is
used.
"""
osf = _setup_osf(args)
if osf.username is None or osf.password is None:
sys.exit('To remove a file you need to provide a username and'
' password.')
project = osf.project(args.project)
storage, remote_path = split_storage(args.target)
store = project.storage(storage)
for f in store.files:
if norm_remote_path(f.path) == remote_path:
f.remove() |
The output directory defaults to the current directory.
If the project is private you need to specify a username. | random_line_split |
jax_sinusoid.py | from .jax_model import MAML
from utils.priority import PriorityQueue
import copy
import math
import random
import numpy as np
import matplotlib.pyplot as plt
import warnings
from typing import Any, Dict, List, Tuple
from jax.experimental import stax # neural network library
from jax.experimental import optimizers
from jax.experimental.stax import Conv, Dense, MaxPool, Relu, Flatten, LogSoftmax
class SineMAML(MAML):
def __init__(self, params, device):
|
def _get_model(self):
"""
Return jax network initialisation and forward method.
"""
layers = []
# inner / hidden network layers + non-linearities
for l in self.network_layers:
layers.append(Dense(l))
layers.append(Relu)
# output layer (no non-linearity)
layers.append(Dense(self.output_dimension))
# make jax stax object
model = stax.serial(*layers)
return model
def _get_optimiser(self):
"""
Return jax optimiser: initialisation, update method and parameter getter method.
Optimiser learning rate is given by config (meta_lr).
"""
return optimizers.adam(step_size=self.meta_lr)
def _get_priority_queue(self):
"""Initiate priority queue"""
if self.task_type == 'sin3d':
param_ranges = self.params.get(["priority_queue", "param_ranges_3d"])
block_sizes = self.params.get(["priority_queue", "block_sizes_3d"])
elif self.task_type == 'sin2d':
param_ranges = self.params.get(["priority_queue", "param_ranges_2d"])
block_sizes = self.params.get(["priority_queue", "block_sizes_2d"])
return SinePriorityQueue(
queue_resume=self.params.get(["resume", "priority_queue"]),
counts_resume=self.params.get(["resume", "queue_counts"]),
sample_type=self.params.get(["priority_queue", "sample_type"]),
block_sizes=block_sizes,
param_ranges=param_ranges,
initial_value=self.params.get(["priority_queue", "initial_value"]),
epsilon_start=self.params.get(["priority_queue", "epsilon_start"]),
epsilon_final=self.params.get(["priority_queue", "epsilon_final"]),
epsilon_decay_start=self.params.get(["priority_queue", "epsilon_decay_start"]),
epsilon_decay_rate=self.params.get(["priority_queue", "epsilon_decay_rate"]),
burn_in=self.params.get(["priority_queue", "burn_in"]),
save_path=self.checkpoint_path
)
def _sample_task(self, batch_size, validate=False, step_count=None):
"""
Sample specific task(s) from defined distribution of tasks
E.g. one specific sine function from family of sines
:param batch_size: number of tasks to sample
:param validate: whether or not tasks are being used for validation
:param step_count: step count during training
:return tasks: batch of tasks
:return task_indices: indices of priority queue associated with batch of tasks
:return task_probabilities: probabilities of tasks sampled being chosen a priori
Returns batch of sin functions shifted in x direction by a phase parameter sampled randomly between phase_bounds
(set by config) enlarged in the y direction by an amplitude parameter sampled randomly between amplitude_bounds
(also set by config). For 3d sine option, function is also squeezed in x direction by freuency parameter.
"""
tasks = []
task_probabilities = []
all_max_indices = [] if self.priority_sample else None
for _ in range(batch_size):
# sample a task from task distribution and generate x, y tensors for that task
if self.priority_sample and not validate:
# query queue for next task parameters
max_indices, task_parameters, task_probability = self.priority_queue.query(step=step_count)
all_max_indices.append(max_indices)
task_probabilities.append(task_probability)
# get epsilon value
epsilon = self.priority_queue.get_epsilon()
# get task from parameters returned from query
amplitude = task_parameters[0]
phase = task_parameters[1]
if self.task_type == 'sin3d':
frequency_scaling = task_parameters[2]
else:
frequency_scaling = 1.
parameters = [amplitude, phase, frequency_scaling]
task = self._get_task_from_params(parameters=parameters)
# compute metrics for tb logging
queue_count_loss_correlation = self.priority_queue.compute_count_loss_correlation()
queue_mean = np.mean(self.priority_queue.get_queue())
queue_std = np.std(self.priority_queue.get_queue())
# write to tensorboard
if epsilon:
self.writer.add_scalar('queue_metrics/epsilon', epsilon, step_count)
self.writer.add_scalar('queue_metrics/queue_correlation', queue_count_loss_correlation, step_count)
self.writer.add_scalar('queue_metrics/queue_mean', queue_mean, step_count)
self.writer.add_scalar('queue_metrics/queue_std', queue_std, step_count)
else:
# sample randomly (vanilla maml)
amplitude = random.uniform(self.amplitude_bounds[0], self.amplitude_bounds[1])
phase = random.uniform(self.phase_bounds[0], self.phase_bounds[1])
if self.task_type == 'sin3d':
frequency_scaling = random.uniform(self.frequency_bounds[0], self.frequency_bounds[1])
else:
frequency_scaling = 1.
parameters = [amplitude, phase, frequency_scaling]
task = self._get_task_from_params(parameters=parameters)
tasks.append(task)
return tasks, all_max_indices, task_probabilities
def _get_task_from_params(self, parameters: List) -> Any:
"""
Return sine function defined by parameters given
:param parameters: parameters defining the specific sin task in the distribution
:return modified_sin: sin function
(method differs from _sample_task in that it is not a random sample but
defined by parameters given)
"""
amplitude = parameters[0]
phase = parameters[1]
frequency_scaling = parameters[2]
def modified_sin(x):
return amplitude * np.sin(phase + frequency_scaling * x)
return modified_sin
def _generate_batch(self, tasks: List):
"""
Obtain batch of training examples from a list of tasks
:param tasks: list of tasks for which data points need to be sampled
:return x_batch: x points sampled from data
:return y_batch: y points associated with x_batch
"""
x_batch = np.stack([np.random.uniform(low=self.domain_bounds[0], high=self.domain_bounds[1], size=(self.inner_update_k, 1)) for _ in range(len(tasks))])
y_batch = np.stack([[tasks[t](x) for x in x_batch[t]] for t in range(len(tasks))])
return x_batch, y_batch
def _compute_loss(self, parameters, inputs, ground_truth):
"""
Computes loss of network
:param parameters: current weights of model
:param inputs: x data
:param ground_truth: y_data
:return loss: loss on ground truth vs output of network applied to inputs
"""
predictions = self.network_forward(parameters, inputs)
loss = np.mean((ground_truth - predictions) ** 2)
return loss
def _visualise(self, model_iterations, task, validation_x, validation_y, save_name, visualise_all=True):
"""
Visualise qualitative run.
:param validation_model_iterations: parameters of model after successive fine-tuning steps
:param val_task: task being evaluated
:param validation_x_batch: k data points fed to model for finetuning
:param validation_y_batch: ground truth data associated with validation_x_batch
:param save_name: name of file to be saved
:param visualise_all: whether to visualise all fine-tuning steps or just final
"""
# ground truth
plot_x = np.linspace(self.domain_bounds[0], self.domain_bounds[1], 100)
plot_y_ground_truth = [task(xi) for xi in plot_x]
fig = plt.figure()
plt.plot(plot_x, plot_y_ground_truth, label="Ground Truth")
final_plot_y_prediction = self.network_forward(model_iterations[-1], plot_x.reshape(len(plot_x), 1))
plt.plot(plot_x, final_plot_y_prediction, linestyle='dashed', linewidth=3.0, label='Fine-tuned MAML final update')
no_tuning_y_prediction = self.network_forward(model_iterations[0], plot_x.reshape(len(plot_x), 1))
plt.plot(plot_x, no_tuning_y_prediction, linestyle='dashed', linewidth=3.0, label='Untuned MAML prediction')
if visualise_all:
for i, (model_iteration) in enumerate(model_iterations[1:-1]):
plot_y_prediction = self.network_forward(model_iteration, plot_x.reshape(len(plot_x), 1))
plt.plot(plot_x, plot_y_prediction, linestyle='dashed') #, label='Fine-tuned MAML {} update'.format(i))
plt.scatter(validation_x, validation_y, marker='o', label='K Points')
plt.title("Validation of Sinusoid Meta-Regression")
plt.xlabel(r"x")
plt.ylabel(r"sin(x)")
plt.legend()
# fig.savefig(self.params.get("checkpoint_path") + save_name)
plt.close()
return fig
def _get_fixed_validation_tasks(self):
"""
If using fixed validation this method returns a set of tasks that are
equally spread across the task distribution space.
"""
# mesh of equally partitioned state space
if self.task_type == 'sin3d':
amplitude_spectrum, phase_spectrum, frequency_spectrum = np.mgrid[
self.amplitude_bounds[0]:self.amplitude_bounds[1]:self.validation_block_sizes[0],
self.phase_bounds[0]:self.phase_bounds[1]:self.validation_block_sizes[1],
self.frequency_bounds[0]:self.frequency_bounds[1]:self.validation_block_sizes[2]
]
parameter_space_tuples = np.vstack((amplitude_spectrum.flatten(), phase_spectrum.flatten(), frequency_spectrum.flatten())).T
else:
amplitude_spectrum, phase_spectrum = np.mgrid[
self.amplitude_bounds[0]:self.amplitude_bounds[1]:self.validation_block_sizes[0],
self.phase_bounds[0]:self.phase_bounds[1]:self.validation_block_sizes[1]
]
parameter_space_tuples = np.vstack((amplitude_spectrum.flatten(), phase_spectrum.flatten())).T
fixed_validation_tasks = []
def generate_sin(amplitude, phase, frequency=1):
def modified_sin(x):
return amplitude * np.sin(phase + frequency * x)
return modified_sin
for param_pair in parameter_space_tuples:
if self.task_type == 'sin3d':
fixed_validation_tasks.append(generate_sin(amplitude=param_pair[0], phase=param_pair[1], frequency=param_pair[2]))
else:
fixed_validation_tasks.append(generate_sin(amplitude=param_pair[0], phase=param_pair[1]))
return parameter_space_tuples, fixed_validation_tasks
class SinePriorityQueue(PriorityQueue):
def __init__(self,
block_sizes: Dict[str, float], param_ranges: List[Tuple[float, float]],
sample_type: str, epsilon_start: float, epsilon_final: float, epsilon_decay_rate: float, epsilon_decay_start: int,
queue_resume: str, counts_resume: str, save_path: str, burn_in: int=None, initial_value: float=None
):
# convert phase bounds/ phase block_size from degrees to radians
phase_ranges = [
param_ranges[1][0] * (2 * np.pi) / 360, param_ranges[1][1] * (2 * np.pi) / 360
]
phase_block_size = block_sizes[1] * (2 * np.pi) / 360
param_ranges[1] = phase_ranges
block_sizes[1] = phase_block_size
super().__init__(
block_sizes=block_sizes, param_ranges=param_ranges, sample_type=sample_type, epsilon_start=epsilon_start,
epsilon_final=epsilon_final, epsilon_decay_rate=epsilon_decay_rate, epsilon_decay_start=epsilon_decay_start, queue_resume=queue_resume,
counts_resume=counts_resume, save_path=save_path, burn_in=burn_in, initial_value=initial_value
)
self.figure_locsx, self.figure_locsy, self.figure_labelsx, self.figure_labelsy = self._get_figure_labels()
def _get_figure_labels(self):
xlocs = np.arange(0, self._queue.shape[1])
ylocs = np.arange(0, self._queue.shape[0])
xlabels = np.arange(self.param_ranges[1][0], self.param_ranges[1][1], self.block_sizes[1])
ylabels = np.arange(self.param_ranges[0][0], self.param_ranges[0][1], self.block_sizes[0])
return xlocs, ylocs, xlabels, ylabels
def visualise_priority_queue(self, feature='losses'):
"""
Produces plot of priority queue (losses or counts)
Discrete vs continuous, 2d heatmap vs 3d.
:param feature: which aspect of queue to visualise. 'losses' or 'counts'
:retrun fig: matplotlib figure showing heatmap of priority queue feature
"""
if type(self._queue) == np.ndarray:
if len(self._queue.shape) == 2:
fig = plt.figure()
if feature == 'losses':
plt.imshow(self._queue)
elif feature == 'counts':
plt.imshow(self.sample_counts)
else:
raise ValueError("feature type not recognised. Use 'losses' or 'counts'")
plt.colorbar()
plt.xlabel("Phase")
plt.ylabel("Amplitude")
# set labels to sine specific parameter ranges
# plt.xticks(
# locs=self.figure_locsx,
# labels=self.figure_labelsx
# )
# plt.yticks(
# locs=self.figure_locsy,
# labels=self.figure_labelsy
# )
return fig
else:
warnings.warn("Visualisation with parameter space dimension > 2 not supported", Warning)
return None
else:
raise NotImplementedError("Visualisation for dictionary queue not implemented")
def visualise_priority_queue_loss_distribution(self):
"""
Produces probability distribution plot of losses in the priority queue
"""
all_losses = self._queue.flatten()
hist, bin_edges = np.histogram(all_losses, bins=int(0.1 * len(all_losses)))
bin_centers = (bin_edges[:-1] + bin_edges[1:]) / 2
fig = plt.figure()
plt.plot(bin_centers, hist)
return fig
| self.device = device
self.task_type = params.get('task_type')
# extract relevant task-specific parameters
self.amplitude_bounds = params.get(['sin2d', 'amplitude_bounds'])
self.domain_bounds = params.get(['sin2d', 'domain_bounds'])
degree_phase_bounds = params.get(['sin2d', 'phase_bounds']) # phase given in degrees
if self.task_type == 'sin3d':
self.frequency_bounds = params.get(['sin3d', 'frequency_bounds'])
block_sizes = params.get(['sin3d', 'fixed_val_blocks'])
else:
block_sizes = params.get(['sin2d', 'fixed_val_blocks'])
# convert phase bounds/ fixed_val_interval from degrees to radians
self.phase_bounds = [
degree_phase_bounds[0] * (2 * np.pi) / 360, degree_phase_bounds[1] * (2 * np.pi) / 360
]
block_sizes[1] = block_sizes[1] * (2 * np.pi) / 360
self.validation_block_sizes = block_sizes
MAML.__init__(self, params) | identifier_body |
jax_sinusoid.py | from .jax_model import MAML
from utils.priority import PriorityQueue
import copy
import math
import random
import numpy as np
import matplotlib.pyplot as plt
import warnings
from typing import Any, Dict, List, Tuple
from jax.experimental import stax # neural network library
from jax.experimental import optimizers
from jax.experimental.stax import Conv, Dense, MaxPool, Relu, Flatten, LogSoftmax
class SineMAML(MAML):
def __init__(self, params, device):
self.device = device
self.task_type = params.get('task_type')
# extract relevant task-specific parameters
self.amplitude_bounds = params.get(['sin2d', 'amplitude_bounds'])
self.domain_bounds = params.get(['sin2d', 'domain_bounds'])
degree_phase_bounds = params.get(['sin2d', 'phase_bounds']) # phase given in degrees
if self.task_type == 'sin3d':
self.frequency_bounds = params.get(['sin3d', 'frequency_bounds'])
block_sizes = params.get(['sin3d', 'fixed_val_blocks'])
else:
block_sizes = params.get(['sin2d', 'fixed_val_blocks'])
# convert phase bounds/ fixed_val_interval from degrees to radians
self.phase_bounds = [
degree_phase_bounds[0] * (2 * np.pi) / 360, degree_phase_bounds[1] * (2 * np.pi) / 360
]
block_sizes[1] = block_sizes[1] * (2 * np.pi) / 360
self.validation_block_sizes = block_sizes
MAML.__init__(self, params)
def _get_model(self):
"""
Return jax network initialisation and forward method.
"""
layers = []
# inner / hidden network layers + non-linearities
for l in self.network_layers:
layers.append(Dense(l))
layers.append(Relu)
# output layer (no non-linearity)
layers.append(Dense(self.output_dimension))
# make jax stax object
model = stax.serial(*layers)
return model
def _get_optimiser(self):
"""
Return jax optimiser: initialisation, update method and parameter getter method.
Optimiser learning rate is given by config (meta_lr).
"""
return optimizers.adam(step_size=self.meta_lr)
def _get_priority_queue(self):
"""Initiate priority queue"""
if self.task_type == 'sin3d':
param_ranges = self.params.get(["priority_queue", "param_ranges_3d"])
block_sizes = self.params.get(["priority_queue", "block_sizes_3d"])
elif self.task_type == 'sin2d':
param_ranges = self.params.get(["priority_queue", "param_ranges_2d"])
block_sizes = self.params.get(["priority_queue", "block_sizes_2d"])
return SinePriorityQueue(
queue_resume=self.params.get(["resume", "priority_queue"]),
counts_resume=self.params.get(["resume", "queue_counts"]),
sample_type=self.params.get(["priority_queue", "sample_type"]),
block_sizes=block_sizes,
param_ranges=param_ranges,
initial_value=self.params.get(["priority_queue", "initial_value"]),
epsilon_start=self.params.get(["priority_queue", "epsilon_start"]),
epsilon_final=self.params.get(["priority_queue", "epsilon_final"]),
epsilon_decay_start=self.params.get(["priority_queue", "epsilon_decay_start"]),
epsilon_decay_rate=self.params.get(["priority_queue", "epsilon_decay_rate"]),
burn_in=self.params.get(["priority_queue", "burn_in"]),
save_path=self.checkpoint_path
)
def _sample_task(self, batch_size, validate=False, step_count=None):
"""
Sample specific task(s) from defined distribution of tasks
E.g. one specific sine function from family of sines |
:param batch_size: number of tasks to sample
:param validate: whether or not tasks are being used for validation
:param step_count: step count during training
:return tasks: batch of tasks
:return task_indices: indices of priority queue associated with batch of tasks
:return task_probabilities: probabilities of tasks sampled being chosen a priori
Returns batch of sin functions shifted in x direction by a phase parameter sampled randomly between phase_bounds
(set by config) enlarged in the y direction by an amplitude parameter sampled randomly between amplitude_bounds
(also set by config). For 3d sine option, function is also squeezed in x direction by freuency parameter.
"""
tasks = []
task_probabilities = []
all_max_indices = [] if self.priority_sample else None
for _ in range(batch_size):
# sample a task from task distribution and generate x, y tensors for that task
if self.priority_sample and not validate:
# query queue for next task parameters
max_indices, task_parameters, task_probability = self.priority_queue.query(step=step_count)
all_max_indices.append(max_indices)
task_probabilities.append(task_probability)
# get epsilon value
epsilon = self.priority_queue.get_epsilon()
# get task from parameters returned from query
amplitude = task_parameters[0]
phase = task_parameters[1]
if self.task_type == 'sin3d':
frequency_scaling = task_parameters[2]
else:
frequency_scaling = 1.
parameters = [amplitude, phase, frequency_scaling]
task = self._get_task_from_params(parameters=parameters)
# compute metrics for tb logging
queue_count_loss_correlation = self.priority_queue.compute_count_loss_correlation()
queue_mean = np.mean(self.priority_queue.get_queue())
queue_std = np.std(self.priority_queue.get_queue())
# write to tensorboard
if epsilon:
self.writer.add_scalar('queue_metrics/epsilon', epsilon, step_count)
self.writer.add_scalar('queue_metrics/queue_correlation', queue_count_loss_correlation, step_count)
self.writer.add_scalar('queue_metrics/queue_mean', queue_mean, step_count)
self.writer.add_scalar('queue_metrics/queue_std', queue_std, step_count)
else:
# sample randomly (vanilla maml)
amplitude = random.uniform(self.amplitude_bounds[0], self.amplitude_bounds[1])
phase = random.uniform(self.phase_bounds[0], self.phase_bounds[1])
if self.task_type == 'sin3d':
frequency_scaling = random.uniform(self.frequency_bounds[0], self.frequency_bounds[1])
else:
frequency_scaling = 1.
parameters = [amplitude, phase, frequency_scaling]
task = self._get_task_from_params(parameters=parameters)
tasks.append(task)
return tasks, all_max_indices, task_probabilities
def _get_task_from_params(self, parameters: List) -> Any:
"""
Return sine function defined by parameters given
:param parameters: parameters defining the specific sin task in the distribution
:return modified_sin: sin function
(method differs from _sample_task in that it is not a random sample but
defined by parameters given)
"""
amplitude = parameters[0]
phase = parameters[1]
frequency_scaling = parameters[2]
def modified_sin(x):
return amplitude * np.sin(phase + frequency_scaling * x)
return modified_sin
def _generate_batch(self, tasks: List):
"""
Obtain batch of training examples from a list of tasks
:param tasks: list of tasks for which data points need to be sampled
:return x_batch: x points sampled from data
:return y_batch: y points associated with x_batch
"""
x_batch = np.stack([np.random.uniform(low=self.domain_bounds[0], high=self.domain_bounds[1], size=(self.inner_update_k, 1)) for _ in range(len(tasks))])
y_batch = np.stack([[tasks[t](x) for x in x_batch[t]] for t in range(len(tasks))])
return x_batch, y_batch
def _compute_loss(self, parameters, inputs, ground_truth):
"""
Computes loss of network
:param parameters: current weights of model
:param inputs: x data
:param ground_truth: y_data
:return loss: loss on ground truth vs output of network applied to inputs
"""
predictions = self.network_forward(parameters, inputs)
loss = np.mean((ground_truth - predictions) ** 2)
return loss
def _visualise(self, model_iterations, task, validation_x, validation_y, save_name, visualise_all=True):
"""
Visualise qualitative run.
:param validation_model_iterations: parameters of model after successive fine-tuning steps
:param val_task: task being evaluated
:param validation_x_batch: k data points fed to model for finetuning
:param validation_y_batch: ground truth data associated with validation_x_batch
:param save_name: name of file to be saved
:param visualise_all: whether to visualise all fine-tuning steps or just final
"""
# ground truth
plot_x = np.linspace(self.domain_bounds[0], self.domain_bounds[1], 100)
plot_y_ground_truth = [task(xi) for xi in plot_x]
fig = plt.figure()
plt.plot(plot_x, plot_y_ground_truth, label="Ground Truth")
final_plot_y_prediction = self.network_forward(model_iterations[-1], plot_x.reshape(len(plot_x), 1))
plt.plot(plot_x, final_plot_y_prediction, linestyle='dashed', linewidth=3.0, label='Fine-tuned MAML final update')
no_tuning_y_prediction = self.network_forward(model_iterations[0], plot_x.reshape(len(plot_x), 1))
plt.plot(plot_x, no_tuning_y_prediction, linestyle='dashed', linewidth=3.0, label='Untuned MAML prediction')
if visualise_all:
for i, (model_iteration) in enumerate(model_iterations[1:-1]):
plot_y_prediction = self.network_forward(model_iteration, plot_x.reshape(len(plot_x), 1))
plt.plot(plot_x, plot_y_prediction, linestyle='dashed') #, label='Fine-tuned MAML {} update'.format(i))
plt.scatter(validation_x, validation_y, marker='o', label='K Points')
plt.title("Validation of Sinusoid Meta-Regression")
plt.xlabel(r"x")
plt.ylabel(r"sin(x)")
plt.legend()
# fig.savefig(self.params.get("checkpoint_path") + save_name)
plt.close()
return fig
def _get_fixed_validation_tasks(self):
"""
If using fixed validation this method returns a set of tasks that are
equally spread across the task distribution space.
"""
# mesh of equally partitioned state space
if self.task_type == 'sin3d':
amplitude_spectrum, phase_spectrum, frequency_spectrum = np.mgrid[
self.amplitude_bounds[0]:self.amplitude_bounds[1]:self.validation_block_sizes[0],
self.phase_bounds[0]:self.phase_bounds[1]:self.validation_block_sizes[1],
self.frequency_bounds[0]:self.frequency_bounds[1]:self.validation_block_sizes[2]
]
parameter_space_tuples = np.vstack((amplitude_spectrum.flatten(), phase_spectrum.flatten(), frequency_spectrum.flatten())).T
else:
amplitude_spectrum, phase_spectrum = np.mgrid[
self.amplitude_bounds[0]:self.amplitude_bounds[1]:self.validation_block_sizes[0],
self.phase_bounds[0]:self.phase_bounds[1]:self.validation_block_sizes[1]
]
parameter_space_tuples = np.vstack((amplitude_spectrum.flatten(), phase_spectrum.flatten())).T
fixed_validation_tasks = []
def generate_sin(amplitude, phase, frequency=1):
def modified_sin(x):
return amplitude * np.sin(phase + frequency * x)
return modified_sin
for param_pair in parameter_space_tuples:
if self.task_type == 'sin3d':
fixed_validation_tasks.append(generate_sin(amplitude=param_pair[0], phase=param_pair[1], frequency=param_pair[2]))
else:
fixed_validation_tasks.append(generate_sin(amplitude=param_pair[0], phase=param_pair[1]))
return parameter_space_tuples, fixed_validation_tasks
class SinePriorityQueue(PriorityQueue):
def __init__(self,
block_sizes: Dict[str, float], param_ranges: List[Tuple[float, float]],
sample_type: str, epsilon_start: float, epsilon_final: float, epsilon_decay_rate: float, epsilon_decay_start: int,
queue_resume: str, counts_resume: str, save_path: str, burn_in: int=None, initial_value: float=None
):
# convert phase bounds/ phase block_size from degrees to radians
phase_ranges = [
param_ranges[1][0] * (2 * np.pi) / 360, param_ranges[1][1] * (2 * np.pi) / 360
]
phase_block_size = block_sizes[1] * (2 * np.pi) / 360
param_ranges[1] = phase_ranges
block_sizes[1] = phase_block_size
super().__init__(
block_sizes=block_sizes, param_ranges=param_ranges, sample_type=sample_type, epsilon_start=epsilon_start,
epsilon_final=epsilon_final, epsilon_decay_rate=epsilon_decay_rate, epsilon_decay_start=epsilon_decay_start, queue_resume=queue_resume,
counts_resume=counts_resume, save_path=save_path, burn_in=burn_in, initial_value=initial_value
)
self.figure_locsx, self.figure_locsy, self.figure_labelsx, self.figure_labelsy = self._get_figure_labels()
def _get_figure_labels(self):
xlocs = np.arange(0, self._queue.shape[1])
ylocs = np.arange(0, self._queue.shape[0])
xlabels = np.arange(self.param_ranges[1][0], self.param_ranges[1][1], self.block_sizes[1])
ylabels = np.arange(self.param_ranges[0][0], self.param_ranges[0][1], self.block_sizes[0])
return xlocs, ylocs, xlabels, ylabels
def visualise_priority_queue(self, feature='losses'):
"""
Produces plot of priority queue (losses or counts)
Discrete vs continuous, 2d heatmap vs 3d.
:param feature: which aspect of queue to visualise. 'losses' or 'counts'
:retrun fig: matplotlib figure showing heatmap of priority queue feature
"""
if type(self._queue) == np.ndarray:
if len(self._queue.shape) == 2:
fig = plt.figure()
if feature == 'losses':
plt.imshow(self._queue)
elif feature == 'counts':
plt.imshow(self.sample_counts)
else:
raise ValueError("feature type not recognised. Use 'losses' or 'counts'")
plt.colorbar()
plt.xlabel("Phase")
plt.ylabel("Amplitude")
# set labels to sine specific parameter ranges
# plt.xticks(
# locs=self.figure_locsx,
# labels=self.figure_labelsx
# )
# plt.yticks(
# locs=self.figure_locsy,
# labels=self.figure_labelsy
# )
return fig
else:
warnings.warn("Visualisation with parameter space dimension > 2 not supported", Warning)
return None
else:
raise NotImplementedError("Visualisation for dictionary queue not implemented")
def visualise_priority_queue_loss_distribution(self):
"""
Produces probability distribution plot of losses in the priority queue
"""
all_losses = self._queue.flatten()
hist, bin_edges = np.histogram(all_losses, bins=int(0.1 * len(all_losses)))
bin_centers = (bin_edges[:-1] + bin_edges[1:]) / 2
fig = plt.figure()
plt.plot(bin_centers, hist)
return fig | random_line_split | |
jax_sinusoid.py | from .jax_model import MAML
from utils.priority import PriorityQueue
import copy
import math
import random
import numpy as np
import matplotlib.pyplot as plt
import warnings
from typing import Any, Dict, List, Tuple
from jax.experimental import stax # neural network library
from jax.experimental import optimizers
from jax.experimental.stax import Conv, Dense, MaxPool, Relu, Flatten, LogSoftmax
class SineMAML(MAML):
def __init__(self, params, device):
self.device = device
self.task_type = params.get('task_type')
# extract relevant task-specific parameters
self.amplitude_bounds = params.get(['sin2d', 'amplitude_bounds'])
self.domain_bounds = params.get(['sin2d', 'domain_bounds'])
degree_phase_bounds = params.get(['sin2d', 'phase_bounds']) # phase given in degrees
if self.task_type == 'sin3d':
self.frequency_bounds = params.get(['sin3d', 'frequency_bounds'])
block_sizes = params.get(['sin3d', 'fixed_val_blocks'])
else:
block_sizes = params.get(['sin2d', 'fixed_val_blocks'])
# convert phase bounds/ fixed_val_interval from degrees to radians
self.phase_bounds = [
degree_phase_bounds[0] * (2 * np.pi) / 360, degree_phase_bounds[1] * (2 * np.pi) / 360
]
block_sizes[1] = block_sizes[1] * (2 * np.pi) / 360
self.validation_block_sizes = block_sizes
MAML.__init__(self, params)
def _get_model(self):
"""
Return jax network initialisation and forward method.
"""
layers = []
# inner / hidden network layers + non-linearities
for l in self.network_layers:
layers.append(Dense(l))
layers.append(Relu)
# output layer (no non-linearity)
layers.append(Dense(self.output_dimension))
# make jax stax object
model = stax.serial(*layers)
return model
def _get_optimiser(self):
"""
Return jax optimiser: initialisation, update method and parameter getter method.
Optimiser learning rate is given by config (meta_lr).
"""
return optimizers.adam(step_size=self.meta_lr)
def _get_priority_queue(self):
"""Initiate priority queue"""
if self.task_type == 'sin3d':
param_ranges = self.params.get(["priority_queue", "param_ranges_3d"])
block_sizes = self.params.get(["priority_queue", "block_sizes_3d"])
elif self.task_type == 'sin2d':
param_ranges = self.params.get(["priority_queue", "param_ranges_2d"])
block_sizes = self.params.get(["priority_queue", "block_sizes_2d"])
return SinePriorityQueue(
queue_resume=self.params.get(["resume", "priority_queue"]),
counts_resume=self.params.get(["resume", "queue_counts"]),
sample_type=self.params.get(["priority_queue", "sample_type"]),
block_sizes=block_sizes,
param_ranges=param_ranges,
initial_value=self.params.get(["priority_queue", "initial_value"]),
epsilon_start=self.params.get(["priority_queue", "epsilon_start"]),
epsilon_final=self.params.get(["priority_queue", "epsilon_final"]),
epsilon_decay_start=self.params.get(["priority_queue", "epsilon_decay_start"]),
epsilon_decay_rate=self.params.get(["priority_queue", "epsilon_decay_rate"]),
burn_in=self.params.get(["priority_queue", "burn_in"]),
save_path=self.checkpoint_path
)
def _sample_task(self, batch_size, validate=False, step_count=None):
"""
Sample specific task(s) from defined distribution of tasks
E.g. one specific sine function from family of sines
:param batch_size: number of tasks to sample
:param validate: whether or not tasks are being used for validation
:param step_count: step count during training
:return tasks: batch of tasks
:return task_indices: indices of priority queue associated with batch of tasks
:return task_probabilities: probabilities of tasks sampled being chosen a priori
Returns batch of sin functions shifted in x direction by a phase parameter sampled randomly between phase_bounds
(set by config) enlarged in the y direction by an amplitude parameter sampled randomly between amplitude_bounds
(also set by config). For 3d sine option, function is also squeezed in x direction by freuency parameter.
"""
tasks = []
task_probabilities = []
all_max_indices = [] if self.priority_sample else None
for _ in range(batch_size):
# sample a task from task distribution and generate x, y tensors for that task
|
return tasks, all_max_indices, task_probabilities
def _get_task_from_params(self, parameters: List) -> Any:
"""
Return sine function defined by parameters given
:param parameters: parameters defining the specific sin task in the distribution
:return modified_sin: sin function
(method differs from _sample_task in that it is not a random sample but
defined by parameters given)
"""
amplitude = parameters[0]
phase = parameters[1]
frequency_scaling = parameters[2]
def modified_sin(x):
return amplitude * np.sin(phase + frequency_scaling * x)
return modified_sin
def _generate_batch(self, tasks: List):
"""
Obtain batch of training examples from a list of tasks
:param tasks: list of tasks for which data points need to be sampled
:return x_batch: x points sampled from data
:return y_batch: y points associated with x_batch
"""
x_batch = np.stack([np.random.uniform(low=self.domain_bounds[0], high=self.domain_bounds[1], size=(self.inner_update_k, 1)) for _ in range(len(tasks))])
y_batch = np.stack([[tasks[t](x) for x in x_batch[t]] for t in range(len(tasks))])
return x_batch, y_batch
def _compute_loss(self, parameters, inputs, ground_truth):
"""
Computes loss of network
:param parameters: current weights of model
:param inputs: x data
:param ground_truth: y_data
:return loss: loss on ground truth vs output of network applied to inputs
"""
predictions = self.network_forward(parameters, inputs)
loss = np.mean((ground_truth - predictions) ** 2)
return loss
def _visualise(self, model_iterations, task, validation_x, validation_y, save_name, visualise_all=True):
"""
Visualise qualitative run.
:param validation_model_iterations: parameters of model after successive fine-tuning steps
:param val_task: task being evaluated
:param validation_x_batch: k data points fed to model for finetuning
:param validation_y_batch: ground truth data associated with validation_x_batch
:param save_name: name of file to be saved
:param visualise_all: whether to visualise all fine-tuning steps or just final
"""
# ground truth
plot_x = np.linspace(self.domain_bounds[0], self.domain_bounds[1], 100)
plot_y_ground_truth = [task(xi) for xi in plot_x]
fig = plt.figure()
plt.plot(plot_x, plot_y_ground_truth, label="Ground Truth")
final_plot_y_prediction = self.network_forward(model_iterations[-1], plot_x.reshape(len(plot_x), 1))
plt.plot(plot_x, final_plot_y_prediction, linestyle='dashed', linewidth=3.0, label='Fine-tuned MAML final update')
no_tuning_y_prediction = self.network_forward(model_iterations[0], plot_x.reshape(len(plot_x), 1))
plt.plot(plot_x, no_tuning_y_prediction, linestyle='dashed', linewidth=3.0, label='Untuned MAML prediction')
if visualise_all:
for i, (model_iteration) in enumerate(model_iterations[1:-1]):
plot_y_prediction = self.network_forward(model_iteration, plot_x.reshape(len(plot_x), 1))
plt.plot(plot_x, plot_y_prediction, linestyle='dashed') #, label='Fine-tuned MAML {} update'.format(i))
plt.scatter(validation_x, validation_y, marker='o', label='K Points')
plt.title("Validation of Sinusoid Meta-Regression")
plt.xlabel(r"x")
plt.ylabel(r"sin(x)")
plt.legend()
# fig.savefig(self.params.get("checkpoint_path") + save_name)
plt.close()
return fig
def _get_fixed_validation_tasks(self):
"""
If using fixed validation this method returns a set of tasks that are
equally spread across the task distribution space.
"""
# mesh of equally partitioned state space
if self.task_type == 'sin3d':
amplitude_spectrum, phase_spectrum, frequency_spectrum = np.mgrid[
self.amplitude_bounds[0]:self.amplitude_bounds[1]:self.validation_block_sizes[0],
self.phase_bounds[0]:self.phase_bounds[1]:self.validation_block_sizes[1],
self.frequency_bounds[0]:self.frequency_bounds[1]:self.validation_block_sizes[2]
]
parameter_space_tuples = np.vstack((amplitude_spectrum.flatten(), phase_spectrum.flatten(), frequency_spectrum.flatten())).T
else:
amplitude_spectrum, phase_spectrum = np.mgrid[
self.amplitude_bounds[0]:self.amplitude_bounds[1]:self.validation_block_sizes[0],
self.phase_bounds[0]:self.phase_bounds[1]:self.validation_block_sizes[1]
]
parameter_space_tuples = np.vstack((amplitude_spectrum.flatten(), phase_spectrum.flatten())).T
fixed_validation_tasks = []
def generate_sin(amplitude, phase, frequency=1):
def modified_sin(x):
return amplitude * np.sin(phase + frequency * x)
return modified_sin
for param_pair in parameter_space_tuples:
if self.task_type == 'sin3d':
fixed_validation_tasks.append(generate_sin(amplitude=param_pair[0], phase=param_pair[1], frequency=param_pair[2]))
else:
fixed_validation_tasks.append(generate_sin(amplitude=param_pair[0], phase=param_pair[1]))
return parameter_space_tuples, fixed_validation_tasks
class SinePriorityQueue(PriorityQueue):
def __init__(self,
block_sizes: Dict[str, float], param_ranges: List[Tuple[float, float]],
sample_type: str, epsilon_start: float, epsilon_final: float, epsilon_decay_rate: float, epsilon_decay_start: int,
queue_resume: str, counts_resume: str, save_path: str, burn_in: int=None, initial_value: float=None
):
# convert phase bounds/ phase block_size from degrees to radians
phase_ranges = [
param_ranges[1][0] * (2 * np.pi) / 360, param_ranges[1][1] * (2 * np.pi) / 360
]
phase_block_size = block_sizes[1] * (2 * np.pi) / 360
param_ranges[1] = phase_ranges
block_sizes[1] = phase_block_size
super().__init__(
block_sizes=block_sizes, param_ranges=param_ranges, sample_type=sample_type, epsilon_start=epsilon_start,
epsilon_final=epsilon_final, epsilon_decay_rate=epsilon_decay_rate, epsilon_decay_start=epsilon_decay_start, queue_resume=queue_resume,
counts_resume=counts_resume, save_path=save_path, burn_in=burn_in, initial_value=initial_value
)
self.figure_locsx, self.figure_locsy, self.figure_labelsx, self.figure_labelsy = self._get_figure_labels()
def _get_figure_labels(self):
xlocs = np.arange(0, self._queue.shape[1])
ylocs = np.arange(0, self._queue.shape[0])
xlabels = np.arange(self.param_ranges[1][0], self.param_ranges[1][1], self.block_sizes[1])
ylabels = np.arange(self.param_ranges[0][0], self.param_ranges[0][1], self.block_sizes[0])
return xlocs, ylocs, xlabels, ylabels
def visualise_priority_queue(self, feature='losses'):
"""
Produces plot of priority queue (losses or counts)
Discrete vs continuous, 2d heatmap vs 3d.
:param feature: which aspect of queue to visualise. 'losses' or 'counts'
:retrun fig: matplotlib figure showing heatmap of priority queue feature
"""
if type(self._queue) == np.ndarray:
if len(self._queue.shape) == 2:
fig = plt.figure()
if feature == 'losses':
plt.imshow(self._queue)
elif feature == 'counts':
plt.imshow(self.sample_counts)
else:
raise ValueError("feature type not recognised. Use 'losses' or 'counts'")
plt.colorbar()
plt.xlabel("Phase")
plt.ylabel("Amplitude")
# set labels to sine specific parameter ranges
# plt.xticks(
# locs=self.figure_locsx,
# labels=self.figure_labelsx
# )
# plt.yticks(
# locs=self.figure_locsy,
# labels=self.figure_labelsy
# )
return fig
else:
warnings.warn("Visualisation with parameter space dimension > 2 not supported", Warning)
return None
else:
raise NotImplementedError("Visualisation for dictionary queue not implemented")
def visualise_priority_queue_loss_distribution(self):
"""
Produces probability distribution plot of losses in the priority queue
"""
all_losses = self._queue.flatten()
hist, bin_edges = np.histogram(all_losses, bins=int(0.1 * len(all_losses)))
bin_centers = (bin_edges[:-1] + bin_edges[1:]) / 2
fig = plt.figure()
plt.plot(bin_centers, hist)
return fig
| if self.priority_sample and not validate:
# query queue for next task parameters
max_indices, task_parameters, task_probability = self.priority_queue.query(step=step_count)
all_max_indices.append(max_indices)
task_probabilities.append(task_probability)
# get epsilon value
epsilon = self.priority_queue.get_epsilon()
# get task from parameters returned from query
amplitude = task_parameters[0]
phase = task_parameters[1]
if self.task_type == 'sin3d':
frequency_scaling = task_parameters[2]
else:
frequency_scaling = 1.
parameters = [amplitude, phase, frequency_scaling]
task = self._get_task_from_params(parameters=parameters)
# compute metrics for tb logging
queue_count_loss_correlation = self.priority_queue.compute_count_loss_correlation()
queue_mean = np.mean(self.priority_queue.get_queue())
queue_std = np.std(self.priority_queue.get_queue())
# write to tensorboard
if epsilon:
self.writer.add_scalar('queue_metrics/epsilon', epsilon, step_count)
self.writer.add_scalar('queue_metrics/queue_correlation', queue_count_loss_correlation, step_count)
self.writer.add_scalar('queue_metrics/queue_mean', queue_mean, step_count)
self.writer.add_scalar('queue_metrics/queue_std', queue_std, step_count)
else:
# sample randomly (vanilla maml)
amplitude = random.uniform(self.amplitude_bounds[0], self.amplitude_bounds[1])
phase = random.uniform(self.phase_bounds[0], self.phase_bounds[1])
if self.task_type == 'sin3d':
frequency_scaling = random.uniform(self.frequency_bounds[0], self.frequency_bounds[1])
else:
frequency_scaling = 1.
parameters = [amplitude, phase, frequency_scaling]
task = self._get_task_from_params(parameters=parameters)
tasks.append(task) | conditional_block |
jax_sinusoid.py | from .jax_model import MAML
from utils.priority import PriorityQueue
import copy
import math
import random
import numpy as np
import matplotlib.pyplot as plt
import warnings
from typing import Any, Dict, List, Tuple
from jax.experimental import stax # neural network library
from jax.experimental import optimizers
from jax.experimental.stax import Conv, Dense, MaxPool, Relu, Flatten, LogSoftmax
class SineMAML(MAML):
def __init__(self, params, device):
self.device = device
self.task_type = params.get('task_type')
# extract relevant task-specific parameters
self.amplitude_bounds = params.get(['sin2d', 'amplitude_bounds'])
self.domain_bounds = params.get(['sin2d', 'domain_bounds'])
degree_phase_bounds = params.get(['sin2d', 'phase_bounds']) # phase given in degrees
if self.task_type == 'sin3d':
self.frequency_bounds = params.get(['sin3d', 'frequency_bounds'])
block_sizes = params.get(['sin3d', 'fixed_val_blocks'])
else:
block_sizes = params.get(['sin2d', 'fixed_val_blocks'])
# convert phase bounds/ fixed_val_interval from degrees to radians
self.phase_bounds = [
degree_phase_bounds[0] * (2 * np.pi) / 360, degree_phase_bounds[1] * (2 * np.pi) / 360
]
block_sizes[1] = block_sizes[1] * (2 * np.pi) / 360
self.validation_block_sizes = block_sizes
MAML.__init__(self, params)
def _get_model(self):
"""
Return jax network initialisation and forward method.
"""
layers = []
# inner / hidden network layers + non-linearities
for l in self.network_layers:
layers.append(Dense(l))
layers.append(Relu)
# output layer (no non-linearity)
layers.append(Dense(self.output_dimension))
# make jax stax object
model = stax.serial(*layers)
return model
def _get_optimiser(self):
"""
Return jax optimiser: initialisation, update method and parameter getter method.
Optimiser learning rate is given by config (meta_lr).
"""
return optimizers.adam(step_size=self.meta_lr)
def _get_priority_queue(self):
"""Initiate priority queue"""
if self.task_type == 'sin3d':
param_ranges = self.params.get(["priority_queue", "param_ranges_3d"])
block_sizes = self.params.get(["priority_queue", "block_sizes_3d"])
elif self.task_type == 'sin2d':
param_ranges = self.params.get(["priority_queue", "param_ranges_2d"])
block_sizes = self.params.get(["priority_queue", "block_sizes_2d"])
return SinePriorityQueue(
queue_resume=self.params.get(["resume", "priority_queue"]),
counts_resume=self.params.get(["resume", "queue_counts"]),
sample_type=self.params.get(["priority_queue", "sample_type"]),
block_sizes=block_sizes,
param_ranges=param_ranges,
initial_value=self.params.get(["priority_queue", "initial_value"]),
epsilon_start=self.params.get(["priority_queue", "epsilon_start"]),
epsilon_final=self.params.get(["priority_queue", "epsilon_final"]),
epsilon_decay_start=self.params.get(["priority_queue", "epsilon_decay_start"]),
epsilon_decay_rate=self.params.get(["priority_queue", "epsilon_decay_rate"]),
burn_in=self.params.get(["priority_queue", "burn_in"]),
save_path=self.checkpoint_path
)
def _sample_task(self, batch_size, validate=False, step_count=None):
"""
Sample specific task(s) from defined distribution of tasks
E.g. one specific sine function from family of sines
:param batch_size: number of tasks to sample
:param validate: whether or not tasks are being used for validation
:param step_count: step count during training
:return tasks: batch of tasks
:return task_indices: indices of priority queue associated with batch of tasks
:return task_probabilities: probabilities of tasks sampled being chosen a priori
Returns batch of sin functions shifted in x direction by a phase parameter sampled randomly between phase_bounds
(set by config) enlarged in the y direction by an amplitude parameter sampled randomly between amplitude_bounds
(also set by config). For 3d sine option, function is also squeezed in x direction by freuency parameter.
"""
tasks = []
task_probabilities = []
all_max_indices = [] if self.priority_sample else None
for _ in range(batch_size):
# sample a task from task distribution and generate x, y tensors for that task
if self.priority_sample and not validate:
# query queue for next task parameters
max_indices, task_parameters, task_probability = self.priority_queue.query(step=step_count)
all_max_indices.append(max_indices)
task_probabilities.append(task_probability)
# get epsilon value
epsilon = self.priority_queue.get_epsilon()
# get task from parameters returned from query
amplitude = task_parameters[0]
phase = task_parameters[1]
if self.task_type == 'sin3d':
frequency_scaling = task_parameters[2]
else:
frequency_scaling = 1.
parameters = [amplitude, phase, frequency_scaling]
task = self._get_task_from_params(parameters=parameters)
# compute metrics for tb logging
queue_count_loss_correlation = self.priority_queue.compute_count_loss_correlation()
queue_mean = np.mean(self.priority_queue.get_queue())
queue_std = np.std(self.priority_queue.get_queue())
# write to tensorboard
if epsilon:
self.writer.add_scalar('queue_metrics/epsilon', epsilon, step_count)
self.writer.add_scalar('queue_metrics/queue_correlation', queue_count_loss_correlation, step_count)
self.writer.add_scalar('queue_metrics/queue_mean', queue_mean, step_count)
self.writer.add_scalar('queue_metrics/queue_std', queue_std, step_count)
else:
# sample randomly (vanilla maml)
amplitude = random.uniform(self.amplitude_bounds[0], self.amplitude_bounds[1])
phase = random.uniform(self.phase_bounds[0], self.phase_bounds[1])
if self.task_type == 'sin3d':
frequency_scaling = random.uniform(self.frequency_bounds[0], self.frequency_bounds[1])
else:
frequency_scaling = 1.
parameters = [amplitude, phase, frequency_scaling]
task = self._get_task_from_params(parameters=parameters)
tasks.append(task)
return tasks, all_max_indices, task_probabilities
def | (self, parameters: List) -> Any:
"""
Return sine function defined by parameters given
:param parameters: parameters defining the specific sin task in the distribution
:return modified_sin: sin function
(method differs from _sample_task in that it is not a random sample but
defined by parameters given)
"""
amplitude = parameters[0]
phase = parameters[1]
frequency_scaling = parameters[2]
def modified_sin(x):
return amplitude * np.sin(phase + frequency_scaling * x)
return modified_sin
def _generate_batch(self, tasks: List):
"""
Obtain batch of training examples from a list of tasks
:param tasks: list of tasks for which data points need to be sampled
:return x_batch: x points sampled from data
:return y_batch: y points associated with x_batch
"""
x_batch = np.stack([np.random.uniform(low=self.domain_bounds[0], high=self.domain_bounds[1], size=(self.inner_update_k, 1)) for _ in range(len(tasks))])
y_batch = np.stack([[tasks[t](x) for x in x_batch[t]] for t in range(len(tasks))])
return x_batch, y_batch
def _compute_loss(self, parameters, inputs, ground_truth):
"""
Computes loss of network
:param parameters: current weights of model
:param inputs: x data
:param ground_truth: y_data
:return loss: loss on ground truth vs output of network applied to inputs
"""
predictions = self.network_forward(parameters, inputs)
loss = np.mean((ground_truth - predictions) ** 2)
return loss
def _visualise(self, model_iterations, task, validation_x, validation_y, save_name, visualise_all=True):
"""
Visualise qualitative run.
:param validation_model_iterations: parameters of model after successive fine-tuning steps
:param val_task: task being evaluated
:param validation_x_batch: k data points fed to model for finetuning
:param validation_y_batch: ground truth data associated with validation_x_batch
:param save_name: name of file to be saved
:param visualise_all: whether to visualise all fine-tuning steps or just final
"""
# ground truth
plot_x = np.linspace(self.domain_bounds[0], self.domain_bounds[1], 100)
plot_y_ground_truth = [task(xi) for xi in plot_x]
fig = plt.figure()
plt.plot(plot_x, plot_y_ground_truth, label="Ground Truth")
final_plot_y_prediction = self.network_forward(model_iterations[-1], plot_x.reshape(len(plot_x), 1))
plt.plot(plot_x, final_plot_y_prediction, linestyle='dashed', linewidth=3.0, label='Fine-tuned MAML final update')
no_tuning_y_prediction = self.network_forward(model_iterations[0], plot_x.reshape(len(plot_x), 1))
plt.plot(plot_x, no_tuning_y_prediction, linestyle='dashed', linewidth=3.0, label='Untuned MAML prediction')
if visualise_all:
for i, (model_iteration) in enumerate(model_iterations[1:-1]):
plot_y_prediction = self.network_forward(model_iteration, plot_x.reshape(len(plot_x), 1))
plt.plot(plot_x, plot_y_prediction, linestyle='dashed') #, label='Fine-tuned MAML {} update'.format(i))
plt.scatter(validation_x, validation_y, marker='o', label='K Points')
plt.title("Validation of Sinusoid Meta-Regression")
plt.xlabel(r"x")
plt.ylabel(r"sin(x)")
plt.legend()
# fig.savefig(self.params.get("checkpoint_path") + save_name)
plt.close()
return fig
def _get_fixed_validation_tasks(self):
"""
If using fixed validation this method returns a set of tasks that are
equally spread across the task distribution space.
"""
# mesh of equally partitioned state space
if self.task_type == 'sin3d':
amplitude_spectrum, phase_spectrum, frequency_spectrum = np.mgrid[
self.amplitude_bounds[0]:self.amplitude_bounds[1]:self.validation_block_sizes[0],
self.phase_bounds[0]:self.phase_bounds[1]:self.validation_block_sizes[1],
self.frequency_bounds[0]:self.frequency_bounds[1]:self.validation_block_sizes[2]
]
parameter_space_tuples = np.vstack((amplitude_spectrum.flatten(), phase_spectrum.flatten(), frequency_spectrum.flatten())).T
else:
amplitude_spectrum, phase_spectrum = np.mgrid[
self.amplitude_bounds[0]:self.amplitude_bounds[1]:self.validation_block_sizes[0],
self.phase_bounds[0]:self.phase_bounds[1]:self.validation_block_sizes[1]
]
parameter_space_tuples = np.vstack((amplitude_spectrum.flatten(), phase_spectrum.flatten())).T
fixed_validation_tasks = []
def generate_sin(amplitude, phase, frequency=1):
def modified_sin(x):
return amplitude * np.sin(phase + frequency * x)
return modified_sin
for param_pair in parameter_space_tuples:
if self.task_type == 'sin3d':
fixed_validation_tasks.append(generate_sin(amplitude=param_pair[0], phase=param_pair[1], frequency=param_pair[2]))
else:
fixed_validation_tasks.append(generate_sin(amplitude=param_pair[0], phase=param_pair[1]))
return parameter_space_tuples, fixed_validation_tasks
class SinePriorityQueue(PriorityQueue):
def __init__(self,
block_sizes: Dict[str, float], param_ranges: List[Tuple[float, float]],
sample_type: str, epsilon_start: float, epsilon_final: float, epsilon_decay_rate: float, epsilon_decay_start: int,
queue_resume: str, counts_resume: str, save_path: str, burn_in: int=None, initial_value: float=None
):
# convert phase bounds/ phase block_size from degrees to radians
phase_ranges = [
param_ranges[1][0] * (2 * np.pi) / 360, param_ranges[1][1] * (2 * np.pi) / 360
]
phase_block_size = block_sizes[1] * (2 * np.pi) / 360
param_ranges[1] = phase_ranges
block_sizes[1] = phase_block_size
super().__init__(
block_sizes=block_sizes, param_ranges=param_ranges, sample_type=sample_type, epsilon_start=epsilon_start,
epsilon_final=epsilon_final, epsilon_decay_rate=epsilon_decay_rate, epsilon_decay_start=epsilon_decay_start, queue_resume=queue_resume,
counts_resume=counts_resume, save_path=save_path, burn_in=burn_in, initial_value=initial_value
)
self.figure_locsx, self.figure_locsy, self.figure_labelsx, self.figure_labelsy = self._get_figure_labels()
def _get_figure_labels(self):
xlocs = np.arange(0, self._queue.shape[1])
ylocs = np.arange(0, self._queue.shape[0])
xlabels = np.arange(self.param_ranges[1][0], self.param_ranges[1][1], self.block_sizes[1])
ylabels = np.arange(self.param_ranges[0][0], self.param_ranges[0][1], self.block_sizes[0])
return xlocs, ylocs, xlabels, ylabels
def visualise_priority_queue(self, feature='losses'):
"""
Produces plot of priority queue (losses or counts)
Discrete vs continuous, 2d heatmap vs 3d.
:param feature: which aspect of queue to visualise. 'losses' or 'counts'
:retrun fig: matplotlib figure showing heatmap of priority queue feature
"""
if type(self._queue) == np.ndarray:
if len(self._queue.shape) == 2:
fig = plt.figure()
if feature == 'losses':
plt.imshow(self._queue)
elif feature == 'counts':
plt.imshow(self.sample_counts)
else:
raise ValueError("feature type not recognised. Use 'losses' or 'counts'")
plt.colorbar()
plt.xlabel("Phase")
plt.ylabel("Amplitude")
# set labels to sine specific parameter ranges
# plt.xticks(
# locs=self.figure_locsx,
# labels=self.figure_labelsx
# )
# plt.yticks(
# locs=self.figure_locsy,
# labels=self.figure_labelsy
# )
return fig
else:
warnings.warn("Visualisation with parameter space dimension > 2 not supported", Warning)
return None
else:
raise NotImplementedError("Visualisation for dictionary queue not implemented")
def visualise_priority_queue_loss_distribution(self):
"""
Produces probability distribution plot of losses in the priority queue
"""
all_losses = self._queue.flatten()
hist, bin_edges = np.histogram(all_losses, bins=int(0.1 * len(all_losses)))
bin_centers = (bin_edges[:-1] + bin_edges[1:]) / 2
fig = plt.figure()
plt.plot(bin_centers, hist)
return fig
| _get_task_from_params | identifier_name |
wsrest_face_detect_kinect.py | #! /usr/bin/python
__author__="marcus"
__date__ ="$Aug 22, 2011 5:36:00 PM$"
import web
import cStringIO
import time
import urllib2
import os
import xml_util
import http_util
import global_data
"""
This program is demonstration for face and object detection using haar-like features.
The program finds faces in a camera image or video stream and displays a red box around them.
Original C implementation by: ?
Python implementation by: Roman Stanchak
"""
from opencv.cv import *
from opencv.highgui import *
from PIL import Image
from opencv import adaptors
# Global Variables
cascade = None
capture = None
storage = cvCreateMemStorage(0)
cascade_name = "./haarcascades/haarcascade_frontalface_alt.xml"
input_name = "../c/lena.jpg"
# Parameters for haar detection
# From the API:
# The default parameters (scale_factor=1.1, min_neighbors=3, flags=0) are tuned
# for accurate yet slow object detection. For a faster operation on real video
# images the settings are:
# scale_factor=1.2, min_neighbors=2, flags=CV_HAAR_DO_CANNY_PRUNING,
# min_size=<minimum possible face size
min_size = cvSize(20,20)
image_scale = 1.3
haar_scale = 1.2
min_neighbors = 2
haar_flags = CV_HAAR_DO_CANNY_PRUNING #0
init_opencv = True
urls = (
'/(.*)', 'index'
)
web.config.debug = True # Debug true causa problema quando usado junto com sessoes.
snapshot_faces_dir = os.environ['HOME'] + "/robot_/faces/"
resource_name = "FaceDetection"
resource = { \
"status" : "ready", \
"num_faces" : 0, \
"screen_width" : "", \
"screen_height" : "", \
"response_time" : 0, \
"id" : 0 \
}
# ToDo: implementar seguranca de acesso usando sessao.
#session = web.session.Session(app, web.session.DiskStore('sessions'))
#, initializer={'logged_in': False})
class index:
# ========== Inicializacao unica por classe =================
# Variaveis declaradas aqui sao usadas na classe usando self.
url_wsr_kinect_rgb = "http://localhost:8094/rgb_image/"
# # Conectando com o BD.
# db = db_util.connect()
# ============ Final da Inicializacao unica por classe =================
def GET(self, name):
# web.header('Content-Type', 'application/xml')
global resource
# autorizacao.
server_timeout,client_timeout,client_username\
=http_util.http_authorization(global_data.host_auth\
,global_data.access_token\
,http_util.http_get_token_in_header(web))
# resposta.
if server_timeout<=0 or client_timeout<=0:
web.ctx.status='401 Unauthorized'
return
t0 = int(time.time() * 1000)
# Chamada get sem parametro.
if not name:
web.header('Content-Type', 'application/xml')
# Detectar faces.
result = self.detect_and_draw()
# resource_xml = xml_util.dict_to_rdfxml(result, "face_detect")
# Gravar no BD quando detectar faces.
if result['num_faces'] > 0:
# db_util.persist_resource(self.db, 'face_detect', resource_xml)
# log no servico stm
resource_rdfxml=xml_util.dict_to_rdfxml(result,"face_detect")
xml_response=http_util.http_request('post'\
,global_data.host_stm,"/"\
,None,global_data.access_token,resource_rdfxml)
web.debug("GET time: " + str(int(time.time() * 1000) - t0) + "ms")
return resource_xml
# Chamada com nome do arquivo solicitado.
else:
full_image_path = (snapshot_faces_dir + name)
try:
# retornando a partir do arquivo gravado.
f=open(full_image_path,"rb")
web.header('Content-Type', 'image/png')
return f.read()
except IOError:
# arquivo nao existe.
web.debug("no file")
web.header('Content-Type', 'application/xml')
return "<error>file not found</error>"
finally:
web.debug("GET time: " + str(int(time.time() * 1000) - t0) + "ms")
def detect_and_draw(self):#, resource_params):
# web.debug('detect draw')
t0 = int(time.time() * 1000)
global init_opencv, cascade, capture, storage
if init_opencv:
init_opencv = False
# the OpenCV API says this function is obsolete, but we can't
# cast the output of cvLoad to a HaarClassifierCascade, so use this anyways
# the size parameter is ignored
cascade = cvLoadHaarClassifierCascade( cascade_name, cvSize(1,1) );
if not cascade:
web.debug("ERROR: Could not load classifier cascade")
raise ValueError('[ERRO] Could not load classifier cascade.')
frame_copy = None
# Buscando imagem atual do servico kinect (rgb snapshot).
# inserindo o token no header
req=urllib2.Request(self.url_wsr_kinect_rgb)
req.add_header("Authenticate",("token="+global_data.access_token))
file=urllib2.urlopen(req)
# file = urllib.urlopen(self.url_wsr_kinect_rgb)
# close?
# urllib2 module sends HTTP/1.1 requests with Connection:close header included.
# Convertendo para formato PIL
im=cStringIO.StringIO(file.read())
pil_img=Image.open(im)
# Convertendo de formato PIL para IPL-OpenCv.
frame = adaptors.PIL2Ipl(pil_img)
if( not frame ):
web.debug("[INFO] Not a frame")
if( not frame_copy ):
frame_copy = cvCreateImage( cvSize(frame.width,frame.height), \
IPL_DEPTH_8U, frame.nChannels )
if( frame.origin == IPL_ORIGIN_TL ):
cvCopy( frame, frame_copy )
else:
cvFlip( frame, frame_copy, 0 )
img = frame_copy
# Criando copia default do recurso passado como parametro.
global resource
resource_result = {}
# Identificador unico da resposta.
id_response = int(time.time() * 10000) # decimo de milesimo de segundo.
resource_result['id'] = id_response
# Populando recurso resultado.
# resource_result['_max_num_faces'] = resource_params['_max_num_faces']
resource_result["image_full"] = str(id_response) + "_full.png"
resource_result['screen_width'] = str(img.width)
resource_result['screen_height'] = str(img.height)
# allocate temporary images
gray = cvCreateImage( cvSize(img.width,img.height), 8, 1 )
small_img = cvCreateImage( cvSize( cvRound (img.width/image_scale), \
cvRound (img.height/image_scale)), 8, 1 )
# convert color input image to grayscale
cvCvtColor( img, gray, CV_BGR2GRAY )
# scale input image for faster processing
cvResize( gray, small_img, CV_INTER_LINEAR )
#
cvEqualizeHist( small_img, small_img )
cvClearMemStorage( storage )
if( cascade ):
# t = cvGetTickCount()
t = int(time.time() * 1000)
# for i in range(1,6):
# cvClearMemStorage( storage )
faces = cvHaarDetectObjects( small_img, cascade, storage,
haar_scale, min_neighbors, haar_flags, min_size )
# t = cvGetTickCount() - t
t = int(time.time() * 1000) - t
# print "detection time = %gms" % (t/(cvGetTickFrequency()*1000.))
web.debug('detection time = ' + str(t) + 'ms image_scale = ' \
+ str(image_scale))
faces_dict={}
num_faces = 0
# loop das faces detectadas
if faces:
for r in faces:
# pt1 = cvPoint( int(r.x*image_scale), int(r.y*image_scale))
# pt2 = cvPoint( int((r.x+r.width)*image_scale), int((r.y+r.height)*image_scale) )
x1 = int(r.x*image_scale)
x2 = int((r.x+r.width)*image_scale)
dx1 = int(r.width*image_scale*0.12)
dx2 = (-1)*int(r.width*image_scale*0.13)
lx = int(x2+dx2-(x1+dx1))
ly = int(lx * 1.2 )
# ly = int(y2+dy2-(y1+dy1))
y1 = int(r.y*image_scale)
dy1 = int(r.height*image_scale*0.17)
pt1 = cvPoint( x1 + dx1 , y1 + dy1 )
y2 = y1+dy1 + ly
# y2 = int((r.y+r.height)*image_scale)
dy2 = 0 # int(r.height*image_scale*0.05)
pt2 = cvPoint( x2 + dx2 , y2 + dy2 )
# cvRectangle( gray, pt1, pt2, CV_RGB(255,0,0), 2, 8, 0 );
face_name="face"+str(num_faces)
faces_dict[face_name+"_x"]=str(x1+dx1)
faces_dict[face_name+"_y"]=str(y1+dy1)
faces_dict[face_name+"_sx"]=str(int(x2+dx2-(x1+dx1)))
faces_dict[face_name+"_sy"]=str(int(y2+dy2-(y1+dy1)))
# sub-image
sub_image = cvCreateImage( \
(lx, ly), 8, 1) # Parameters overwritten anyway...
sub_image = cvGetSubRect( img \
, ( x1+dx1, y1+dy1 , lx, ly ) )
sub_gray = cvCreateImage( \
cvSize(sub_image.width,sub_image.height), 8, 1 )
final_gray = cvCreateImage( \
cvSize(125, 150), 8, 1 )
# convert color input image to grayscale
cvCvtColor( sub_image, sub_gray, CV_BGR2GRAY )
# scale input image for faster processing
cvResize( sub_gray, final_gray, CV_INTER_LINEAR )
#
# cvEqualizeHist( final_gray, final_gray )
# Gravando faces detectadas.
#cvSaveImage("/home/marcus/Desktop/faces/"
# cvSaveImage("/var/www/portal/images/faces/"
filename = snapshot_faces_dir \
+ str(id_response) + "_face" + str(num_faces) + ".png"
cvSaveImage(filename, final_gray)
faces_dict[face_name+"_filename"]=filename
num_faces = num_faces + 1
web.debug('faces count = ' + str(num_faces))
# web.debug('details of faces: ' + str(faces_dict))
# gravar imagem completa.
if num_faces > 0:
filename = snapshot_faces_dir \
+ str(id_response) + "_full" + ".png"
cvSaveImage(filename, img)
resource_result['num_faces'] = num_faces
resource_result['faces'] = faces_dict
t0 = int(time.time() * 1000) - t0 | return resource_result
# def __done__(self):
# # disconnect from server
# self.db.close()
application = web.application(urls, globals())
if __name__ == "__main__":
global_data.access_token="anonymous"
global_data.realm="realm@som4r.net"
global_data.user="face_detect"
global_data.passwd="123456"
global_data.host_auth="localhost:8012"
global_data.host_stm="localhost:8098"
# autenticando este servidor.
# authenticate and get token.
global_data.access_token,xml_resource\
=http_util.http_authentication(global_data.realm\
,global_data.host_auth,global_data.user,global_data.passwd)
# print "token="+str(global_data.access_token)
if global_data.access_token is None \
or (not isinstance(global_data.access_token,str)):
print "[ERROR] cannot get token."
application.run()
# Comando para rodar no apache.
#application = web.application(urls, globals()).wsgifunc() | # web.debug('detect draw total time: ' + str(t0) + " ms")
resource_result['response_time'] = t0
| random_line_split |
wsrest_face_detect_kinect.py | #! /usr/bin/python
__author__="marcus"
__date__ ="$Aug 22, 2011 5:36:00 PM$"
import web
import cStringIO
import time
import urllib2
import os
import xml_util
import http_util
import global_data
"""
This program is demonstration for face and object detection using haar-like features.
The program finds faces in a camera image or video stream and displays a red box around them.
Original C implementation by: ?
Python implementation by: Roman Stanchak
"""
from opencv.cv import *
from opencv.highgui import *
from PIL import Image
from opencv import adaptors
# Global Variables
cascade = None
capture = None
storage = cvCreateMemStorage(0)
cascade_name = "./haarcascades/haarcascade_frontalface_alt.xml"
input_name = "../c/lena.jpg"
# Parameters for haar detection
# From the API:
# The default parameters (scale_factor=1.1, min_neighbors=3, flags=0) are tuned
# for accurate yet slow object detection. For a faster operation on real video
# images the settings are:
# scale_factor=1.2, min_neighbors=2, flags=CV_HAAR_DO_CANNY_PRUNING,
# min_size=<minimum possible face size
min_size = cvSize(20,20)
image_scale = 1.3
haar_scale = 1.2
min_neighbors = 2
haar_flags = CV_HAAR_DO_CANNY_PRUNING #0
init_opencv = True
urls = (
'/(.*)', 'index'
)
web.config.debug = True # Debug true causa problema quando usado junto com sessoes.
snapshot_faces_dir = os.environ['HOME'] + "/robot_/faces/"
resource_name = "FaceDetection"
resource = { \
"status" : "ready", \
"num_faces" : 0, \
"screen_width" : "", \
"screen_height" : "", \
"response_time" : 0, \
"id" : 0 \
}
# ToDo: implementar seguranca de acesso usando sessao.
#session = web.session.Session(app, web.session.DiskStore('sessions'))
#, initializer={'logged_in': False})
class index:
# ========== Inicializacao unica por classe =================
# Variaveis declaradas aqui sao usadas na classe usando self.
url_wsr_kinect_rgb = "http://localhost:8094/rgb_image/"
# # Conectando com o BD.
# db = db_util.connect()
# ============ Final da Inicializacao unica por classe =================
def GET(self, name):
# web.header('Content-Type', 'application/xml')
global resource
# autorizacao.
server_timeout,client_timeout,client_username\
=http_util.http_authorization(global_data.host_auth\
,global_data.access_token\
,http_util.http_get_token_in_header(web))
# resposta.
if server_timeout<=0 or client_timeout<=0:
web.ctx.status='401 Unauthorized'
return
t0 = int(time.time() * 1000)
# Chamada get sem parametro.
if not name:
web.header('Content-Type', 'application/xml')
# Detectar faces.
result = self.detect_and_draw()
# resource_xml = xml_util.dict_to_rdfxml(result, "face_detect")
# Gravar no BD quando detectar faces.
if result['num_faces'] > 0:
# db_util.persist_resource(self.db, 'face_detect', resource_xml)
# log no servico stm
resource_rdfxml=xml_util.dict_to_rdfxml(result,"face_detect")
xml_response=http_util.http_request('post'\
,global_data.host_stm,"/"\
,None,global_data.access_token,resource_rdfxml)
web.debug("GET time: " + str(int(time.time() * 1000) - t0) + "ms")
return resource_xml
# Chamada com nome do arquivo solicitado.
else:
full_image_path = (snapshot_faces_dir + name)
try:
# retornando a partir do arquivo gravado.
f=open(full_image_path,"rb")
web.header('Content-Type', 'image/png')
return f.read()
except IOError:
# arquivo nao existe.
web.debug("no file")
web.header('Content-Type', 'application/xml')
return "<error>file not found</error>"
finally:
web.debug("GET time: " + str(int(time.time() * 1000) - t0) + "ms")
def detect_and_draw(self):#, resource_params):
# web.debug('detect draw')
|
# def __done__(self):
# # disconnect from server
# self.db.close()
application = web.application(urls, globals())
if __name__ == "__main__":
global_data.access_token="anonymous"
global_data.realm="realm@som4r.net"
global_data.user="face_detect"
global_data.passwd="123456"
global_data.host_auth="localhost:8012"
global_data.host_stm="localhost:8098"
# autenticando este servidor.
# authenticate and get token.
global_data.access_token,xml_resource\
=http_util.http_authentication(global_data.realm\
,global_data.host_auth,global_data.user,global_data.passwd)
# print "token="+str(global_data.access_token)
if global_data.access_token is None \
or (not isinstance(global_data.access_token,str)):
print "[ERROR] cannot get token."
application.run()
# Comando para rodar no apache.
#application = web.application(urls, globals()).wsgifunc()
| t0 = int(time.time() * 1000)
global init_opencv, cascade, capture, storage
if init_opencv:
init_opencv = False
# the OpenCV API says this function is obsolete, but we can't
# cast the output of cvLoad to a HaarClassifierCascade, so use this anyways
# the size parameter is ignored
cascade = cvLoadHaarClassifierCascade( cascade_name, cvSize(1,1) );
if not cascade:
web.debug("ERROR: Could not load classifier cascade")
raise ValueError('[ERRO] Could not load classifier cascade.')
frame_copy = None
# Buscando imagem atual do servico kinect (rgb snapshot).
# inserindo o token no header
req=urllib2.Request(self.url_wsr_kinect_rgb)
req.add_header("Authenticate",("token="+global_data.access_token))
file=urllib2.urlopen(req)
# file = urllib.urlopen(self.url_wsr_kinect_rgb)
# close?
# urllib2 module sends HTTP/1.1 requests with Connection:close header included.
# Convertendo para formato PIL
im=cStringIO.StringIO(file.read())
pil_img=Image.open(im)
# Convertendo de formato PIL para IPL-OpenCv.
frame = adaptors.PIL2Ipl(pil_img)
if( not frame ):
web.debug("[INFO] Not a frame")
if( not frame_copy ):
frame_copy = cvCreateImage( cvSize(frame.width,frame.height), \
IPL_DEPTH_8U, frame.nChannels )
if( frame.origin == IPL_ORIGIN_TL ):
cvCopy( frame, frame_copy )
else:
cvFlip( frame, frame_copy, 0 )
img = frame_copy
# Criando copia default do recurso passado como parametro.
global resource
resource_result = {}
# Identificador unico da resposta.
id_response = int(time.time() * 10000) # decimo de milesimo de segundo.
resource_result['id'] = id_response
# Populando recurso resultado.
# resource_result['_max_num_faces'] = resource_params['_max_num_faces']
resource_result["image_full"] = str(id_response) + "_full.png"
resource_result['screen_width'] = str(img.width)
resource_result['screen_height'] = str(img.height)
# allocate temporary images
gray = cvCreateImage( cvSize(img.width,img.height), 8, 1 )
small_img = cvCreateImage( cvSize( cvRound (img.width/image_scale), \
cvRound (img.height/image_scale)), 8, 1 )
# convert color input image to grayscale
cvCvtColor( img, gray, CV_BGR2GRAY )
# scale input image for faster processing
cvResize( gray, small_img, CV_INTER_LINEAR )
#
cvEqualizeHist( small_img, small_img )
cvClearMemStorage( storage )
if( cascade ):
# t = cvGetTickCount()
t = int(time.time() * 1000)
# for i in range(1,6):
# cvClearMemStorage( storage )
faces = cvHaarDetectObjects( small_img, cascade, storage,
haar_scale, min_neighbors, haar_flags, min_size )
# t = cvGetTickCount() - t
t = int(time.time() * 1000) - t
# print "detection time = %gms" % (t/(cvGetTickFrequency()*1000.))
web.debug('detection time = ' + str(t) + 'ms image_scale = ' \
+ str(image_scale))
faces_dict={}
num_faces = 0
# loop das faces detectadas
if faces:
for r in faces:
# pt1 = cvPoint( int(r.x*image_scale), int(r.y*image_scale))
# pt2 = cvPoint( int((r.x+r.width)*image_scale), int((r.y+r.height)*image_scale) )
x1 = int(r.x*image_scale)
x2 = int((r.x+r.width)*image_scale)
dx1 = int(r.width*image_scale*0.12)
dx2 = (-1)*int(r.width*image_scale*0.13)
lx = int(x2+dx2-(x1+dx1))
ly = int(lx * 1.2 )
# ly = int(y2+dy2-(y1+dy1))
y1 = int(r.y*image_scale)
dy1 = int(r.height*image_scale*0.17)
pt1 = cvPoint( x1 + dx1 , y1 + dy1 )
y2 = y1+dy1 + ly
# y2 = int((r.y+r.height)*image_scale)
dy2 = 0 # int(r.height*image_scale*0.05)
pt2 = cvPoint( x2 + dx2 , y2 + dy2 )
# cvRectangle( gray, pt1, pt2, CV_RGB(255,0,0), 2, 8, 0 );
face_name="face"+str(num_faces)
faces_dict[face_name+"_x"]=str(x1+dx1)
faces_dict[face_name+"_y"]=str(y1+dy1)
faces_dict[face_name+"_sx"]=str(int(x2+dx2-(x1+dx1)))
faces_dict[face_name+"_sy"]=str(int(y2+dy2-(y1+dy1)))
# sub-image
sub_image = cvCreateImage( \
(lx, ly), 8, 1) # Parameters overwritten anyway...
sub_image = cvGetSubRect( img \
, ( x1+dx1, y1+dy1 , lx, ly ) )
sub_gray = cvCreateImage( \
cvSize(sub_image.width,sub_image.height), 8, 1 )
final_gray = cvCreateImage( \
cvSize(125, 150), 8, 1 )
# convert color input image to grayscale
cvCvtColor( sub_image, sub_gray, CV_BGR2GRAY )
# scale input image for faster processing
cvResize( sub_gray, final_gray, CV_INTER_LINEAR )
#
# cvEqualizeHist( final_gray, final_gray )
# Gravando faces detectadas.
#cvSaveImage("/home/marcus/Desktop/faces/"
# cvSaveImage("/var/www/portal/images/faces/"
filename = snapshot_faces_dir \
+ str(id_response) + "_face" + str(num_faces) + ".png"
cvSaveImage(filename, final_gray)
faces_dict[face_name+"_filename"]=filename
num_faces = num_faces + 1
web.debug('faces count = ' + str(num_faces))
# web.debug('details of faces: ' + str(faces_dict))
# gravar imagem completa.
if num_faces > 0:
filename = snapshot_faces_dir \
+ str(id_response) + "_full" + ".png"
cvSaveImage(filename, img)
resource_result['num_faces'] = num_faces
resource_result['faces'] = faces_dict
t0 = int(time.time() * 1000) - t0
# web.debug('detect draw total time: ' + str(t0) + " ms")
resource_result['response_time'] = t0
return resource_result | identifier_body |
wsrest_face_detect_kinect.py | #! /usr/bin/python
__author__="marcus"
__date__ ="$Aug 22, 2011 5:36:00 PM$"
import web
import cStringIO
import time
import urllib2
import os
import xml_util
import http_util
import global_data
"""
This program is demonstration for face and object detection using haar-like features.
The program finds faces in a camera image or video stream and displays a red box around them.
Original C implementation by: ?
Python implementation by: Roman Stanchak
"""
from opencv.cv import *
from opencv.highgui import *
from PIL import Image
from opencv import adaptors
# Global Variables
cascade = None
capture = None
storage = cvCreateMemStorage(0)
cascade_name = "./haarcascades/haarcascade_frontalface_alt.xml"
input_name = "../c/lena.jpg"
# Parameters for haar detection
# From the API:
# The default parameters (scale_factor=1.1, min_neighbors=3, flags=0) are tuned
# for accurate yet slow object detection. For a faster operation on real video
# images the settings are:
# scale_factor=1.2, min_neighbors=2, flags=CV_HAAR_DO_CANNY_PRUNING,
# min_size=<minimum possible face size
min_size = cvSize(20,20)
image_scale = 1.3
haar_scale = 1.2
min_neighbors = 2
haar_flags = CV_HAAR_DO_CANNY_PRUNING #0
init_opencv = True
urls = (
'/(.*)', 'index'
)
web.config.debug = True # Debug true causa problema quando usado junto com sessoes.
snapshot_faces_dir = os.environ['HOME'] + "/robot_/faces/"
resource_name = "FaceDetection"
resource = { \
"status" : "ready", \
"num_faces" : 0, \
"screen_width" : "", \
"screen_height" : "", \
"response_time" : 0, \
"id" : 0 \
}
# ToDo: implementar seguranca de acesso usando sessao.
#session = web.session.Session(app, web.session.DiskStore('sessions'))
#, initializer={'logged_in': False})
class index:
# ========== Inicializacao unica por classe =================
# Variaveis declaradas aqui sao usadas na classe usando self.
url_wsr_kinect_rgb = "http://localhost:8094/rgb_image/"
# # Conectando com o BD.
# db = db_util.connect()
# ============ Final da Inicializacao unica por classe =================
def GET(self, name):
# web.header('Content-Type', 'application/xml')
global resource
# autorizacao.
server_timeout,client_timeout,client_username\
=http_util.http_authorization(global_data.host_auth\
,global_data.access_token\
,http_util.http_get_token_in_header(web))
# resposta.
if server_timeout<=0 or client_timeout<=0:
web.ctx.status='401 Unauthorized'
return
t0 = int(time.time() * 1000)
# Chamada get sem parametro.
if not name:
web.header('Content-Type', 'application/xml')
# Detectar faces.
result = self.detect_and_draw()
# resource_xml = xml_util.dict_to_rdfxml(result, "face_detect")
# Gravar no BD quando detectar faces.
if result['num_faces'] > 0:
# db_util.persist_resource(self.db, 'face_detect', resource_xml)
# log no servico stm
resource_rdfxml=xml_util.dict_to_rdfxml(result,"face_detect")
xml_response=http_util.http_request('post'\
,global_data.host_stm,"/"\
,None,global_data.access_token,resource_rdfxml)
web.debug("GET time: " + str(int(time.time() * 1000) - t0) + "ms")
return resource_xml
# Chamada com nome do arquivo solicitado.
else:
full_image_path = (snapshot_faces_dir + name)
try:
# retornando a partir do arquivo gravado.
f=open(full_image_path,"rb")
web.header('Content-Type', 'image/png')
return f.read()
except IOError:
# arquivo nao existe.
web.debug("no file")
web.header('Content-Type', 'application/xml')
return "<error>file not found</error>"
finally:
web.debug("GET time: " + str(int(time.time() * 1000) - t0) + "ms")
def detect_and_draw(self):#, resource_params):
# web.debug('detect draw')
t0 = int(time.time() * 1000)
global init_opencv, cascade, capture, storage
if init_opencv:
init_opencv = False
# the OpenCV API says this function is obsolete, but we can't
# cast the output of cvLoad to a HaarClassifierCascade, so use this anyways
# the size parameter is ignored
cascade = cvLoadHaarClassifierCascade( cascade_name, cvSize(1,1) );
if not cascade:
web.debug("ERROR: Could not load classifier cascade")
raise ValueError('[ERRO] Could not load classifier cascade.')
frame_copy = None
# Buscando imagem atual do servico kinect (rgb snapshot).
# inserindo o token no header
req=urllib2.Request(self.url_wsr_kinect_rgb)
req.add_header("Authenticate",("token="+global_data.access_token))
file=urllib2.urlopen(req)
# file = urllib.urlopen(self.url_wsr_kinect_rgb)
# close?
# urllib2 module sends HTTP/1.1 requests with Connection:close header included.
# Convertendo para formato PIL
im=cStringIO.StringIO(file.read())
pil_img=Image.open(im)
# Convertendo de formato PIL para IPL-OpenCv.
frame = adaptors.PIL2Ipl(pil_img)
if( not frame ):
web.debug("[INFO] Not a frame")
if( not frame_copy ):
frame_copy = cvCreateImage( cvSize(frame.width,frame.height), \
IPL_DEPTH_8U, frame.nChannels )
if( frame.origin == IPL_ORIGIN_TL ):
cvCopy( frame, frame_copy )
else:
cvFlip( frame, frame_copy, 0 )
img = frame_copy
# Criando copia default do recurso passado como parametro.
global resource
resource_result = {}
# Identificador unico da resposta.
id_response = int(time.time() * 10000) # decimo de milesimo de segundo.
resource_result['id'] = id_response
# Populando recurso resultado.
# resource_result['_max_num_faces'] = resource_params['_max_num_faces']
resource_result["image_full"] = str(id_response) + "_full.png"
resource_result['screen_width'] = str(img.width)
resource_result['screen_height'] = str(img.height)
# allocate temporary images
gray = cvCreateImage( cvSize(img.width,img.height), 8, 1 )
small_img = cvCreateImage( cvSize( cvRound (img.width/image_scale), \
cvRound (img.height/image_scale)), 8, 1 )
# convert color input image to grayscale
cvCvtColor( img, gray, CV_BGR2GRAY )
# scale input image for faster processing
cvResize( gray, small_img, CV_INTER_LINEAR )
#
cvEqualizeHist( small_img, small_img )
cvClearMemStorage( storage )
if( cascade ):
# t = cvGetTickCount()
t = int(time.time() * 1000)
# for i in range(1,6):
# cvClearMemStorage( storage )
faces = cvHaarDetectObjects( small_img, cascade, storage,
haar_scale, min_neighbors, haar_flags, min_size )
# t = cvGetTickCount() - t
t = int(time.time() * 1000) - t
# print "detection time = %gms" % (t/(cvGetTickFrequency()*1000.))
web.debug('detection time = ' + str(t) + 'ms image_scale = ' \
+ str(image_scale))
faces_dict={}
num_faces = 0
# loop das faces detectadas
if faces:
|
resource_result['num_faces'] = num_faces
resource_result['faces'] = faces_dict
t0 = int(time.time() * 1000) - t0
# web.debug('detect draw total time: ' + str(t0) + " ms")
resource_result['response_time'] = t0
return resource_result
# def __done__(self):
# # disconnect from server
# self.db.close()
application = web.application(urls, globals())
if __name__ == "__main__":
global_data.access_token="anonymous"
global_data.realm="realm@som4r.net"
global_data.user="face_detect"
global_data.passwd="123456"
global_data.host_auth="localhost:8012"
global_data.host_stm="localhost:8098"
# autenticando este servidor.
# authenticate and get token.
global_data.access_token,xml_resource\
=http_util.http_authentication(global_data.realm\
,global_data.host_auth,global_data.user,global_data.passwd)
# print "token="+str(global_data.access_token)
if global_data.access_token is None \
or (not isinstance(global_data.access_token,str)):
print "[ERROR] cannot get token."
application.run()
# Comando para rodar no apache.
#application = web.application(urls, globals()).wsgifunc()
| for r in faces:
# pt1 = cvPoint( int(r.x*image_scale), int(r.y*image_scale))
# pt2 = cvPoint( int((r.x+r.width)*image_scale), int((r.y+r.height)*image_scale) )
x1 = int(r.x*image_scale)
x2 = int((r.x+r.width)*image_scale)
dx1 = int(r.width*image_scale*0.12)
dx2 = (-1)*int(r.width*image_scale*0.13)
lx = int(x2+dx2-(x1+dx1))
ly = int(lx * 1.2 )
# ly = int(y2+dy2-(y1+dy1))
y1 = int(r.y*image_scale)
dy1 = int(r.height*image_scale*0.17)
pt1 = cvPoint( x1 + dx1 , y1 + dy1 )
y2 = y1+dy1 + ly
# y2 = int((r.y+r.height)*image_scale)
dy2 = 0 # int(r.height*image_scale*0.05)
pt2 = cvPoint( x2 + dx2 , y2 + dy2 )
# cvRectangle( gray, pt1, pt2, CV_RGB(255,0,0), 2, 8, 0 );
face_name="face"+str(num_faces)
faces_dict[face_name+"_x"]=str(x1+dx1)
faces_dict[face_name+"_y"]=str(y1+dy1)
faces_dict[face_name+"_sx"]=str(int(x2+dx2-(x1+dx1)))
faces_dict[face_name+"_sy"]=str(int(y2+dy2-(y1+dy1)))
# sub-image
sub_image = cvCreateImage( \
(lx, ly), 8, 1) # Parameters overwritten anyway...
sub_image = cvGetSubRect( img \
, ( x1+dx1, y1+dy1 , lx, ly ) )
sub_gray = cvCreateImage( \
cvSize(sub_image.width,sub_image.height), 8, 1 )
final_gray = cvCreateImage( \
cvSize(125, 150), 8, 1 )
# convert color input image to grayscale
cvCvtColor( sub_image, sub_gray, CV_BGR2GRAY )
# scale input image for faster processing
cvResize( sub_gray, final_gray, CV_INTER_LINEAR )
#
# cvEqualizeHist( final_gray, final_gray )
# Gravando faces detectadas.
#cvSaveImage("/home/marcus/Desktop/faces/"
# cvSaveImage("/var/www/portal/images/faces/"
filename = snapshot_faces_dir \
+ str(id_response) + "_face" + str(num_faces) + ".png"
cvSaveImage(filename, final_gray)
faces_dict[face_name+"_filename"]=filename
num_faces = num_faces + 1
web.debug('faces count = ' + str(num_faces))
# web.debug('details of faces: ' + str(faces_dict))
# gravar imagem completa.
if num_faces > 0:
filename = snapshot_faces_dir \
+ str(id_response) + "_full" + ".png"
cvSaveImage(filename, img) | conditional_block |
wsrest_face_detect_kinect.py | #! /usr/bin/python
__author__="marcus"
__date__ ="$Aug 22, 2011 5:36:00 PM$"
import web
import cStringIO
import time
import urllib2
import os
import xml_util
import http_util
import global_data
"""
This program is demonstration for face and object detection using haar-like features.
The program finds faces in a camera image or video stream and displays a red box around them.
Original C implementation by: ?
Python implementation by: Roman Stanchak
"""
from opencv.cv import *
from opencv.highgui import *
from PIL import Image
from opencv import adaptors
# Global Variables
cascade = None
capture = None
storage = cvCreateMemStorage(0)
cascade_name = "./haarcascades/haarcascade_frontalface_alt.xml"
input_name = "../c/lena.jpg"
# Parameters for haar detection
# From the API:
# The default parameters (scale_factor=1.1, min_neighbors=3, flags=0) are tuned
# for accurate yet slow object detection. For a faster operation on real video
# images the settings are:
# scale_factor=1.2, min_neighbors=2, flags=CV_HAAR_DO_CANNY_PRUNING,
# min_size=<minimum possible face size
min_size = cvSize(20,20)
image_scale = 1.3
haar_scale = 1.2
min_neighbors = 2
haar_flags = CV_HAAR_DO_CANNY_PRUNING #0
init_opencv = True
urls = (
'/(.*)', 'index'
)
web.config.debug = True # Debug true causa problema quando usado junto com sessoes.
snapshot_faces_dir = os.environ['HOME'] + "/robot_/faces/"
resource_name = "FaceDetection"
resource = { \
"status" : "ready", \
"num_faces" : 0, \
"screen_width" : "", \
"screen_height" : "", \
"response_time" : 0, \
"id" : 0 \
}
# ToDo: implementar seguranca de acesso usando sessao.
#session = web.session.Session(app, web.session.DiskStore('sessions'))
#, initializer={'logged_in': False})
class | :
# ========== Inicializacao unica por classe =================
# Variaveis declaradas aqui sao usadas na classe usando self.
url_wsr_kinect_rgb = "http://localhost:8094/rgb_image/"
# # Conectando com o BD.
# db = db_util.connect()
# ============ Final da Inicializacao unica por classe =================
def GET(self, name):
# web.header('Content-Type', 'application/xml')
global resource
# autorizacao.
server_timeout,client_timeout,client_username\
=http_util.http_authorization(global_data.host_auth\
,global_data.access_token\
,http_util.http_get_token_in_header(web))
# resposta.
if server_timeout<=0 or client_timeout<=0:
web.ctx.status='401 Unauthorized'
return
t0 = int(time.time() * 1000)
# Chamada get sem parametro.
if not name:
web.header('Content-Type', 'application/xml')
# Detectar faces.
result = self.detect_and_draw()
# resource_xml = xml_util.dict_to_rdfxml(result, "face_detect")
# Gravar no BD quando detectar faces.
if result['num_faces'] > 0:
# db_util.persist_resource(self.db, 'face_detect', resource_xml)
# log no servico stm
resource_rdfxml=xml_util.dict_to_rdfxml(result,"face_detect")
xml_response=http_util.http_request('post'\
,global_data.host_stm,"/"\
,None,global_data.access_token,resource_rdfxml)
web.debug("GET time: " + str(int(time.time() * 1000) - t0) + "ms")
return resource_xml
# Chamada com nome do arquivo solicitado.
else:
full_image_path = (snapshot_faces_dir + name)
try:
# retornando a partir do arquivo gravado.
f=open(full_image_path,"rb")
web.header('Content-Type', 'image/png')
return f.read()
except IOError:
# arquivo nao existe.
web.debug("no file")
web.header('Content-Type', 'application/xml')
return "<error>file not found</error>"
finally:
web.debug("GET time: " + str(int(time.time() * 1000) - t0) + "ms")
def detect_and_draw(self):#, resource_params):
# web.debug('detect draw')
t0 = int(time.time() * 1000)
global init_opencv, cascade, capture, storage
if init_opencv:
init_opencv = False
# the OpenCV API says this function is obsolete, but we can't
# cast the output of cvLoad to a HaarClassifierCascade, so use this anyways
# the size parameter is ignored
cascade = cvLoadHaarClassifierCascade( cascade_name, cvSize(1,1) );
if not cascade:
web.debug("ERROR: Could not load classifier cascade")
raise ValueError('[ERRO] Could not load classifier cascade.')
frame_copy = None
# Buscando imagem atual do servico kinect (rgb snapshot).
# inserindo o token no header
req=urllib2.Request(self.url_wsr_kinect_rgb)
req.add_header("Authenticate",("token="+global_data.access_token))
file=urllib2.urlopen(req)
# file = urllib.urlopen(self.url_wsr_kinect_rgb)
# close?
# urllib2 module sends HTTP/1.1 requests with Connection:close header included.
# Convertendo para formato PIL
im=cStringIO.StringIO(file.read())
pil_img=Image.open(im)
# Convertendo de formato PIL para IPL-OpenCv.
frame = adaptors.PIL2Ipl(pil_img)
if( not frame ):
web.debug("[INFO] Not a frame")
if( not frame_copy ):
frame_copy = cvCreateImage( cvSize(frame.width,frame.height), \
IPL_DEPTH_8U, frame.nChannels )
if( frame.origin == IPL_ORIGIN_TL ):
cvCopy( frame, frame_copy )
else:
cvFlip( frame, frame_copy, 0 )
img = frame_copy
# Criando copia default do recurso passado como parametro.
global resource
resource_result = {}
# Identificador unico da resposta.
id_response = int(time.time() * 10000) # decimo de milesimo de segundo.
resource_result['id'] = id_response
# Populando recurso resultado.
# resource_result['_max_num_faces'] = resource_params['_max_num_faces']
resource_result["image_full"] = str(id_response) + "_full.png"
resource_result['screen_width'] = str(img.width)
resource_result['screen_height'] = str(img.height)
# allocate temporary images
gray = cvCreateImage( cvSize(img.width,img.height), 8, 1 )
small_img = cvCreateImage( cvSize( cvRound (img.width/image_scale), \
cvRound (img.height/image_scale)), 8, 1 )
# convert color input image to grayscale
cvCvtColor( img, gray, CV_BGR2GRAY )
# scale input image for faster processing
cvResize( gray, small_img, CV_INTER_LINEAR )
#
cvEqualizeHist( small_img, small_img )
cvClearMemStorage( storage )
if( cascade ):
# t = cvGetTickCount()
t = int(time.time() * 1000)
# for i in range(1,6):
# cvClearMemStorage( storage )
faces = cvHaarDetectObjects( small_img, cascade, storage,
haar_scale, min_neighbors, haar_flags, min_size )
# t = cvGetTickCount() - t
t = int(time.time() * 1000) - t
# print "detection time = %gms" % (t/(cvGetTickFrequency()*1000.))
web.debug('detection time = ' + str(t) + 'ms image_scale = ' \
+ str(image_scale))
faces_dict={}
num_faces = 0
# loop das faces detectadas
if faces:
for r in faces:
# pt1 = cvPoint( int(r.x*image_scale), int(r.y*image_scale))
# pt2 = cvPoint( int((r.x+r.width)*image_scale), int((r.y+r.height)*image_scale) )
x1 = int(r.x*image_scale)
x2 = int((r.x+r.width)*image_scale)
dx1 = int(r.width*image_scale*0.12)
dx2 = (-1)*int(r.width*image_scale*0.13)
lx = int(x2+dx2-(x1+dx1))
ly = int(lx * 1.2 )
# ly = int(y2+dy2-(y1+dy1))
y1 = int(r.y*image_scale)
dy1 = int(r.height*image_scale*0.17)
pt1 = cvPoint( x1 + dx1 , y1 + dy1 )
y2 = y1+dy1 + ly
# y2 = int((r.y+r.height)*image_scale)
dy2 = 0 # int(r.height*image_scale*0.05)
pt2 = cvPoint( x2 + dx2 , y2 + dy2 )
# cvRectangle( gray, pt1, pt2, CV_RGB(255,0,0), 2, 8, 0 );
face_name="face"+str(num_faces)
faces_dict[face_name+"_x"]=str(x1+dx1)
faces_dict[face_name+"_y"]=str(y1+dy1)
faces_dict[face_name+"_sx"]=str(int(x2+dx2-(x1+dx1)))
faces_dict[face_name+"_sy"]=str(int(y2+dy2-(y1+dy1)))
# sub-image
sub_image = cvCreateImage( \
(lx, ly), 8, 1) # Parameters overwritten anyway...
sub_image = cvGetSubRect( img \
, ( x1+dx1, y1+dy1 , lx, ly ) )
sub_gray = cvCreateImage( \
cvSize(sub_image.width,sub_image.height), 8, 1 )
final_gray = cvCreateImage( \
cvSize(125, 150), 8, 1 )
# convert color input image to grayscale
cvCvtColor( sub_image, sub_gray, CV_BGR2GRAY )
# scale input image for faster processing
cvResize( sub_gray, final_gray, CV_INTER_LINEAR )
#
# cvEqualizeHist( final_gray, final_gray )
# Gravando faces detectadas.
#cvSaveImage("/home/marcus/Desktop/faces/"
# cvSaveImage("/var/www/portal/images/faces/"
filename = snapshot_faces_dir \
+ str(id_response) + "_face" + str(num_faces) + ".png"
cvSaveImage(filename, final_gray)
faces_dict[face_name+"_filename"]=filename
num_faces = num_faces + 1
web.debug('faces count = ' + str(num_faces))
# web.debug('details of faces: ' + str(faces_dict))
# gravar imagem completa.
if num_faces > 0:
filename = snapshot_faces_dir \
+ str(id_response) + "_full" + ".png"
cvSaveImage(filename, img)
resource_result['num_faces'] = num_faces
resource_result['faces'] = faces_dict
t0 = int(time.time() * 1000) - t0
# web.debug('detect draw total time: ' + str(t0) + " ms")
resource_result['response_time'] = t0
return resource_result
# def __done__(self):
# # disconnect from server
# self.db.close()
application = web.application(urls, globals())
if __name__ == "__main__":
global_data.access_token="anonymous"
global_data.realm="realm@som4r.net"
global_data.user="face_detect"
global_data.passwd="123456"
global_data.host_auth="localhost:8012"
global_data.host_stm="localhost:8098"
# autenticando este servidor.
# authenticate and get token.
global_data.access_token,xml_resource\
=http_util.http_authentication(global_data.realm\
,global_data.host_auth,global_data.user,global_data.passwd)
# print "token="+str(global_data.access_token)
if global_data.access_token is None \
or (not isinstance(global_data.access_token,str)):
print "[ERROR] cannot get token."
application.run()
# Comando para rodar no apache.
#application = web.application(urls, globals()).wsgifunc()
| index | identifier_name |
platformAccessory.ts | import { Service, PlatformAccessory, CharacteristicValue } from 'homebridge';
import { ExtronMatrixSwitchHomebridgePlatform } from './platform';
import { telnetResponse } from './common';
import { PLUGIN_NAME } from './settings';
/**
* An interface that defines what fields are required for an preset on the
* Extron unit.
*/
interface ExtronPreset {
number: number;
name: string;
}
export class ExtronMatrixSwitchPlatformAccessory {
private avService: Service;
private lockService: Service;
private currentPreset = 0;
private presetsConfigured = false;
private lockingCode = '1X';
private updateInterval = 1000;
/**
* Default constructor that performs all initial setup.
* @param platform The platform implmented.
* @param accessory The accessory platform.
*/
constructor(
private readonly platform: ExtronMatrixSwitchHomebridgePlatform,
private readonly accessory: PlatformAccessory,
) {
// Set default update interval if not defined in configuration.
if(this.platform.config.updateInterval && this.platform.config.updateInterval !== undefined) {
this.updateInterval = this.platform.config.updateInterval * 1000;
}
this.accessory.getService(this.platform.Service.AccessoryInformation)!
.setCharacteristic(this.platform.Characteristic.Manufacturer, 'Extron, Inc.')
.setCharacteristic(this.platform.Characteristic.Model, 'Crosspoint ULTRA 88 HVA')
.setCharacteristic(this.platform.Characteristic.SerialNumber, this.platform.config.serialNumber);
this.avService = this.accessory.getService(this.platform.Service.Television)
|| this.accessory.addService(this.platform.Service.Television);
this.setupPresets();
const uuid = this.platform.api.hap.uuid.generate('homebridge:extron-matrix-switch' + accessory.context.device.displayName);
this.accessory.UUID = uuid;
this.accessory.category = this.platform.api.hap.Categories.AUDIO_RECEIVER;
this.avService.setCharacteristic(this.platform.Characteristic.Name, 'Extron');
this.avService.setCharacteristic(this.platform.Characteristic.SleepDiscoveryMode,
this.platform.Characteristic.SleepDiscoveryMode.ALWAYS_DISCOVERABLE);
this.avService.getCharacteristic(this.platform.Characteristic.Active)
.onSet(this.setOnOffState.bind(this))
.onGet(this.getOnOffState.bind(this));
this.getOnOffState();
this.updatePresetStatus();
this.avService.getCharacteristic(this.platform.Characteristic.ActiveIdentifier)
.onSet(async (value) => {
// the value will be the value you set for the Identifier Characteristic
// on the Input Source service that was selected - see input sources below.
await this.changeInput(parseInt(value.toString()));
});
// set the service name, this is what is displayed as the default name on the Home app
// in this example we are using the name we stored in the `accessory.context` in the `discoverDevices` method.
this.avService.setCharacteristic(this.platform.Characteristic.Name, accessory.context.device.displayName);
// This is required to be implemented, so currently it does not do anything useful.
this.avService.getCharacteristic(this.platform.Characteristic.RemoteKey)
.onSet((newValue) => {
switch(newValue) {
default: {
break;
}
}
});
// Setup lock
this.platform.config.lockLevel === 'level1' ? '1X' : '2X';
this.lockService = this.accessory.getService(this.platform.Service.LockMechanism)
|| this.accessory.addService(this.platform.Service.LockMechanism);
this.lockService.setCharacteristic(this.platform.Characteristic.Name, accessory.context.device.displayName);
this.lockService.getCharacteristic(this.platform.Characteristic.LockCurrentState)
.onGet(this.getCurrentLockState.bind(this)); | this.lockService.getCharacteristic(this.platform.Characteristic.LockTargetState)
.onSet(this.setTargetLockState.bind(this))
.onGet(this.getCurrentLockState.bind(this));
this.updateLockStatus();
// Set interval to keep HomeKit updated on the current state of power, input states,
// and lock status.
setInterval(async () => {
await this.getOnOffState();
await this.updatePresetStatus();
await this.updateLockStatus();
}, this.updateInterval);
this.platform.api.publishExternalAccessories(PLUGIN_NAME, [this.accessory]);
}
/**
* Setup all presets from the plugin configuration. Note, currently all presets are setup
* as Component (YPbPr) video inputs, but this does not appear to make a difference in
* Apple's Home.app at this time.
*
* Note that TypeScript/JavaScript and HomeKit 0-based arrays, but for user-convenience
* any labelled inputs are 1-based.
*/
setupPresets() {
if(!this.presetsConfigured) {
const presets = this.platform.config.presets as ExtronPreset[];
presets.forEach(async (name, i) => {
const inputService = this.accessory.getService('input' + i) ||
this.accessory.addService(this.platform.Service.InputSource, 'input' + i, name.name);
this.platform.log.info('Adding input %s as number %s', name.name, i);
inputService
.setCharacteristic(this.platform.Characteristic.Identifier, i)
.setCharacteristic(this.platform.Characteristic.ConfiguredName, name.name)
.setCharacteristic(this.platform.Characteristic.Name, name.name)
.setCharacteristic(this.platform.Characteristic.IsConfigured, this.platform.Characteristic.IsConfigured.CONFIGURED)
.setCharacteristic(this.platform.Characteristic.InputSourceType, this.platform.Characteristic.InputSourceType.COMPONENT_VIDEO)
.setCharacteristic(this.platform.Characteristic.CurrentVisibilityState,
this.platform.Characteristic.CurrentVisibilityState.SHOWN);
this.avService.addLinkedService(inputService);
});
this.presetsConfigured = true;
}
}
// #region Power Status
/**
* Returns the current state of the physical Extron unit itself.
* @returns The characterstic status of the Extron unit itself.
*/
async getOnOffState(): Promise<number> {
const muteResponse = await this.telnetCommand('WVM' + String.fromCharCode(13));
this.platform.log.debug('GET: Getting status of unit: %s', muteResponse);
// All zeroes indicates there are not active mutes; otherwise, mutes are in effect.
// Hence, if there are any other values in the response, we assume the unit is off.
const activeFromUnit = !muteResponse.split('').some(x => x === '1' || x === '2' || x === '3');
this.platform.log.debug('GET: Start Update Active State');
this.avService.updateCharacteristic(this.platform.Characteristic.Active, activeFromUnit);
return activeFromUnit ?
this.platform.Characteristic.Active.ACTIVE :
this.platform.Characteristic.Active.INACTIVE;
}
/**
* Sets the user-requested on or off state of the Extron unit.
* @param value The user requested on or off state.
*/
async setOnOffState(value: CharacteristicValue) {
this.platform.log.debug('SET: Setting status from HomeKit to: %s', value);
const command = (value === 1) ? '0*B' : '1*B';
const response = await this.telnetCommand(command);
this.platform.log.debug('SET: Received status of %s', response);
let active = this.platform.Characteristic.Active.INACTIVE;
if(response === 'Vmt0') {
active = this.platform.Characteristic.Active.ACTIVE;
this.platform.log.debug('SET: Unit is active');
}
this.avService.updateCharacteristic(this.platform.Characteristic.Active, active);
}
// #endregion Power Status
//#region Input Status
/**
* Updates the HomeKit staet of the preset that is currently active on the Extron
* unit itself.
*
* Note that when setting the current preset within HomeKit, the currentPreset value
* is decremented to handle 0- and 1-based arrays appropriately.
*/
async updatePresetStatus() {
// Preset "0" is the last set preset #, so query it to get the current state.
//const extronPreset = await this.getPreset(0);
const currentPreset = await this.telnetCommand('W0*1*1VC' + String.fromCharCode(13));
const currentExtronVideoPreset = parseInt(currentPreset.split(' ')[0]);
if(currentExtronVideoPreset !== this.currentPreset) {
this.currentPreset = currentExtronVideoPreset;
this.avService.updateCharacteristic(this.platform.Characteristic.ActiveIdentifier, --this.currentPreset);
}
}
/**
* Handles swiching of presets on Extron unit implemented using the HomeKit InputService.
* Will throw out errors if an out-of-bounds number is specified for an index that does
* not exist.
* @param value The HomeKit-specified input number to switch to.
*/
async changeInput(value: number) {
const newValue = value + 1;
this.platform.log.info('set Active Identifier => setNewValue: ' + newValue);
try {
const response = await this.telnetCommand(newValue + '.');
const responseIndex = newValue < 10 ? '0' + newValue : newValue.toString();
if(response === 'Rpr' + responseIndex) {
this.platform.log.info('Switched to preset ' + newValue + ': got response ' + response);
this.currentPreset = newValue;
this.avService.updateCharacteristic(this.platform.Characteristic.ActiveIdentifier, value);
} else {
switch(response) {
case 'E11':
this.platform.log.info('Preset number %s is out of range of this unit', newValue);
break;
default:
this.platform.log.info('Response does not match: %s with a string length of ', response, response.length);
}
}
} catch(error) {
this.platform.log.error('Error: ' + error);
}
}
//#endregion Input Status
//#region Lock Status
/**
* Returns the current panel lock status from the unit itself.
* @returns The current panel lock status.
*/
async updateLockStatus() : Promise<CharacteristicValue> {
const response = await this.telnetCommand('X');
if(response === '0') {
this.lockService.updateCharacteristic(this.platform.Characteristic.LockCurrentState,
this.platform.Characteristic.LockCurrentState.UNSECURED);
this.lockService.updateCharacteristic(this.platform.Characteristic.LockTargetState,
this.platform.Characteristic.LockTargetState.UNSECURED);
return this.platform.Characteristic.LockCurrentState.UNSECURED;
} else {
this.lockService.updateCharacteristic(this.platform.Characteristic.LockCurrentState,
this.platform.Characteristic.LockCurrentState.SECURED);
this.lockService.updateCharacteristic(this.platform.Characteristic.LockTargetState,
this.platform.Characteristic.LockTargetState.SECURED);
return this.platform.Characteristic.LockCurrentState.SECURED;
}
}
/**
* Sets the user-requested lock status.
* @param value The target value of what the lock should be.
*/
async setTargetLockState(value: CharacteristicValue) {
if(value === this.platform.Characteristic.LockTargetState.SECURED) {
const response = await this.telnetCommand(this.lockingCode);
if(response[3] === this.lockingCode[0]) {
this.lockService.updateCharacteristic(this.platform.Characteristic.LockCurrentState,
this.platform.Characteristic.LockCurrentState.SECURED);
this.platform.log.info('Lock returned a response of %s', response);
} else {
this.lockService.updateCharacteristic(this.platform.Characteristic.LockCurrentState,
this.platform.Characteristic.LockCurrentState.UNKNOWN);
this.platform.log.debug('Locking response was %s, Expected Exe%s', response, this.lockingCode[0]);
}
} else {
const response = await this.telnetCommand('0X');
if(response === 'Exe0') {
this.lockService.updateCharacteristic(this.platform.Characteristic.LockCurrentState,
this.platform.Characteristic.LockCurrentState.UNSECURED);
this.platform.log.info('Unlock returned a response of %s: ', response);
} else {
this.lockService.updateCharacteristic(this.platform.Characteristic.LockCurrentState,
this.platform.Characteristic.LockCurrentState.UNKNOWN);
this.platform.log.debug('Unlocking response was %s: Expected Exe0', response);
}
}
}
/**
* Gets the current lock state.
* @returns The current state of the lock service.
*/
async getCurrentLockState() {
return await this.updateLockStatus();
}
//#endregion Lock Status
// #region Support functions/methods
/**
* Runs a command on the telnet server and returns a response. Note that this may
* return an error.
* @param command The command to execute on the telnet server.
* @returns The results of the command.
*/
async telnetCommand(command: string): Promise<string> {
const response = await telnetResponse(this.platform.config.telnetSettings, command);
return response;
}
// #edregion Support functions/methods
} | random_line_split | |
platformAccessory.ts | import { Service, PlatformAccessory, CharacteristicValue } from 'homebridge';
import { ExtronMatrixSwitchHomebridgePlatform } from './platform';
import { telnetResponse } from './common';
import { PLUGIN_NAME } from './settings';
/**
* An interface that defines what fields are required for an preset on the
* Extron unit.
*/
interface ExtronPreset {
number: number;
name: string;
}
export class ExtronMatrixSwitchPlatformAccessory {
private avService: Service;
private lockService: Service;
private currentPreset = 0;
private presetsConfigured = false;
private lockingCode = '1X';
private updateInterval = 1000;
/**
* Default constructor that performs all initial setup.
* @param platform The platform implmented.
* @param accessory The accessory platform.
*/
constructor(
private readonly platform: ExtronMatrixSwitchHomebridgePlatform,
private readonly accessory: PlatformAccessory,
) {
// Set default update interval if not defined in configuration.
if(this.platform.config.updateInterval && this.platform.config.updateInterval !== undefined) {
this.updateInterval = this.platform.config.updateInterval * 1000;
}
this.accessory.getService(this.platform.Service.AccessoryInformation)!
.setCharacteristic(this.platform.Characteristic.Manufacturer, 'Extron, Inc.')
.setCharacteristic(this.platform.Characteristic.Model, 'Crosspoint ULTRA 88 HVA')
.setCharacteristic(this.platform.Characteristic.SerialNumber, this.platform.config.serialNumber);
this.avService = this.accessory.getService(this.platform.Service.Television)
|| this.accessory.addService(this.platform.Service.Television);
this.setupPresets();
const uuid = this.platform.api.hap.uuid.generate('homebridge:extron-matrix-switch' + accessory.context.device.displayName);
this.accessory.UUID = uuid;
this.accessory.category = this.platform.api.hap.Categories.AUDIO_RECEIVER;
this.avService.setCharacteristic(this.platform.Characteristic.Name, 'Extron');
this.avService.setCharacteristic(this.platform.Characteristic.SleepDiscoveryMode,
this.platform.Characteristic.SleepDiscoveryMode.ALWAYS_DISCOVERABLE);
this.avService.getCharacteristic(this.platform.Characteristic.Active)
.onSet(this.setOnOffState.bind(this))
.onGet(this.getOnOffState.bind(this));
this.getOnOffState();
this.updatePresetStatus();
this.avService.getCharacteristic(this.platform.Characteristic.ActiveIdentifier)
.onSet(async (value) => {
// the value will be the value you set for the Identifier Characteristic
// on the Input Source service that was selected - see input sources below.
await this.changeInput(parseInt(value.toString()));
});
// set the service name, this is what is displayed as the default name on the Home app
// in this example we are using the name we stored in the `accessory.context` in the `discoverDevices` method.
this.avService.setCharacteristic(this.platform.Characteristic.Name, accessory.context.device.displayName);
// This is required to be implemented, so currently it does not do anything useful.
this.avService.getCharacteristic(this.platform.Characteristic.RemoteKey)
.onSet((newValue) => {
switch(newValue) {
default: {
break;
}
}
});
// Setup lock
this.platform.config.lockLevel === 'level1' ? '1X' : '2X';
this.lockService = this.accessory.getService(this.platform.Service.LockMechanism)
|| this.accessory.addService(this.platform.Service.LockMechanism);
this.lockService.setCharacteristic(this.platform.Characteristic.Name, accessory.context.device.displayName);
this.lockService.getCharacteristic(this.platform.Characteristic.LockCurrentState)
.onGet(this.getCurrentLockState.bind(this));
this.lockService.getCharacteristic(this.platform.Characteristic.LockTargetState)
.onSet(this.setTargetLockState.bind(this))
.onGet(this.getCurrentLockState.bind(this));
this.updateLockStatus();
// Set interval to keep HomeKit updated on the current state of power, input states,
// and lock status.
setInterval(async () => {
await this.getOnOffState();
await this.updatePresetStatus();
await this.updateLockStatus();
}, this.updateInterval);
this.platform.api.publishExternalAccessories(PLUGIN_NAME, [this.accessory]);
}
/**
* Setup all presets from the plugin configuration. Note, currently all presets are setup
* as Component (YPbPr) video inputs, but this does not appear to make a difference in
* Apple's Home.app at this time.
*
* Note that TypeScript/JavaScript and HomeKit 0-based arrays, but for user-convenience
* any labelled inputs are 1-based.
*/
setupPresets() {
if(!this.presetsConfigured) {
const presets = this.platform.config.presets as ExtronPreset[];
presets.forEach(async (name, i) => {
const inputService = this.accessory.getService('input' + i) ||
this.accessory.addService(this.platform.Service.InputSource, 'input' + i, name.name);
this.platform.log.info('Adding input %s as number %s', name.name, i);
inputService
.setCharacteristic(this.platform.Characteristic.Identifier, i)
.setCharacteristic(this.platform.Characteristic.ConfiguredName, name.name)
.setCharacteristic(this.platform.Characteristic.Name, name.name)
.setCharacteristic(this.platform.Characteristic.IsConfigured, this.platform.Characteristic.IsConfigured.CONFIGURED)
.setCharacteristic(this.platform.Characteristic.InputSourceType, this.platform.Characteristic.InputSourceType.COMPONENT_VIDEO)
.setCharacteristic(this.platform.Characteristic.CurrentVisibilityState,
this.platform.Characteristic.CurrentVisibilityState.SHOWN);
this.avService.addLinkedService(inputService);
});
this.presetsConfigured = true;
}
}
// #region Power Status
/**
* Returns the current state of the physical Extron unit itself.
* @returns The characterstic status of the Extron unit itself.
*/
async | (): Promise<number> {
const muteResponse = await this.telnetCommand('WVM' + String.fromCharCode(13));
this.platform.log.debug('GET: Getting status of unit: %s', muteResponse);
// All zeroes indicates there are not active mutes; otherwise, mutes are in effect.
// Hence, if there are any other values in the response, we assume the unit is off.
const activeFromUnit = !muteResponse.split('').some(x => x === '1' || x === '2' || x === '3');
this.platform.log.debug('GET: Start Update Active State');
this.avService.updateCharacteristic(this.platform.Characteristic.Active, activeFromUnit);
return activeFromUnit ?
this.platform.Characteristic.Active.ACTIVE :
this.platform.Characteristic.Active.INACTIVE;
}
/**
* Sets the user-requested on or off state of the Extron unit.
* @param value The user requested on or off state.
*/
async setOnOffState(value: CharacteristicValue) {
this.platform.log.debug('SET: Setting status from HomeKit to: %s', value);
const command = (value === 1) ? '0*B' : '1*B';
const response = await this.telnetCommand(command);
this.platform.log.debug('SET: Received status of %s', response);
let active = this.platform.Characteristic.Active.INACTIVE;
if(response === 'Vmt0') {
active = this.platform.Characteristic.Active.ACTIVE;
this.platform.log.debug('SET: Unit is active');
}
this.avService.updateCharacteristic(this.platform.Characteristic.Active, active);
}
// #endregion Power Status
//#region Input Status
/**
* Updates the HomeKit staet of the preset that is currently active on the Extron
* unit itself.
*
* Note that when setting the current preset within HomeKit, the currentPreset value
* is decremented to handle 0- and 1-based arrays appropriately.
*/
async updatePresetStatus() {
// Preset "0" is the last set preset #, so query it to get the current state.
//const extronPreset = await this.getPreset(0);
const currentPreset = await this.telnetCommand('W0*1*1VC' + String.fromCharCode(13));
const currentExtronVideoPreset = parseInt(currentPreset.split(' ')[0]);
if(currentExtronVideoPreset !== this.currentPreset) {
this.currentPreset = currentExtronVideoPreset;
this.avService.updateCharacteristic(this.platform.Characteristic.ActiveIdentifier, --this.currentPreset);
}
}
/**
* Handles swiching of presets on Extron unit implemented using the HomeKit InputService.
* Will throw out errors if an out-of-bounds number is specified for an index that does
* not exist.
* @param value The HomeKit-specified input number to switch to.
*/
async changeInput(value: number) {
const newValue = value + 1;
this.platform.log.info('set Active Identifier => setNewValue: ' + newValue);
try {
const response = await this.telnetCommand(newValue + '.');
const responseIndex = newValue < 10 ? '0' + newValue : newValue.toString();
if(response === 'Rpr' + responseIndex) {
this.platform.log.info('Switched to preset ' + newValue + ': got response ' + response);
this.currentPreset = newValue;
this.avService.updateCharacteristic(this.platform.Characteristic.ActiveIdentifier, value);
} else {
switch(response) {
case 'E11':
this.platform.log.info('Preset number %s is out of range of this unit', newValue);
break;
default:
this.platform.log.info('Response does not match: %s with a string length of ', response, response.length);
}
}
} catch(error) {
this.platform.log.error('Error: ' + error);
}
}
//#endregion Input Status
//#region Lock Status
/**
* Returns the current panel lock status from the unit itself.
* @returns The current panel lock status.
*/
async updateLockStatus() : Promise<CharacteristicValue> {
const response = await this.telnetCommand('X');
if(response === '0') {
this.lockService.updateCharacteristic(this.platform.Characteristic.LockCurrentState,
this.platform.Characteristic.LockCurrentState.UNSECURED);
this.lockService.updateCharacteristic(this.platform.Characteristic.LockTargetState,
this.platform.Characteristic.LockTargetState.UNSECURED);
return this.platform.Characteristic.LockCurrentState.UNSECURED;
} else {
this.lockService.updateCharacteristic(this.platform.Characteristic.LockCurrentState,
this.platform.Characteristic.LockCurrentState.SECURED);
this.lockService.updateCharacteristic(this.platform.Characteristic.LockTargetState,
this.platform.Characteristic.LockTargetState.SECURED);
return this.platform.Characteristic.LockCurrentState.SECURED;
}
}
/**
* Sets the user-requested lock status.
* @param value The target value of what the lock should be.
*/
async setTargetLockState(value: CharacteristicValue) {
if(value === this.platform.Characteristic.LockTargetState.SECURED) {
const response = await this.telnetCommand(this.lockingCode);
if(response[3] === this.lockingCode[0]) {
this.lockService.updateCharacteristic(this.platform.Characteristic.LockCurrentState,
this.platform.Characteristic.LockCurrentState.SECURED);
this.platform.log.info('Lock returned a response of %s', response);
} else {
this.lockService.updateCharacteristic(this.platform.Characteristic.LockCurrentState,
this.platform.Characteristic.LockCurrentState.UNKNOWN);
this.platform.log.debug('Locking response was %s, Expected Exe%s', response, this.lockingCode[0]);
}
} else {
const response = await this.telnetCommand('0X');
if(response === 'Exe0') {
this.lockService.updateCharacteristic(this.platform.Characteristic.LockCurrentState,
this.platform.Characteristic.LockCurrentState.UNSECURED);
this.platform.log.info('Unlock returned a response of %s: ', response);
} else {
this.lockService.updateCharacteristic(this.platform.Characteristic.LockCurrentState,
this.platform.Characteristic.LockCurrentState.UNKNOWN);
this.platform.log.debug('Unlocking response was %s: Expected Exe0', response);
}
}
}
/**
* Gets the current lock state.
* @returns The current state of the lock service.
*/
async getCurrentLockState() {
return await this.updateLockStatus();
}
//#endregion Lock Status
// #region Support functions/methods
/**
* Runs a command on the telnet server and returns a response. Note that this may
* return an error.
* @param command The command to execute on the telnet server.
* @returns The results of the command.
*/
async telnetCommand(command: string): Promise<string> {
const response = await telnetResponse(this.platform.config.telnetSettings, command);
return response;
}
// #edregion Support functions/methods
} | getOnOffState | identifier_name |
platformAccessory.ts | import { Service, PlatformAccessory, CharacteristicValue } from 'homebridge';
import { ExtronMatrixSwitchHomebridgePlatform } from './platform';
import { telnetResponse } from './common';
import { PLUGIN_NAME } from './settings';
/**
* An interface that defines what fields are required for an preset on the
* Extron unit.
*/
interface ExtronPreset {
number: number;
name: string;
}
export class ExtronMatrixSwitchPlatformAccessory {
private avService: Service;
private lockService: Service;
private currentPreset = 0;
private presetsConfigured = false;
private lockingCode = '1X';
private updateInterval = 1000;
/**
* Default constructor that performs all initial setup.
* @param platform The platform implmented.
* @param accessory The accessory platform.
*/
constructor(
private readonly platform: ExtronMatrixSwitchHomebridgePlatform,
private readonly accessory: PlatformAccessory,
) {
// Set default update interval if not defined in configuration.
if(this.platform.config.updateInterval && this.platform.config.updateInterval !== undefined) {
this.updateInterval = this.platform.config.updateInterval * 1000;
}
this.accessory.getService(this.platform.Service.AccessoryInformation)!
.setCharacteristic(this.platform.Characteristic.Manufacturer, 'Extron, Inc.')
.setCharacteristic(this.platform.Characteristic.Model, 'Crosspoint ULTRA 88 HVA')
.setCharacteristic(this.platform.Characteristic.SerialNumber, this.platform.config.serialNumber);
this.avService = this.accessory.getService(this.platform.Service.Television)
|| this.accessory.addService(this.platform.Service.Television);
this.setupPresets();
const uuid = this.platform.api.hap.uuid.generate('homebridge:extron-matrix-switch' + accessory.context.device.displayName);
this.accessory.UUID = uuid;
this.accessory.category = this.platform.api.hap.Categories.AUDIO_RECEIVER;
this.avService.setCharacteristic(this.platform.Characteristic.Name, 'Extron');
this.avService.setCharacteristic(this.platform.Characteristic.SleepDiscoveryMode,
this.platform.Characteristic.SleepDiscoveryMode.ALWAYS_DISCOVERABLE);
this.avService.getCharacteristic(this.platform.Characteristic.Active)
.onSet(this.setOnOffState.bind(this))
.onGet(this.getOnOffState.bind(this));
this.getOnOffState();
this.updatePresetStatus();
this.avService.getCharacteristic(this.platform.Characteristic.ActiveIdentifier)
.onSet(async (value) => {
// the value will be the value you set for the Identifier Characteristic
// on the Input Source service that was selected - see input sources below.
await this.changeInput(parseInt(value.toString()));
});
// set the service name, this is what is displayed as the default name on the Home app
// in this example we are using the name we stored in the `accessory.context` in the `discoverDevices` method.
this.avService.setCharacteristic(this.platform.Characteristic.Name, accessory.context.device.displayName);
// This is required to be implemented, so currently it does not do anything useful.
this.avService.getCharacteristic(this.platform.Characteristic.RemoteKey)
.onSet((newValue) => {
switch(newValue) {
default: {
break;
}
}
});
// Setup lock
this.platform.config.lockLevel === 'level1' ? '1X' : '2X';
this.lockService = this.accessory.getService(this.platform.Service.LockMechanism)
|| this.accessory.addService(this.platform.Service.LockMechanism);
this.lockService.setCharacteristic(this.platform.Characteristic.Name, accessory.context.device.displayName);
this.lockService.getCharacteristic(this.platform.Characteristic.LockCurrentState)
.onGet(this.getCurrentLockState.bind(this));
this.lockService.getCharacteristic(this.platform.Characteristic.LockTargetState)
.onSet(this.setTargetLockState.bind(this))
.onGet(this.getCurrentLockState.bind(this));
this.updateLockStatus();
// Set interval to keep HomeKit updated on the current state of power, input states,
// and lock status.
setInterval(async () => {
await this.getOnOffState();
await this.updatePresetStatus();
await this.updateLockStatus();
}, this.updateInterval);
this.platform.api.publishExternalAccessories(PLUGIN_NAME, [this.accessory]);
}
/**
* Setup all presets from the plugin configuration. Note, currently all presets are setup
* as Component (YPbPr) video inputs, but this does not appear to make a difference in
* Apple's Home.app at this time.
*
* Note that TypeScript/JavaScript and HomeKit 0-based arrays, but for user-convenience
* any labelled inputs are 1-based.
*/
setupPresets() {
if(!this.presetsConfigured) {
const presets = this.platform.config.presets as ExtronPreset[];
presets.forEach(async (name, i) => {
const inputService = this.accessory.getService('input' + i) ||
this.accessory.addService(this.platform.Service.InputSource, 'input' + i, name.name);
this.platform.log.info('Adding input %s as number %s', name.name, i);
inputService
.setCharacteristic(this.platform.Characteristic.Identifier, i)
.setCharacteristic(this.platform.Characteristic.ConfiguredName, name.name)
.setCharacteristic(this.platform.Characteristic.Name, name.name)
.setCharacteristic(this.platform.Characteristic.IsConfigured, this.platform.Characteristic.IsConfigured.CONFIGURED)
.setCharacteristic(this.platform.Characteristic.InputSourceType, this.platform.Characteristic.InputSourceType.COMPONENT_VIDEO)
.setCharacteristic(this.platform.Characteristic.CurrentVisibilityState,
this.platform.Characteristic.CurrentVisibilityState.SHOWN);
this.avService.addLinkedService(inputService);
});
this.presetsConfigured = true;
}
}
// #region Power Status
/**
* Returns the current state of the physical Extron unit itself.
* @returns The characterstic status of the Extron unit itself.
*/
async getOnOffState(): Promise<number> {
const muteResponse = await this.telnetCommand('WVM' + String.fromCharCode(13));
this.platform.log.debug('GET: Getting status of unit: %s', muteResponse);
// All zeroes indicates there are not active mutes; otherwise, mutes are in effect.
// Hence, if there are any other values in the response, we assume the unit is off.
const activeFromUnit = !muteResponse.split('').some(x => x === '1' || x === '2' || x === '3');
this.platform.log.debug('GET: Start Update Active State');
this.avService.updateCharacteristic(this.platform.Characteristic.Active, activeFromUnit);
return activeFromUnit ?
this.platform.Characteristic.Active.ACTIVE :
this.platform.Characteristic.Active.INACTIVE;
}
/**
* Sets the user-requested on or off state of the Extron unit.
* @param value The user requested on or off state.
*/
async setOnOffState(value: CharacteristicValue) {
this.platform.log.debug('SET: Setting status from HomeKit to: %s', value);
const command = (value === 1) ? '0*B' : '1*B';
const response = await this.telnetCommand(command);
this.platform.log.debug('SET: Received status of %s', response);
let active = this.platform.Characteristic.Active.INACTIVE;
if(response === 'Vmt0') {
active = this.platform.Characteristic.Active.ACTIVE;
this.platform.log.debug('SET: Unit is active');
}
this.avService.updateCharacteristic(this.platform.Characteristic.Active, active);
}
// #endregion Power Status
//#region Input Status
/**
* Updates the HomeKit staet of the preset that is currently active on the Extron
* unit itself.
*
* Note that when setting the current preset within HomeKit, the currentPreset value
* is decremented to handle 0- and 1-based arrays appropriately.
*/
async updatePresetStatus() {
// Preset "0" is the last set preset #, so query it to get the current state.
//const extronPreset = await this.getPreset(0);
const currentPreset = await this.telnetCommand('W0*1*1VC' + String.fromCharCode(13));
const currentExtronVideoPreset = parseInt(currentPreset.split(' ')[0]);
if(currentExtronVideoPreset !== this.currentPreset) {
this.currentPreset = currentExtronVideoPreset;
this.avService.updateCharacteristic(this.platform.Characteristic.ActiveIdentifier, --this.currentPreset);
}
}
/**
* Handles swiching of presets on Extron unit implemented using the HomeKit InputService.
* Will throw out errors if an out-of-bounds number is specified for an index that does
* not exist.
* @param value The HomeKit-specified input number to switch to.
*/
async changeInput(value: number) {
const newValue = value + 1;
this.platform.log.info('set Active Identifier => setNewValue: ' + newValue);
try {
const response = await this.telnetCommand(newValue + '.');
const responseIndex = newValue < 10 ? '0' + newValue : newValue.toString();
if(response === 'Rpr' + responseIndex) {
this.platform.log.info('Switched to preset ' + newValue + ': got response ' + response);
this.currentPreset = newValue;
this.avService.updateCharacteristic(this.platform.Characteristic.ActiveIdentifier, value);
} else {
switch(response) {
case 'E11':
this.platform.log.info('Preset number %s is out of range of this unit', newValue);
break;
default:
this.platform.log.info('Response does not match: %s with a string length of ', response, response.length);
}
}
} catch(error) {
this.platform.log.error('Error: ' + error);
}
}
//#endregion Input Status
//#region Lock Status
/**
* Returns the current panel lock status from the unit itself.
* @returns The current panel lock status.
*/
async updateLockStatus() : Promise<CharacteristicValue> {
const response = await this.telnetCommand('X');
if(response === '0') {
this.lockService.updateCharacteristic(this.platform.Characteristic.LockCurrentState,
this.platform.Characteristic.LockCurrentState.UNSECURED);
this.lockService.updateCharacteristic(this.platform.Characteristic.LockTargetState,
this.platform.Characteristic.LockTargetState.UNSECURED);
return this.platform.Characteristic.LockCurrentState.UNSECURED;
} else {
this.lockService.updateCharacteristic(this.platform.Characteristic.LockCurrentState,
this.platform.Characteristic.LockCurrentState.SECURED);
this.lockService.updateCharacteristic(this.platform.Characteristic.LockTargetState,
this.platform.Characteristic.LockTargetState.SECURED);
return this.platform.Characteristic.LockCurrentState.SECURED;
}
}
/**
* Sets the user-requested lock status.
* @param value The target value of what the lock should be.
*/
async setTargetLockState(value: CharacteristicValue) {
if(value === this.platform.Characteristic.LockTargetState.SECURED) {
const response = await this.telnetCommand(this.lockingCode);
if(response[3] === this.lockingCode[0]) {
this.lockService.updateCharacteristic(this.platform.Characteristic.LockCurrentState,
this.platform.Characteristic.LockCurrentState.SECURED);
this.platform.log.info('Lock returned a response of %s', response);
} else |
} else {
const response = await this.telnetCommand('0X');
if(response === 'Exe0') {
this.lockService.updateCharacteristic(this.platform.Characteristic.LockCurrentState,
this.platform.Characteristic.LockCurrentState.UNSECURED);
this.platform.log.info('Unlock returned a response of %s: ', response);
} else {
this.lockService.updateCharacteristic(this.platform.Characteristic.LockCurrentState,
this.platform.Characteristic.LockCurrentState.UNKNOWN);
this.platform.log.debug('Unlocking response was %s: Expected Exe0', response);
}
}
}
/**
* Gets the current lock state.
* @returns The current state of the lock service.
*/
async getCurrentLockState() {
return await this.updateLockStatus();
}
//#endregion Lock Status
// #region Support functions/methods
/**
* Runs a command on the telnet server and returns a response. Note that this may
* return an error.
* @param command The command to execute on the telnet server.
* @returns The results of the command.
*/
async telnetCommand(command: string): Promise<string> {
const response = await telnetResponse(this.platform.config.telnetSettings, command);
return response;
}
// #edregion Support functions/methods
} | {
this.lockService.updateCharacteristic(this.platform.Characteristic.LockCurrentState,
this.platform.Characteristic.LockCurrentState.UNKNOWN);
this.platform.log.debug('Locking response was %s, Expected Exe%s', response, this.lockingCode[0]);
} | conditional_block |
registry.ts | import * as fs from 'fs';
import * as path from 'path';
import { mapLimit } from 'async';
import * as Github from '@octokit/rest';
// tslint:disable-next-line:no-submodule-imports
import * as simpleGit from 'simple-git/promise';
import * as _ from 'lodash';
import { getGlobalGithubConfig } from '../config';
import { logger as loggerRaw } from '../logger';
import { GithubGlobalConfig, TargetConfig } from '../schemas/project_config';
import { ConfigurationError, reportError } from '../utils/errors';
import { withTempDir } from '../utils/files';
import {
getAuthUsername,
getGithubApiToken,
getGithubClient,
GithubRemote,
} from '../utils/githubApi';
import { isDryRun } from '../utils/helpers';
import { renderTemplateSafe } from '../utils/strings';
import { HashAlgorithm, HashOutputFormat } from '../utils/system';
import {
isPreviewRelease,
parseVersion,
versionGreaterOrEqualThan,
} from '../utils/version';
import { stringToRegexp } from '../utils/filters';
import { BaseTarget } from './base';
import {
RemoteArtifact,
BaseArtifactProvider,
MAX_DOWNLOAD_CONCURRENCY,
} from '../artifact_providers/base';
const logger = loggerRaw.withScope('[registry]');
const DEFAULT_REGISTRY_REMOTE: GithubRemote = new GithubRemote(
'getsentry',
'sentry-release-registry'
);
/** Type of the registry package */
export enum RegistryPackageType {
/** App is a generic package type that doesn't belong to any specific registry */
APP = 'app',
/** SDK is a package hosted in one of public registries (PyPI, NPM, etc.) */
SDK = 'sdk',
}
/** Describes a checksum entry in the registry */
interface ChecksumEntry {
/** Checksum (hash) algorithm */
algorithm: HashAlgorithm;
/** Checksum format */
format: HashOutputFormat;
}
/** "registry" target options */
export interface RegistryConfig extends TargetConfig {
/** Type of the registry package */
type: RegistryPackageType;
/** Unique package cannonical name, including type and/or registry name */
canonicalName?: string;
/** Git remote of the release registry */
registryRemote: GithubRemote;
/** Should we create registry entries for pre-releases? */
linkPrereleases: boolean;
/** URL template for file assets */
urlTemplate?: string;
/** Types of checksums to compute for artifacts */
checksums: ChecksumEntry[];
/** Pattern that allows to skip the target if there's no matching file */
onlyIfPresent?: RegExp;
}
/**
* Target responsible for publishing static assets to GitHub pages
*/
export class RegistryTarget extends BaseTarget {
/** Target name */
public readonly name: string = 'registry';
/** Target options */
public readonly registryConfig: RegistryConfig;
/** Github client */
public readonly github: Github;
/** Github repo configuration */
public readonly githubRepo: GithubGlobalConfig;
public constructor(config: any, artifactProvider: BaseArtifactProvider) {
super(config, artifactProvider);
this.github = getGithubClient();
this.githubRepo = getGlobalGithubConfig();
this.registryConfig = this.getRegistryConfig();
}
/**
* Checks the provided checksums configuration
*
* Throws an error in case the configuration is incorrect
* FIXME(tonyo): rewrite this with JSON schemas
*
* @param checksums Raw configuration
*/
protected castChecksums(checksums: any): ChecksumEntry[] {
if (!checksums) {
return [];
}
if (!(checksums instanceof Array)) {
throw new ConfigurationError(
'Invalid type of "checksums": should be an array'
);
}
const resultChecksums: ChecksumEntry[] = [];
checksums.forEach(item => {
if (typeof item !== 'object' || !item.algorithm || !item.format) {
throw new ConfigurationError(
`Invalid checksum type: ${JSON.stringify(item)}`
);
}
// FIXME(tonyo): this is ugly as hell :(
// This all has to be replaced with JSON schema
if (
!(Object as any).values(HashAlgorithm).includes(item.algorithm) ||
!(Object as any).values(HashOutputFormat).includes(item.format)
) {
throw new ConfigurationError(
`Invalid checksum attributes: ${JSON.stringify(item)}`
);
}
resultChecksums.push({ algorithm: item.algorithm, format: item.format });
});
return resultChecksums;
}
/**
* Extracts Registry target options from the raw configuration
*/
public getRegistryConfig(): RegistryConfig {
const registryType = this.config.type;
if (
[RegistryPackageType.APP, RegistryPackageType.SDK].indexOf(
registryType
) === -1
) {
throw new ConfigurationError(
`Invalid registry type specified: "${registryType}"`
);
}
let urlTemplate;
if (registryType === RegistryPackageType.APP) {
urlTemplate = this.config.urlTemplate;
if (urlTemplate && typeof urlTemplate !== 'string') {
throw new ConfigurationError(
`Invalid "urlTemplate" specified: ${urlTemplate}`
);
}
}
const releaseConfig = this.config.config;
if (!releaseConfig) {
throw new ConfigurationError(
'Cannot find configuration dictionary for release registry'
);
}
const canonicalName = releaseConfig.canonical;
if (!canonicalName) {
throw new ConfigurationError(
'Canonical name not found in the configuration'
);
}
const linkPrereleases = this.config.linkPrereleases || false;
if (typeof linkPrereleases !== 'boolean') {
throw new ConfigurationError('Invlaid type of "linkPrereleases"');
}
const checksums = this.castChecksums(this.config.checksums);
const onlyIfPresentStr = this.config.onlyIfPresent || undefined;
let onlyIfPresent;
if (onlyIfPresentStr) {
if (typeof onlyIfPresentStr !== 'string') {
throw new ConfigurationError('Invalid type of "onlyIfPresent"');
}
onlyIfPresent = stringToRegexp(onlyIfPresentStr);
}
return {
canonicalName,
checksums,
linkPrereleases,
onlyIfPresent,
registryRemote: DEFAULT_REGISTRY_REMOTE,
type: registryType,
urlTemplate,
};
}
/**
* Creates a symlink, overwriting the existing one
*
* @param target Target path
* @param newFile Path to the new symlink
*/
public forceSymlink(target: string, newFile: string): void {
if (fs.existsSync(newFile)) {
fs.unlinkSync(newFile);
}
fs.symlinkSync(target, newFile);
}
/**
* Create symbolic links to the new version file
*
* "latest.json" link is not updated if the new version is "older" (e.g., it's
* a patch release for an older major version).
*
* @param versionFilePath Path to the new version file
* @param newVersion The new version
* @param oldVersion The previous latest version
*/
public createSymlinks(
versionFilePath: string,
newVersion: string,
oldVersion?: string
): void {
const parsedNewVersion = parseVersion(newVersion) || undefined;
if (!parsedNewVersion) {
throw new ConfigurationError(
`Cannot parse version: "${parsedNewVersion}"`
);
}
const parsedOldVersion =
(oldVersion ? parseVersion(oldVersion) : undefined) || undefined;
const baseVersionName = path.basename(versionFilePath);
const packageDir = path.dirname(versionFilePath);
// link latest, but only if the new version is "newer"
if (
parsedOldVersion &&
!versionGreaterOrEqualThan(parsedNewVersion, parsedOldVersion)
) {
logger.warn(
`Not updating the latest version file: current version is "${oldVersion}", new version is "${newVersion}"`
);
} else {
logger.debug(
`Changing symlink for "latest.json" from version "${oldVersion}" to "${newVersion}"`
);
this.forceSymlink(baseVersionName, path.join(packageDir, 'latest.json'));
}
// link major
const majorVersionLink = `${parsedNewVersion.major}.json`;
this.forceSymlink(baseVersionName, path.join(packageDir, majorVersionLink));
// link minor
const minorVersionLink = `${parsedNewVersion.major}.${parsedNewVersion.minor}.json`;
this.forceSymlink(baseVersionName, path.join(packageDir, minorVersionLink));
}
/**
* Returns the path to the SDK, given its canonical name
*
* @param registryDir The path to the local registry
* @param canonical The SDK's canonical name
* @returns The SDK path
*/
public getSdkPackagePath(registryDir: string, canonical: string): string {
const packageDirs = parseCanonical(canonical);
return [registryDir, 'packages'].concat(packageDirs).join(path.sep);
}
/**
* Returns the path to the app, given its canonical name
*
* @param registryDir The path to the local registry
* @param canonical The app's canonical name
* @returns The app path
*/
public getAppPackagePath(registryDir: string, canonical: string): string {
const packageDirs = parseCanonical(canonical);
if (packageDirs[0] !== 'app') {
throw new ConfigurationError(
`Invalid canonical entry for an app: ${canonical}`
);
}
return [registryDir, 'apps'].concat(packageDirs.slice(1)).join(path.sep);
}
/**
* Returns the path to the package from its canonical name
*
* @param registryDir The path to the local registry
* @param canonical The app's canonical name
*/
public getPackageDirPath(registryDir: string, canonical: string): string |
/**
* Adds file URLs to the manifest
*
* URL template is taken from "urlTemplate" configuration argument
*
* FIXME(tonyo): LEGACY function, left for compatibility, replaced by addFilesData
*
* @param manifest Package manifest
* @param version The new version
* @param revision Git commit SHA to be published
*/
public async addFileLinks(
manifest: { [key: string]: any },
version: string,
revision: string
): Promise<void> {
if (!this.registryConfig.urlTemplate) {
return;
}
const artifacts = await this.getArtifactsForRevision(revision);
if (artifacts.length === 0) {
logger.warn('No artifacts found, not adding any links to the manifest');
return;
}
const fileUrls: { [_: string]: string } = {};
for (const artifact of artifacts) {
fileUrls[artifact.filename] = renderTemplateSafe(
this.registryConfig.urlTemplate,
{
file: artifact.filename,
revision,
version,
}
);
}
logger.debug(
`Writing file urls to the manifest, files found: ${artifacts.length}`
);
manifest.file_urls = fileUrls;
}
/**
* Extends the artifact entry with additional information
*
* Information and checksums and download URLs are added here
*
* @param artifact Artifact
* @param version The new version
* @param revision Git commit SHA to be published
*
*/
public async getArtifactData(
artifact: RemoteArtifact,
version: string,
revision: string
): Promise<any> {
const artifactData: any = {};
if (this.registryConfig.urlTemplate) {
artifactData.url = renderTemplateSafe(this.registryConfig.urlTemplate, {
file: artifact.filename,
revision,
version,
});
}
if (this.registryConfig.checksums.length > 0) {
const fileChecksums: { [key: string]: string } = {};
for (const checksumType of this.registryConfig.checksums) {
const { algorithm, format } = checksumType;
const checksum = await this.artifactProvider.getChecksum(
artifact,
algorithm,
format
);
fileChecksums[`${algorithm}-${format}`] = checksum;
}
artifactData.checksums = fileChecksums;
}
return artifactData;
}
/**
* Extends the artifact entries with additional information
*
* @param packageManifest Package manifest
* @param version The new version
* @param revision Git commit SHA to be published
*/
public async addFilesData(
packageManifest: { [key: string]: any },
version: string,
revision: string
): Promise<void> {
// Clear existing data
delete packageManifest.files;
const artifacts = await this.getArtifactsForRevision(revision);
if (artifacts.length === 0) {
logger.warn('No artifacts found, not adding any file data');
return;
}
logger.info(
'Adding extra data (checksums, download links) for available artifacts...'
);
const files: { [key: string]: any } = {};
// tslint:disable-next-line:await-promise
await mapLimit(artifacts, MAX_DOWNLOAD_CONCURRENCY, async artifact => {
const fileData = await this.getArtifactData(artifact, version, revision);
if (!_.isEmpty(fileData)) {
files[artifact.filename] = fileData;
}
});
if (!_.isEmpty(files)) {
packageManifest.files = files;
}
}
/**
* Updates the local copy of the release registry
*
* @param packageManifest The package's manifest object
* @param canonical The package's canonical name
* @param version The new version
* @param revision Git commit SHA to be published
*/
public async getUpdatedManifest(
packageManifest: { [key: string]: any },
canonical: string,
version: string,
revision: string
): Promise<any> {
// Additional check
if (canonical !== packageManifest.canonical) {
reportError(
`Canonical name in "craft" config ("${canonical}") is inconsistent with ` +
`the one in package manifest ("${packageManifest.canonical}")`
);
}
// Update the manifest
const updatedManifest = { ...packageManifest, version };
// Add file links if it's a generic app (legacy)
if (this.registryConfig.type === RegistryPackageType.APP) {
await this.addFileLinks(updatedManifest, version, revision);
}
// Add various file-related data
await this.addFilesData(updatedManifest, version, revision);
return updatedManifest;
}
/**
* Updates the local copy of the release registry
*
* @param directory The directory with the checkout out registry
* @param canonical The package's canonical name
* @param version The new version
* @param revision Git commit SHA to be published
*/
public async addVersionToRegistry(
directory: string,
canonical: string,
version: string,
revision: string
): Promise<void> {
logger.info(
`Adding the version file to the registry for canonical name "${canonical}"...`
);
const packageDirPath = this.getPackageDirPath(directory, canonical);
const versionFilePath = path.join(packageDirPath, `${version}.json`);
if (fs.existsSync(versionFilePath)) {
reportError(`Version file for "${version}" already exists. Aborting.`);
}
const packageManifestPath = path.join(packageDirPath, 'latest.json');
logger.debug('Reading the current configuration from "latest.json"...');
const packageManifest =
JSON.parse(fs.readFileSync(packageManifestPath).toString()) || {};
const previousVersion = packageManifest.version || undefined;
const updatedManifest = await this.getUpdatedManifest(
packageManifest,
canonical,
version,
revision
);
// tslint:disable-next-line:prefer-template
const manifestString = JSON.stringify(updatedManifest, undefined, 2) + '\n';
logger.debug('Updated manifest', manifestString);
logger.debug(`Writing updated manifest to "${versionFilePath}"...`);
fs.writeFileSync(versionFilePath, manifestString);
this.createSymlinks(versionFilePath, version, previousVersion);
}
/**
* Commits and pushes the new version of the package to the release registry
*
* @param directory The directory with the checkout out registry
* @param remote The GitHub remote object
* @param version The new version
* @param revision Git commit SHA to be published
*/
public async pushVersionToRegistry(
directory: string,
remote: GithubRemote,
version: string,
revision: string
): Promise<void> {
logger.info(`Cloning "${remote.getRemoteString()}" to "${directory}"...`);
await simpleGit()
.silent(true)
.clone(remote.getRemoteStringWithAuth(), directory);
const canonical = this.registryConfig.canonicalName;
if (!canonical) {
throw new ConfigurationError(
'"canonical" value not found in the registry configuration'
);
}
await this.addVersionToRegistry(directory, canonical, version, revision);
const git = simpleGit(directory).silent(true);
await git.checkout('master');
// Commit
await git.add(['.']);
await git.commit(`craft: release "${canonical}", version "${version}"`);
// Push!
logger.info(`Pushing the changes...`);
if (!isDryRun()) {
await git.push('origin', 'master');
} else {
logger.info('[dry-run] Not pushing the branch.');
}
}
/**
* Pushes an archive with static HTML web assets to the configured branch
*/
public async publish(version: string, revision: string): Promise<any> {
if (!this.registryConfig.linkPrereleases && isPreviewRelease(version)) {
logger.info('Preview release detected, skipping the target');
return undefined;
}
// If we have onlyIfPresent specified, check that we have any of matched files
const onlyIfPresentPattern = this.registryConfig.onlyIfPresent;
if (onlyIfPresentPattern) {
const artifacts = await this.artifactProvider.filterArtifactsForRevision(
revision,
{
includeNames: onlyIfPresentPattern,
}
);
if (artifacts.length === 0) {
logger.warn(
`No files found that match "${onlyIfPresentPattern.toString()}", skipping the target.`
);
return undefined;
}
}
const remote = this.registryConfig.registryRemote;
const username = await getAuthUsername(this.github);
remote.setAuth(username, getGithubApiToken());
await withTempDir(
async directory =>
this.pushVersionToRegistry(directory, remote, version, revision),
true,
'craft-release-registry-'
);
logger.info('Release registry updated');
}
}
/**
* Parses registry canonical name to a list of registry directories
*
* Example: "npm:@sentry/browser" -> ["npm", "@sentry", "browser"]
*
* @param canonicalName Registry canonical name
* @returns A list of directories
*/
export function parseCanonical(canonicalName: string): string[] {
const [registry, packageName] = canonicalName.split(':');
if (!registry || !packageName) {
throw new ConfigurationError(
`Cannot parse canonical name for the package: ${canonicalName}`
);
}
const packageDirs = packageName.split('/');
if (packageDirs.some(x => !x)) {
throw new ConfigurationError(
`Cannot parse canonical name for the package: ${canonicalName}`
);
}
return [registry].concat(packageName.split('/'));
}
| {
if (this.registryConfig.type === RegistryPackageType.SDK) {
return this.getSdkPackagePath(registryDir, canonical);
} else if (this.registryConfig.type === RegistryPackageType.APP) {
return this.getAppPackagePath(registryDir, canonical);
} else {
throw new ConfigurationError(
`Unknown registry package type: ${this.registryConfig.type}`
);
}
} | identifier_body |
registry.ts | import * as fs from 'fs';
import * as path from 'path';
import { mapLimit } from 'async';
import * as Github from '@octokit/rest';
// tslint:disable-next-line:no-submodule-imports
import * as simpleGit from 'simple-git/promise';
import * as _ from 'lodash';
import { getGlobalGithubConfig } from '../config';
import { logger as loggerRaw } from '../logger';
import { GithubGlobalConfig, TargetConfig } from '../schemas/project_config';
import { ConfigurationError, reportError } from '../utils/errors';
import { withTempDir } from '../utils/files';
import {
getAuthUsername,
getGithubApiToken,
getGithubClient,
GithubRemote,
} from '../utils/githubApi';
import { isDryRun } from '../utils/helpers';
import { renderTemplateSafe } from '../utils/strings';
import { HashAlgorithm, HashOutputFormat } from '../utils/system';
import {
isPreviewRelease,
parseVersion,
versionGreaterOrEqualThan,
} from '../utils/version';
import { stringToRegexp } from '../utils/filters';
import { BaseTarget } from './base';
import {
RemoteArtifact,
BaseArtifactProvider,
MAX_DOWNLOAD_CONCURRENCY,
} from '../artifact_providers/base';
const logger = loggerRaw.withScope('[registry]');
const DEFAULT_REGISTRY_REMOTE: GithubRemote = new GithubRemote(
'getsentry',
'sentry-release-registry'
);
/** Type of the registry package */
export enum RegistryPackageType {
/** App is a generic package type that doesn't belong to any specific registry */
APP = 'app',
/** SDK is a package hosted in one of public registries (PyPI, NPM, etc.) */
SDK = 'sdk',
}
/** Describes a checksum entry in the registry */
interface ChecksumEntry {
/** Checksum (hash) algorithm */
algorithm: HashAlgorithm;
/** Checksum format */
format: HashOutputFormat;
}
/** "registry" target options */
export interface RegistryConfig extends TargetConfig {
/** Type of the registry package */
type: RegistryPackageType;
/** Unique package cannonical name, including type and/or registry name */
canonicalName?: string;
/** Git remote of the release registry */
registryRemote: GithubRemote;
/** Should we create registry entries for pre-releases? */
linkPrereleases: boolean;
/** URL template for file assets */
urlTemplate?: string;
/** Types of checksums to compute for artifacts */
checksums: ChecksumEntry[];
/** Pattern that allows to skip the target if there's no matching file */
onlyIfPresent?: RegExp;
}
/**
* Target responsible for publishing static assets to GitHub pages
*/
export class RegistryTarget extends BaseTarget {
/** Target name */
public readonly name: string = 'registry';
/** Target options */
public readonly registryConfig: RegistryConfig;
/** Github client */
public readonly github: Github;
/** Github repo configuration */
public readonly githubRepo: GithubGlobalConfig;
public constructor(config: any, artifactProvider: BaseArtifactProvider) {
super(config, artifactProvider);
this.github = getGithubClient();
this.githubRepo = getGlobalGithubConfig();
this.registryConfig = this.getRegistryConfig();
}
/**
* Checks the provided checksums configuration
*
* Throws an error in case the configuration is incorrect
* FIXME(tonyo): rewrite this with JSON schemas
*
* @param checksums Raw configuration
*/
protected | (checksums: any): ChecksumEntry[] {
if (!checksums) {
return [];
}
if (!(checksums instanceof Array)) {
throw new ConfigurationError(
'Invalid type of "checksums": should be an array'
);
}
const resultChecksums: ChecksumEntry[] = [];
checksums.forEach(item => {
if (typeof item !== 'object' || !item.algorithm || !item.format) {
throw new ConfigurationError(
`Invalid checksum type: ${JSON.stringify(item)}`
);
}
// FIXME(tonyo): this is ugly as hell :(
// This all has to be replaced with JSON schema
if (
!(Object as any).values(HashAlgorithm).includes(item.algorithm) ||
!(Object as any).values(HashOutputFormat).includes(item.format)
) {
throw new ConfigurationError(
`Invalid checksum attributes: ${JSON.stringify(item)}`
);
}
resultChecksums.push({ algorithm: item.algorithm, format: item.format });
});
return resultChecksums;
}
/**
* Extracts Registry target options from the raw configuration
*/
public getRegistryConfig(): RegistryConfig {
const registryType = this.config.type;
if (
[RegistryPackageType.APP, RegistryPackageType.SDK].indexOf(
registryType
) === -1
) {
throw new ConfigurationError(
`Invalid registry type specified: "${registryType}"`
);
}
let urlTemplate;
if (registryType === RegistryPackageType.APP) {
urlTemplate = this.config.urlTemplate;
if (urlTemplate && typeof urlTemplate !== 'string') {
throw new ConfigurationError(
`Invalid "urlTemplate" specified: ${urlTemplate}`
);
}
}
const releaseConfig = this.config.config;
if (!releaseConfig) {
throw new ConfigurationError(
'Cannot find configuration dictionary for release registry'
);
}
const canonicalName = releaseConfig.canonical;
if (!canonicalName) {
throw new ConfigurationError(
'Canonical name not found in the configuration'
);
}
const linkPrereleases = this.config.linkPrereleases || false;
if (typeof linkPrereleases !== 'boolean') {
throw new ConfigurationError('Invlaid type of "linkPrereleases"');
}
const checksums = this.castChecksums(this.config.checksums);
const onlyIfPresentStr = this.config.onlyIfPresent || undefined;
let onlyIfPresent;
if (onlyIfPresentStr) {
if (typeof onlyIfPresentStr !== 'string') {
throw new ConfigurationError('Invalid type of "onlyIfPresent"');
}
onlyIfPresent = stringToRegexp(onlyIfPresentStr);
}
return {
canonicalName,
checksums,
linkPrereleases,
onlyIfPresent,
registryRemote: DEFAULT_REGISTRY_REMOTE,
type: registryType,
urlTemplate,
};
}
/**
* Creates a symlink, overwriting the existing one
*
* @param target Target path
* @param newFile Path to the new symlink
*/
public forceSymlink(target: string, newFile: string): void {
if (fs.existsSync(newFile)) {
fs.unlinkSync(newFile);
}
fs.symlinkSync(target, newFile);
}
/**
* Create symbolic links to the new version file
*
* "latest.json" link is not updated if the new version is "older" (e.g., it's
* a patch release for an older major version).
*
* @param versionFilePath Path to the new version file
* @param newVersion The new version
* @param oldVersion The previous latest version
*/
public createSymlinks(
versionFilePath: string,
newVersion: string,
oldVersion?: string
): void {
const parsedNewVersion = parseVersion(newVersion) || undefined;
if (!parsedNewVersion) {
throw new ConfigurationError(
`Cannot parse version: "${parsedNewVersion}"`
);
}
const parsedOldVersion =
(oldVersion ? parseVersion(oldVersion) : undefined) || undefined;
const baseVersionName = path.basename(versionFilePath);
const packageDir = path.dirname(versionFilePath);
// link latest, but only if the new version is "newer"
if (
parsedOldVersion &&
!versionGreaterOrEqualThan(parsedNewVersion, parsedOldVersion)
) {
logger.warn(
`Not updating the latest version file: current version is "${oldVersion}", new version is "${newVersion}"`
);
} else {
logger.debug(
`Changing symlink for "latest.json" from version "${oldVersion}" to "${newVersion}"`
);
this.forceSymlink(baseVersionName, path.join(packageDir, 'latest.json'));
}
// link major
const majorVersionLink = `${parsedNewVersion.major}.json`;
this.forceSymlink(baseVersionName, path.join(packageDir, majorVersionLink));
// link minor
const minorVersionLink = `${parsedNewVersion.major}.${parsedNewVersion.minor}.json`;
this.forceSymlink(baseVersionName, path.join(packageDir, minorVersionLink));
}
/**
* Returns the path to the SDK, given its canonical name
*
* @param registryDir The path to the local registry
* @param canonical The SDK's canonical name
* @returns The SDK path
*/
public getSdkPackagePath(registryDir: string, canonical: string): string {
const packageDirs = parseCanonical(canonical);
return [registryDir, 'packages'].concat(packageDirs).join(path.sep);
}
/**
* Returns the path to the app, given its canonical name
*
* @param registryDir The path to the local registry
* @param canonical The app's canonical name
* @returns The app path
*/
public getAppPackagePath(registryDir: string, canonical: string): string {
const packageDirs = parseCanonical(canonical);
if (packageDirs[0] !== 'app') {
throw new ConfigurationError(
`Invalid canonical entry for an app: ${canonical}`
);
}
return [registryDir, 'apps'].concat(packageDirs.slice(1)).join(path.sep);
}
/**
* Returns the path to the package from its canonical name
*
* @param registryDir The path to the local registry
* @param canonical The app's canonical name
*/
public getPackageDirPath(registryDir: string, canonical: string): string {
if (this.registryConfig.type === RegistryPackageType.SDK) {
return this.getSdkPackagePath(registryDir, canonical);
} else if (this.registryConfig.type === RegistryPackageType.APP) {
return this.getAppPackagePath(registryDir, canonical);
} else {
throw new ConfigurationError(
`Unknown registry package type: ${this.registryConfig.type}`
);
}
}
/**
 * Adds file download URLs to the package manifest (legacy format)
 *
 * A URL for every artifact of the given revision is rendered from the
 * "urlTemplate" configuration value and stored under "file_urls" in the
 * manifest. No-op when "urlTemplate" is not configured or no artifacts exist.
 *
 * FIXME(tonyo): LEGACY function, left for compatibility, replaced by addFilesData
 *
 * @param manifest Package manifest (mutated in place)
 * @param version The new version
 * @param revision Git commit SHA to be published
 */
public async addFileLinks(
  manifest: { [key: string]: any },
  version: string,
  revision: string
): Promise<void> {
  // Nothing to do unless a URL template is configured.
  if (!this.registryConfig.urlTemplate) {
    return;
  }
  const artifacts = await this.getArtifactsForRevision(revision);
  if (artifacts.length === 0) {
    logger.warn('No artifacts found, not adding any links to the manifest');
    return;
  }
  // Render one URL per artifact from the configured template.
  const fileUrls: { [_: string]: string } = {};
  for (const artifact of artifacts) {
    fileUrls[artifact.filename] = renderTemplateSafe(
      this.registryConfig.urlTemplate,
      {
        file: artifact.filename,
        revision,
        version,
      }
    );
  }
  logger.debug(
    `Writing file urls to the manifest, files found: ${artifacts.length}`
  );
  manifest.file_urls = fileUrls;
}
/**
 * Extends the artifact entry with additional information
 *
 * Information on checksums and download URLs is added here: a "url" key when
 * "urlTemplate" is configured, and a "checksums" map for every configured
 * checksum type.
 *
 * @param artifact Artifact
 * @param version The new version
 * @param revision Git commit SHA to be published
 * @returns Object with "url" and/or "checksums" keys; empty when neither is configured
 */
public async getArtifactData(
  artifact: RemoteArtifact,
  version: string,
  revision: string
): Promise<any> {
  const artifactData: any = {};
  if (this.registryConfig.urlTemplate) {
    artifactData.url = renderTemplateSafe(this.registryConfig.urlTemplate, {
      file: artifact.filename,
      revision,
      version,
    });
  }
  if (this.registryConfig.checksums.length > 0) {
    // Compute every configured checksum type, sequentially per artifact.
    const fileChecksums: { [key: string]: string } = {};
    for (const checksumType of this.registryConfig.checksums) {
      const { algorithm, format } = checksumType;
      const checksum = await this.artifactProvider.getChecksum(
        artifact,
        algorithm,
        format
      );
      // Key format: "<algorithm>-<format>", e.g. "sha256-hex".
      fileChecksums[`${algorithm}-${format}`] = checksum;
    }
    artifactData.checksums = fileChecksums;
  }
  return artifactData;
}
/**
 * Extends the artifact entries with additional information
 *
 * Replaces the "files" key of the manifest with per-artifact data (checksums,
 * download links) collected for the given revision. The key is removed when
 * no artifacts produce any data.
 *
 * @param packageManifest Package manifest (mutated in place)
 * @param version The new version
 * @param revision Git commit SHA to be published
 */
public async addFilesData(
  packageManifest: { [key: string]: any },
  version: string,
  revision: string
): Promise<void> {
  // Clear existing data
  delete packageManifest.files;
  const artifacts = await this.getArtifactsForRevision(revision);
  if (artifacts.length === 0) {
    logger.warn('No artifacts found, not adding any file data');
    return;
  }
  logger.info(
    'Adding extra data (checksums, download links) for available artifacts...'
  );
  const files: { [key: string]: any } = {};
  // Fetch per-artifact data with bounded concurrency.
  // tslint:disable-next-line:await-promise
  await mapLimit(artifacts, MAX_DOWNLOAD_CONCURRENCY, async artifact => {
    const fileData = await this.getArtifactData(artifact, version, revision);
    if (!_.isEmpty(fileData)) {
      files[artifact.filename] = fileData;
    }
  });
  if (!_.isEmpty(files)) {
    packageManifest.files = files;
  }
}
/**
 * Returns an updated copy of the package manifest for the new version
 *
 * Reports an error when the canonical name in the craft config disagrees
 * with the manifest, then builds a shallow copy of the manifest with the new
 * version and refreshed file data.
 *
 * @param packageManifest The package's manifest object (not reassigned; nested objects are shared)
 * @param canonical The package's canonical name
 * @param version The new version
 * @param revision Git commit SHA to be published
 * @returns The updated manifest object
 */
public async getUpdatedManifest(
  packageManifest: { [key: string]: any },
  canonical: string,
  version: string,
  revision: string
): Promise<any> {
  // Additional check
  if (canonical !== packageManifest.canonical) {
    reportError(
      `Canonical name in "craft" config ("${canonical}") is inconsistent with ` +
        `the one in package manifest ("${packageManifest.canonical}")`
    );
  }
  // Update the manifest (shallow copy with the new version)
  const updatedManifest = { ...packageManifest, version };
  // Add file links if it's a generic app (legacy)
  if (this.registryConfig.type === RegistryPackageType.APP) {
    await this.addFileLinks(updatedManifest, version, revision);
  }
  // Add various file-related data
  await this.addFilesData(updatedManifest, version, revision);
  return updatedManifest;
}
/**
 * Adds a version file for the package to the local registry checkout
 *
 * Reads the current "latest.json" manifest, builds an updated manifest for
 * the new version, writes it to "<version>.json", and refreshes the
 * latest/major/minor symlinks.
 *
 * @param directory The directory with the checked out registry
 * @param canonical The package's canonical name
 * @param version The new version
 * @param revision Git commit SHA to be published
 */
public async addVersionToRegistry(
  directory: string,
  canonical: string,
  version: string,
  revision: string
): Promise<void> {
  logger.info(
    `Adding the version file to the registry for canonical name "${canonical}"...`
  );
  const packageDirPath = this.getPackageDirPath(directory, canonical);
  const versionFilePath = path.join(packageDirPath, `${version}.json`);
  // NOTE(review): assumes reportError aborts the run — confirm it throws,
  // otherwise the existing version file is overwritten below.
  if (fs.existsSync(versionFilePath)) {
    reportError(`Version file for "${version}" already exists. Aborting.`);
  }
  const packageManifestPath = path.join(packageDirPath, 'latest.json');
  logger.debug('Reading the current configuration from "latest.json"...');
  const packageManifest =
    JSON.parse(fs.readFileSync(packageManifestPath).toString()) || {};
  // Remember the previously released version to decide symlink updates.
  const previousVersion = packageManifest.version || undefined;
  const updatedManifest = await this.getUpdatedManifest(
    packageManifest,
    canonical,
    version,
    revision
  );
  // tslint:disable-next-line:prefer-template
  const manifestString = JSON.stringify(updatedManifest, undefined, 2) + '\n';
  logger.debug('Updated manifest', manifestString);
  logger.debug(`Writing updated manifest to "${versionFilePath}"...`);
  fs.writeFileSync(versionFilePath, manifestString);
  this.createSymlinks(versionFilePath, version, previousVersion);
}
/**
 * Commits and pushes the new version of the package to the release registry
 *
 * Clones the registry repository into the given directory, adds the version
 * file, commits the change on "master", and pushes it (skipped in dry-run).
 *
 * @param directory The directory to clone the registry into
 * @param remote The GitHub remote object
 * @param version The new version
 * @param revision Git commit SHA to be published
 */
public async pushVersionToRegistry(
  directory: string,
  remote: GithubRemote,
  version: string,
  revision: string
): Promise<void> {
  logger.info(`Cloning "${remote.getRemoteString()}" to "${directory}"...`);
  await simpleGit()
    .silent(true)
    .clone(remote.getRemoteStringWithAuth(), directory);
  const canonical = this.registryConfig.canonicalName;
  if (!canonical) {
    throw new ConfigurationError(
      '"canonical" value not found in the registry configuration'
    );
  }
  await this.addVersionToRegistry(directory, canonical, version, revision);
  const git = simpleGit(directory).silent(true);
  await git.checkout('master');
  // Commit
  await git.add(['.']);
  await git.commit(`craft: release "${canonical}", version "${version}"`);
  // Push!
  logger.info(`Pushing the changes...`);
  if (!isDryRun()) {
    await git.push('origin', 'master');
  } else {
    logger.info('[dry-run] Not pushing the branch.');
  }
}
/**
 * Publishes the new package version to the release registry
 *
 * The target is skipped for preview releases (unless "linkPrereleases" is
 * set) and when "onlyIfPresent" is configured but no artifact of the
 * revision matches it.
 *
 * @param version The new version
 * @param revision Git commit SHA to be published
 */
public async publish(version: string, revision: string): Promise<any> {
  if (!this.registryConfig.linkPrereleases && isPreviewRelease(version)) {
    logger.info('Preview release detected, skipping the target');
    return undefined;
  }
  // If we have onlyIfPresent specified, check that we have any of matched files
  const onlyIfPresentPattern = this.registryConfig.onlyIfPresent;
  if (onlyIfPresentPattern) {
    const artifacts = await this.artifactProvider.filterArtifactsForRevision(
      revision,
      {
        includeNames: onlyIfPresentPattern,
      }
    );
    if (artifacts.length === 0) {
      logger.warn(
        `No files found that match "${onlyIfPresentPattern.toString()}", skipping the target.`
      );
      return undefined;
    }
  }
  const remote = this.registryConfig.registryRemote;
  // Authenticate the remote so the clone/push below can use it.
  const username = await getAuthUsername(this.github);
  remote.setAuth(username, getGithubApiToken());
  // Work in a temporary checkout that is cleaned up afterwards.
  await withTempDir(
    async directory =>
      this.pushVersionToRegistry(directory, remote, version, revision),
    true,
    'craft-release-registry-'
  );
  logger.info('Release registry updated');
}
}
/**
 * Parses registry canonical name to a list of registry directories
 *
 * Example: "npm:@sentry/browser" -> ["npm", "@sentry", "browser"]
 *
 * @param canonicalName Registry canonical name
 * @returns A list of directories
 * @throws ConfigurationError when the name is not "registry:package" shaped
 *   or contains empty path segments
 */
export function parseCanonical(canonicalName: string): string[] {
  // NOTE(review): anything after a second ":" is silently dropped by this
  // destructuring — confirm multi-colon names are not expected.
  const [registry, packageName] = canonicalName.split(':');
  if (!registry || !packageName) {
    throw new ConfigurationError(
      `Cannot parse canonical name for the package: ${canonicalName}`
    );
  }
  const packageDirs = packageName.split('/');
  // Reject empty segments, e.g. "npm:@sentry//browser" or a trailing slash.
  if (packageDirs.some(x => !x)) {
    throw new ConfigurationError(
      `Cannot parse canonical name for the package: ${canonicalName}`
    );
  }
  // Reuse the already-computed segments instead of splitting a second time.
  return [registry, ...packageDirs];
}
| castChecksums | identifier_name |
registry.ts | import * as fs from 'fs';
import * as path from 'path';
import { mapLimit } from 'async';
import * as Github from '@octokit/rest';
// tslint:disable-next-line:no-submodule-imports
import * as simpleGit from 'simple-git/promise';
import * as _ from 'lodash';
import { getGlobalGithubConfig } from '../config';
import { logger as loggerRaw } from '../logger';
import { GithubGlobalConfig, TargetConfig } from '../schemas/project_config';
import { ConfigurationError, reportError } from '../utils/errors';
import { withTempDir } from '../utils/files';
import {
getAuthUsername,
getGithubApiToken,
getGithubClient,
GithubRemote,
} from '../utils/githubApi';
import { isDryRun } from '../utils/helpers';
import { renderTemplateSafe } from '../utils/strings';
import { HashAlgorithm, HashOutputFormat } from '../utils/system';
import {
isPreviewRelease,
parseVersion,
versionGreaterOrEqualThan,
} from '../utils/version';
import { stringToRegexp } from '../utils/filters';
import { BaseTarget } from './base';
import {
RemoteArtifact,
BaseArtifactProvider,
MAX_DOWNLOAD_CONCURRENCY,
} from '../artifact_providers/base';
const logger = loggerRaw.withScope('[registry]');
const DEFAULT_REGISTRY_REMOTE: GithubRemote = new GithubRemote(
'getsentry',
'sentry-release-registry'
);
/** Type of the registry package */
export enum RegistryPackageType {
/** App is a generic package type that doesn't belong to any specific registry */
APP = 'app',
/** SDK is a package hosted in one of public registries (PyPI, NPM, etc.) */
SDK = 'sdk',
}
/** Describes a checksum entry in the registry */
interface ChecksumEntry {
/** Checksum (hash) algorithm */
algorithm: HashAlgorithm;
/** Checksum format */
format: HashOutputFormat;
}
/** "registry" target options */
export interface RegistryConfig extends TargetConfig {
  /** Type of the registry package */
  type: RegistryPackageType;
  /** Unique package canonical name, including type and/or registry name */
  canonicalName?: string;
  /** Git remote of the release registry */
  registryRemote: GithubRemote;
  /** Should we create registry entries for pre-releases? */
  linkPrereleases: boolean;
  /** URL template for file assets */
  urlTemplate?: string;
  /** Types of checksums to compute for artifacts */
  checksums: ChecksumEntry[];
  /** Pattern that allows to skip the target if there's no matching file */
  onlyIfPresent?: RegExp;
}
/**
* Target responsible for publishing static assets to GitHub pages
*/
export class RegistryTarget extends BaseTarget {
/** Target name */
public readonly name: string = 'registry';
/** Target options */
public readonly registryConfig: RegistryConfig;
/** Github client */
public readonly github: Github;
/** Github repo configuration */
public readonly githubRepo: GithubGlobalConfig;
/**
 * Creates the registry target and validates its configuration.
 *
 * @param config Raw target configuration
 * @param artifactProvider Provider used to look up release artifacts
 */
public constructor(config: any, artifactProvider: BaseArtifactProvider) {
  super(config, artifactProvider);
  this.github = getGithubClient();
  this.githubRepo = getGlobalGithubConfig();
  // Validates the raw config eagerly; throws ConfigurationError on bad input.
  this.registryConfig = this.getRegistryConfig();
}
/**
 * Checks the provided checksums configuration
 *
 * Throws an error in case the configuration is incorrect
 * FIXME(tonyo): rewrite this with JSON schemas
 *
 * @param checksums Raw configuration
 * @returns The validated list of checksum entries (empty for falsy input)
 */
protected castChecksums(checksums: any): ChecksumEntry[] {
  if (!checksums) {
    return [];
  }
  // Array.isArray (unlike "instanceof Array") also recognizes arrays that
  // originate from a different execution context.
  if (!Array.isArray(checksums)) {
    throw new ConfigurationError(
      'Invalid type of "checksums": should be an array'
    );
  }
  return checksums.map(item => {
    if (typeof item !== 'object' || !item.algorithm || !item.format) {
      throw new ConfigurationError(
        `Invalid checksum type: ${JSON.stringify(item)}`
      );
    }
    // FIXME(tonyo): this is ugly as hell :(
    // This all has to be replaced with JSON schema
    if (
      !(Object as any).values(HashAlgorithm).includes(item.algorithm) ||
      !(Object as any).values(HashOutputFormat).includes(item.format)
    ) {
      throw new ConfigurationError(
        `Invalid checksum attributes: ${JSON.stringify(item)}`
      );
    }
    return { algorithm: item.algorithm, format: item.format };
  });
}
/**
 * Extracts Registry target options from the raw configuration
 *
 * @returns The validated registry configuration
 * @throws ConfigurationError on any invalid or missing option
 */
public getRegistryConfig(): RegistryConfig {
  const registryType = this.config.type;
  if (
    [RegistryPackageType.APP, RegistryPackageType.SDK].indexOf(
      registryType
    ) === -1
  ) {
    throw new ConfigurationError(
      `Invalid registry type specified: "${registryType}"`
    );
  }
  // "urlTemplate" is only meaningful for generic apps.
  let urlTemplate;
  if (registryType === RegistryPackageType.APP) {
    urlTemplate = this.config.urlTemplate;
    if (urlTemplate && typeof urlTemplate !== 'string') {
      throw new ConfigurationError(
        `Invalid "urlTemplate" specified: ${urlTemplate}`
      );
    }
  }
  const releaseConfig = this.config.config;
  if (!releaseConfig) {
    throw new ConfigurationError(
      'Cannot find configuration dictionary for release registry'
    );
  }
  const canonicalName = releaseConfig.canonical;
  if (!canonicalName) {
    throw new ConfigurationError(
      'Canonical name not found in the configuration'
    );
  }
  const linkPrereleases = this.config.linkPrereleases || false;
  if (typeof linkPrereleases !== 'boolean') {
    // Typo fix: the original message read 'Invlaid'.
    throw new ConfigurationError('Invalid type of "linkPrereleases"');
  }
  const checksums = this.castChecksums(this.config.checksums);
  const onlyIfPresentStr = this.config.onlyIfPresent || undefined;
  let onlyIfPresent;
  if (onlyIfPresentStr) {
    if (typeof onlyIfPresentStr !== 'string') {
      throw new ConfigurationError('Invalid type of "onlyIfPresent"');
    }
    onlyIfPresent = stringToRegexp(onlyIfPresentStr);
  }
  return {
    canonicalName,
    checksums,
    linkPrereleases,
    onlyIfPresent,
    registryRemote: DEFAULT_REGISTRY_REMOTE,
    type: registryType,
    urlTemplate,
  };
}
/**
* Creates a symlink, overwriting the existing one
*
* @param target Target path
* @param newFile Path to the new symlink
*/
public forceSymlink(target: string, newFile: string): void {
if (fs.existsSync(newFile)) {
fs.unlinkSync(newFile);
}
fs.symlinkSync(target, newFile);
}
/**
 * Create symbolic links to the new version file
 *
 * "latest.json" link is not updated if the new version is "older" (e.g., it's
 * a patch release for an older major version). Major ("N.json") and minor
 * ("N.M.json") links are always repointed at the new version file.
 *
 * @param versionFilePath Path to the new version file
 * @param newVersion The new version
 * @param oldVersion The previous latest version
 * @throws ConfigurationError when newVersion cannot be parsed
 */
public createSymlinks(
  versionFilePath: string,
  newVersion: string,
  oldVersion?: string
): void {
  const parsedNewVersion = parseVersion(newVersion) || undefined;
  if (!parsedNewVersion) {
    // Bug fix: report the raw version string. The original interpolated
    // "parsedNewVersion", which is always undefined on this path.
    throw new ConfigurationError(`Cannot parse version: "${newVersion}"`);
  }
  const parsedOldVersion =
    (oldVersion ? parseVersion(oldVersion) : undefined) || undefined;
  const baseVersionName = path.basename(versionFilePath);
  const packageDir = path.dirname(versionFilePath);
  // link latest, but only if the new version is "newer"
  if (
    parsedOldVersion &&
    !versionGreaterOrEqualThan(parsedNewVersion, parsedOldVersion)
  ) {
    logger.warn(
      `Not updating the latest version file: current version is "${oldVersion}", new version is "${newVersion}"`
    );
  } else {
    logger.debug(
      `Changing symlink for "latest.json" from version "${oldVersion}" to "${newVersion}"`
    );
    this.forceSymlink(baseVersionName, path.join(packageDir, 'latest.json'));
  }
  // link major
  const majorVersionLink = `${parsedNewVersion.major}.json`;
  this.forceSymlink(baseVersionName, path.join(packageDir, majorVersionLink));
  // link minor
  const minorVersionLink = `${parsedNewVersion.major}.${parsedNewVersion.minor}.json`;
  this.forceSymlink(baseVersionName, path.join(packageDir, minorVersionLink));
}
/**
 * Returns the path to the SDK, given its canonical name
 *
 * @param registryDir The path to the local registry
 * @param canonical The SDK's canonical name
 * @returns The SDK path
 */
public getSdkPackagePath(registryDir: string, canonical: string): string {
  const pathSegments = [registryDir, 'packages', ...parseCanonical(canonical)];
  return pathSegments.join(path.sep);
}
/**
 * Returns the path to the app, given its canonical name
 *
 * @param registryDir The path to the local registry
 * @param canonical The app's canonical name
 * @returns The app path
 */
public getAppPackagePath(registryDir: string, canonical: string): string {
  const [packageType, ...appDirs] = parseCanonical(canonical);
  if (packageType !== 'app') {
    throw new ConfigurationError(
      `Invalid canonical entry for an app: ${canonical}`
    );
  }
  return [registryDir, 'apps', ...appDirs].join(path.sep);
}
/**
 * Returns the path to the package from its canonical name
 *
 * @param registryDir The path to the local registry
 * @param canonical The package's canonical name
 * @returns The package directory inside the registry checkout
 */
public getPackageDirPath(registryDir: string, canonical: string): string {
  switch (this.registryConfig.type) {
    case RegistryPackageType.SDK:
      return this.getSdkPackagePath(registryDir, canonical);
    case RegistryPackageType.APP:
      return this.getAppPackagePath(registryDir, canonical);
    default:
      throw new ConfigurationError(
        `Unknown registry package type: ${this.registryConfig.type}`
      );
  }
}
/**
 * Adds file download URLs to the package manifest (legacy format)
 *
 * A URL for every artifact of the given revision is rendered from the
 * "urlTemplate" configuration value and stored under "file_urls" in the
 * manifest. No-op when "urlTemplate" is not configured or no artifacts exist.
 *
 * FIXME(tonyo): LEGACY function, left for compatibility, replaced by addFilesData
 *
 * @param manifest Package manifest (mutated in place)
 * @param version The new version
 * @param revision Git commit SHA to be published
 */
public async addFileLinks(
  manifest: { [key: string]: any },
  version: string,
  revision: string
): Promise<void> {
  // Nothing to do unless a URL template is configured.
  if (!this.registryConfig.urlTemplate) {
    return;
  }
  const artifacts = await this.getArtifactsForRevision(revision);
  if (artifacts.length === 0) {
    logger.warn('No artifacts found, not adding any links to the manifest');
    return;
  }
  // Render one URL per artifact from the configured template.
  const fileUrls: { [_: string]: string } = {};
  for (const artifact of artifacts) {
    fileUrls[artifact.filename] = renderTemplateSafe(
      this.registryConfig.urlTemplate,
      {
        file: artifact.filename,
        revision,
        version,
      }
    );
  }
  logger.debug(
    `Writing file urls to the manifest, files found: ${artifacts.length}`
  );
  manifest.file_urls = fileUrls;
}
/**
 * Extends the artifact entry with additional information
 *
 * Information on checksums and download URLs is added here: a "url" key when
 * "urlTemplate" is configured, and a "checksums" map for every configured
 * checksum type.
 *
 * @param artifact Artifact
 * @param version The new version
 * @param revision Git commit SHA to be published
 * @returns Object with "url" and/or "checksums" keys; empty when neither is configured
 */
public async getArtifactData(
  artifact: RemoteArtifact,
  version: string,
  revision: string
): Promise<any> {
  const artifactData: any = {};
  if (this.registryConfig.urlTemplate) {
    artifactData.url = renderTemplateSafe(this.registryConfig.urlTemplate, {
      file: artifact.filename,
      revision,
      version,
    });
  }
  if (this.registryConfig.checksums.length > 0) {
    // Compute every configured checksum type, sequentially per artifact.
    const fileChecksums: { [key: string]: string } = {};
    for (const checksumType of this.registryConfig.checksums) {
      const { algorithm, format } = checksumType;
      const checksum = await this.artifactProvider.getChecksum(
        artifact,
        algorithm,
        format
      );
      // Key format: "<algorithm>-<format>", e.g. "sha256-hex".
      fileChecksums[`${algorithm}-${format}`] = checksum;
    }
    artifactData.checksums = fileChecksums;
  }
  return artifactData;
}
/**
 * Extends the artifact entries with additional information
 *
 * Replaces the "files" key of the manifest with per-artifact data (checksums,
 * download links) collected for the given revision. The key is removed when
 * no artifacts produce any data.
 *
 * @param packageManifest Package manifest (mutated in place)
 * @param version The new version
 * @param revision Git commit SHA to be published
 */
public async addFilesData(
  packageManifest: { [key: string]: any },
  version: string,
  revision: string
): Promise<void> {
  // Clear existing data
  delete packageManifest.files;
  const artifacts = await this.getArtifactsForRevision(revision);
  if (artifacts.length === 0) {
    logger.warn('No artifacts found, not adding any file data');
    return;
  }
  logger.info(
    'Adding extra data (checksums, download links) for available artifacts...'
  );
  const files: { [key: string]: any } = {};
  // Fetch per-artifact data with bounded concurrency.
  // tslint:disable-next-line:await-promise
  await mapLimit(artifacts, MAX_DOWNLOAD_CONCURRENCY, async artifact => {
    const fileData = await this.getArtifactData(artifact, version, revision);
    if (!_.isEmpty(fileData)) {
      files[artifact.filename] = fileData;
    }
  });
  if (!_.isEmpty(files)) {
    packageManifest.files = files;
  }
}
/**
 * Returns an updated copy of the package manifest for the new version
 *
 * Reports an error when the canonical name in the craft config disagrees
 * with the manifest, then builds a shallow copy of the manifest with the new
 * version and refreshed file data.
 *
 * @param packageManifest The package's manifest object (not reassigned; nested objects are shared)
 * @param canonical The package's canonical name
 * @param version The new version
 * @param revision Git commit SHA to be published
 * @returns The updated manifest object
 */
public async getUpdatedManifest(
  packageManifest: { [key: string]: any },
  canonical: string,
  version: string,
  revision: string
): Promise<any> {
  // Additional check
  if (canonical !== packageManifest.canonical) {
    reportError(
      `Canonical name in "craft" config ("${canonical}") is inconsistent with ` +
        `the one in package manifest ("${packageManifest.canonical}")`
    );
  }
  // Update the manifest (shallow copy with the new version)
  const updatedManifest = { ...packageManifest, version };
  // Add file links if it's a generic app (legacy)
  if (this.registryConfig.type === RegistryPackageType.APP) {
    await this.addFileLinks(updatedManifest, version, revision);
  }
  // Add various file-related data
  await this.addFilesData(updatedManifest, version, revision);
  return updatedManifest;
}
/**
 * Adds a version file for the package to the local registry checkout
 *
 * Reads the current "latest.json" manifest, builds an updated manifest for
 * the new version, writes it to "<version>.json", and refreshes the
 * latest/major/minor symlinks.
 *
 * @param directory The directory with the checked out registry
 * @param canonical The package's canonical name
 * @param version The new version
 * @param revision Git commit SHA to be published
 */
public async addVersionToRegistry(
  directory: string,
  canonical: string,
  version: string,
  revision: string
): Promise<void> {
  logger.info(
    `Adding the version file to the registry for canonical name "${canonical}"...`
  );
  const packageDirPath = this.getPackageDirPath(directory, canonical);
  const versionFilePath = path.join(packageDirPath, `${version}.json`);
  // NOTE(review): assumes reportError aborts the run — confirm it throws,
  // otherwise the existing version file is overwritten below.
  if (fs.existsSync(versionFilePath)) {
    reportError(`Version file for "${version}" already exists. Aborting.`);
  }
  const packageManifestPath = path.join(packageDirPath, 'latest.json');
  logger.debug('Reading the current configuration from "latest.json"...');
  const packageManifest =
    JSON.parse(fs.readFileSync(packageManifestPath).toString()) || {};
  // Remember the previously released version to decide symlink updates.
  const previousVersion = packageManifest.version || undefined;
  const updatedManifest = await this.getUpdatedManifest(
    packageManifest,
    canonical,
    version,
    revision
  );
  // tslint:disable-next-line:prefer-template
  const manifestString = JSON.stringify(updatedManifest, undefined, 2) + '\n';
  logger.debug('Updated manifest', manifestString);
  logger.debug(`Writing updated manifest to "${versionFilePath}"...`);
  fs.writeFileSync(versionFilePath, manifestString);
  this.createSymlinks(versionFilePath, version, previousVersion);
}
/**
 * Commits and pushes the new version of the package to the release registry
 *
 * Clones the registry repository into the given directory, adds the version
 * file, commits the change on "master", and pushes it (skipped in dry-run).
 *
 * @param directory The directory to clone the registry into
 * @param remote The GitHub remote object
 * @param version The new version
 * @param revision Git commit SHA to be published
 */
public async pushVersionToRegistry(
  directory: string,
  remote: GithubRemote,
  version: string,
  revision: string
): Promise<void> {
  logger.info(`Cloning "${remote.getRemoteString()}" to "${directory}"...`);
  await simpleGit()
    .silent(true)
    .clone(remote.getRemoteStringWithAuth(), directory);
  const canonical = this.registryConfig.canonicalName;
  if (!canonical) {
    throw new ConfigurationError(
      '"canonical" value not found in the registry configuration'
    );
  }
  await this.addVersionToRegistry(directory, canonical, version, revision);
  const git = simpleGit(directory).silent(true);
  await git.checkout('master');
  // Commit
  await git.add(['.']);
  await git.commit(`craft: release "${canonical}", version "${version}"`);
  // Push!
  logger.info(`Pushing the changes...`);
  if (!isDryRun()) {
    await git.push('origin', 'master');
  } else {
    logger.info('[dry-run] Not pushing the branch.');
  }
}
/**
 * Publishes the new package version to the release registry
 *
 * The target is skipped for preview releases (unless "linkPrereleases" is
 * set) and when "onlyIfPresent" is configured but no artifact of the
 * revision matches it.
 *
 * @param version The new version
 * @param revision Git commit SHA to be published
 */
public async publish(version: string, revision: string): Promise<any> {
  if (!this.registryConfig.linkPrereleases && isPreviewRelease(version)) {
    logger.info('Preview release detected, skipping the target');
    return undefined;
  }
  // If we have onlyIfPresent specified, check that we have any of matched files
  const onlyIfPresentPattern = this.registryConfig.onlyIfPresent;
  if (onlyIfPresentPattern) {
    const artifacts = await this.artifactProvider.filterArtifactsForRevision(
      revision,
      {
        includeNames: onlyIfPresentPattern,
      }
    );
    if (artifacts.length === 0) {
      logger.warn(
        `No files found that match "${onlyIfPresentPattern.toString()}", skipping the target.`
      );
      return undefined;
    }
  }
  const remote = this.registryConfig.registryRemote;
  // Authenticate the remote so the clone/push below can use it.
  const username = await getAuthUsername(this.github);
  remote.setAuth(username, getGithubApiToken());
  // Work in a temporary checkout that is cleaned up afterwards.
  await withTempDir(
    async directory =>
      this.pushVersionToRegistry(directory, remote, version, revision),
    true,
    'craft-release-registry-'
  );
  logger.info('Release registry updated');
}
/**
 * Parses registry canonical name to a list of registry directories
 *
 * Example: "npm:@sentry/browser" -> ["npm", "@sentry", "browser"]
 *
 * @param canonicalName Registry canonical name
 * @returns A list of directories
 * @throws ConfigurationError when the name is not "registry:package" shaped
 *   or contains empty path segments
 */
export function parseCanonical(canonicalName: string): string[] {
  // NOTE(review): anything after a second ":" is silently dropped by this
  // destructuring — confirm multi-colon names are not expected.
  const [registry, packageName] = canonicalName.split(':');
  if (!registry || !packageName) {
    throw new ConfigurationError(
      `Cannot parse canonical name for the package: ${canonicalName}`
    );
  }
  const packageDirs = packageName.split('/');
  // Reject empty segments, e.g. "npm:@sentry//browser" or a trailing slash.
  if (packageDirs.some(x => !x)) {
    throw new ConfigurationError(
      `Cannot parse canonical name for the package: ${canonicalName}`
    );
  }
  // Reuse the already-computed segments instead of splitting a second time.
  return [registry, ...packageDirs];
}
| {
throw new ConfigurationError(
`Invalid registry type specified: "${registryType}"`
);
} | conditional_block |
registry.ts | import * as fs from 'fs';
import * as path from 'path';
import { mapLimit } from 'async';
import * as Github from '@octokit/rest';
// tslint:disable-next-line:no-submodule-imports
import * as simpleGit from 'simple-git/promise';
import * as _ from 'lodash';
import { getGlobalGithubConfig } from '../config';
import { logger as loggerRaw } from '../logger';
import { GithubGlobalConfig, TargetConfig } from '../schemas/project_config';
import { ConfigurationError, reportError } from '../utils/errors';
import { withTempDir } from '../utils/files';
import {
getAuthUsername,
getGithubApiToken,
getGithubClient,
GithubRemote,
} from '../utils/githubApi';
import { isDryRun } from '../utils/helpers';
import { renderTemplateSafe } from '../utils/strings';
import { HashAlgorithm, HashOutputFormat } from '../utils/system';
import {
isPreviewRelease,
parseVersion,
versionGreaterOrEqualThan,
} from '../utils/version';
import { stringToRegexp } from '../utils/filters';
import { BaseTarget } from './base';
import {
RemoteArtifact,
BaseArtifactProvider,
MAX_DOWNLOAD_CONCURRENCY,
} from '../artifact_providers/base';
const logger = loggerRaw.withScope('[registry]');
const DEFAULT_REGISTRY_REMOTE: GithubRemote = new GithubRemote(
'getsentry',
'sentry-release-registry'
);
/** Type of the registry package */
export enum RegistryPackageType {
/** App is a generic package type that doesn't belong to any specific registry */
APP = 'app',
/** SDK is a package hosted in one of public registries (PyPI, NPM, etc.) */
SDK = 'sdk',
}
/** Describes a checksum entry in the registry */
interface ChecksumEntry {
/** Checksum (hash) algorithm */
algorithm: HashAlgorithm;
/** Checksum format */
format: HashOutputFormat;
}
/** "registry" target options */
export interface RegistryConfig extends TargetConfig {
  /** Type of the registry package */
  type: RegistryPackageType;
  /** Unique package canonical name, including type and/or registry name */
  canonicalName?: string;
  /** Git remote of the release registry */
  registryRemote: GithubRemote;
  /** Should we create registry entries for pre-releases? */
  linkPrereleases: boolean;
  /** URL template for file assets */
  urlTemplate?: string;
  /** Types of checksums to compute for artifacts */
  checksums: ChecksumEntry[];
  /** Pattern that allows to skip the target if there's no matching file */
  onlyIfPresent?: RegExp;
}
/**
* Target responsible for publishing static assets to GitHub pages
*/
export class RegistryTarget extends BaseTarget {
/** Target name */
public readonly name: string = 'registry';
/** Target options */
public readonly registryConfig: RegistryConfig;
/** Github client */
public readonly github: Github;
/** Github repo configuration */
public readonly githubRepo: GithubGlobalConfig;
/**
 * Creates the registry target and validates its configuration.
 *
 * @param config Raw target configuration
 * @param artifactProvider Provider used to look up release artifacts
 */
public constructor(config: any, artifactProvider: BaseArtifactProvider) {
  super(config, artifactProvider);
  this.github = getGithubClient();
  this.githubRepo = getGlobalGithubConfig();
  // Validates the raw config eagerly; throws ConfigurationError on bad input.
  this.registryConfig = this.getRegistryConfig();
}
/**
 * Checks the provided checksums configuration
 *
 * Throws an error in case the configuration is incorrect
 * FIXME(tonyo): rewrite this with JSON schemas
 *
 * @param checksums Raw configuration
 * @returns The validated list of checksum entries (empty for falsy input)
 */
protected castChecksums(checksums: any): ChecksumEntry[] {
  if (!checksums) {
    return [];
  }
  // Array.isArray (unlike "instanceof Array") also recognizes arrays that
  // originate from a different execution context.
  if (!Array.isArray(checksums)) {
    throw new ConfigurationError(
      'Invalid type of "checksums": should be an array'
    );
  }
  return checksums.map(item => {
    if (typeof item !== 'object' || !item.algorithm || !item.format) {
      throw new ConfigurationError(
        `Invalid checksum type: ${JSON.stringify(item)}`
      );
    }
    // FIXME(tonyo): this is ugly as hell :(
    // This all has to be replaced with JSON schema
    if (
      !(Object as any).values(HashAlgorithm).includes(item.algorithm) ||
      !(Object as any).values(HashOutputFormat).includes(item.format)
    ) {
      throw new ConfigurationError(
        `Invalid checksum attributes: ${JSON.stringify(item)}`
      );
    }
    return { algorithm: item.algorithm, format: item.format };
  });
}
/**
 * Extracts Registry target options from the raw configuration
 *
 * @returns The validated registry configuration
 * @throws ConfigurationError on any invalid or missing option
 */
public getRegistryConfig(): RegistryConfig {
  const registryType = this.config.type;
  if (
    [RegistryPackageType.APP, RegistryPackageType.SDK].indexOf(
      registryType
    ) === -1
  ) {
    throw new ConfigurationError(
      `Invalid registry type specified: "${registryType}"`
    );
  }
  // "urlTemplate" is only meaningful for generic apps.
  let urlTemplate;
  if (registryType === RegistryPackageType.APP) {
    urlTemplate = this.config.urlTemplate;
    if (urlTemplate && typeof urlTemplate !== 'string') {
      throw new ConfigurationError(
        `Invalid "urlTemplate" specified: ${urlTemplate}`
      );
    }
  }
  const releaseConfig = this.config.config;
  if (!releaseConfig) {
    throw new ConfigurationError(
      'Cannot find configuration dictionary for release registry'
    );
  }
  const canonicalName = releaseConfig.canonical;
  if (!canonicalName) {
    throw new ConfigurationError(
      'Canonical name not found in the configuration'
    );
  }
  const linkPrereleases = this.config.linkPrereleases || false;
  if (typeof linkPrereleases !== 'boolean') {
    // Typo fix: the original message read 'Invlaid'.
    throw new ConfigurationError('Invalid type of "linkPrereleases"');
  }
  const checksums = this.castChecksums(this.config.checksums);
  const onlyIfPresentStr = this.config.onlyIfPresent || undefined;
  let onlyIfPresent;
  if (onlyIfPresentStr) {
    if (typeof onlyIfPresentStr !== 'string') {
      throw new ConfigurationError('Invalid type of "onlyIfPresent"');
    }
    onlyIfPresent = stringToRegexp(onlyIfPresentStr);
  }
  return {
    canonicalName,
    checksums,
    linkPrereleases,
    onlyIfPresent,
    registryRemote: DEFAULT_REGISTRY_REMOTE,
    type: registryType,
    urlTemplate,
  };
}
/**
* Creates a symlink, overwriting the existing one
*
* @param target Target path
* @param newFile Path to the new symlink
*/
public forceSymlink(target: string, newFile: string): void {
if (fs.existsSync(newFile)) {
fs.unlinkSync(newFile);
}
fs.symlinkSync(target, newFile);
}
/**
 * Create symbolic links to the new version file
 *
 * "latest.json" link is not updated if the new version is "older" (e.g., it's
 * a patch release for an older major version). Major ("N.json") and minor
 * ("N.M.json") links are always repointed at the new version file.
 *
 * @param versionFilePath Path to the new version file
 * @param newVersion The new version
 * @param oldVersion The previous latest version
 * @throws ConfigurationError when newVersion cannot be parsed
 */
public createSymlinks(
  versionFilePath: string,
  newVersion: string,
  oldVersion?: string
): void {
  const parsedNewVersion = parseVersion(newVersion) || undefined;
  if (!parsedNewVersion) {
    // Bug fix: report the raw version string. The original interpolated
    // "parsedNewVersion", which is always undefined on this path.
    throw new ConfigurationError(`Cannot parse version: "${newVersion}"`);
  }
  const parsedOldVersion =
    (oldVersion ? parseVersion(oldVersion) : undefined) || undefined;
  const baseVersionName = path.basename(versionFilePath);
  const packageDir = path.dirname(versionFilePath);
  // link latest, but only if the new version is "newer"
  if (
    parsedOldVersion &&
    !versionGreaterOrEqualThan(parsedNewVersion, parsedOldVersion)
  ) {
    logger.warn(
      `Not updating the latest version file: current version is "${oldVersion}", new version is "${newVersion}"`
    );
  } else {
    logger.debug(
      `Changing symlink for "latest.json" from version "${oldVersion}" to "${newVersion}"`
    );
    this.forceSymlink(baseVersionName, path.join(packageDir, 'latest.json'));
  }
  // link major
  const majorVersionLink = `${parsedNewVersion.major}.json`;
  this.forceSymlink(baseVersionName, path.join(packageDir, majorVersionLink));
  // link minor
  const minorVersionLink = `${parsedNewVersion.major}.${parsedNewVersion.minor}.json`;
  this.forceSymlink(baseVersionName, path.join(packageDir, minorVersionLink));
}
/**
* Returns the path to the SDK, given its canonical name
*
* @param registryDir The path to the local registry
* @param canonical The SDK's canonical name
* @returns The SDK path
*/
public getSdkPackagePath(registryDir: string, canonical: string): string {
const packageDirs = parseCanonical(canonical);
return [registryDir, 'packages'].concat(packageDirs).join(path.sep);
}
/**
* Returns the path to the app, given its canonical name
*
* @param registryDir The path to the local registry
* @param canonical The app's canonical name
* @returns The app path
*/
public getAppPackagePath(registryDir: string, canonical: string): string {
const packageDirs = parseCanonical(canonical);
if (packageDirs[0] !== 'app') {
throw new ConfigurationError(
`Invalid canonical entry for an app: ${canonical}`
);
}
return [registryDir, 'apps'].concat(packageDirs.slice(1)).join(path.sep);
}
/**
* Returns the path to the package from its canonical name
*
* @param registryDir The path to the local registry
* @param canonical The app's canonical name
*/
public getPackageDirPath(registryDir: string, canonical: string): string {
if (this.registryConfig.type === RegistryPackageType.SDK) {
return this.getSdkPackagePath(registryDir, canonical);
} else if (this.registryConfig.type === RegistryPackageType.APP) {
return this.getAppPackagePath(registryDir, canonical);
} else {
throw new ConfigurationError(
`Unknown registry package type: ${this.registryConfig.type}`
);
}
}
/**
* Adds file URLs to the manifest
*
* URL template is taken from "urlTemplate" configuration argument
*
* FIXME(tonyo): LEGACY function, left for compatibility, replaced by addFilesData
*
* @param manifest Package manifest
* @param version The new version
* @param revision Git commit SHA to be published
*/
public async addFileLinks(
manifest: { [key: string]: any },
version: string,
revision: string
): Promise<void> {
if (!this.registryConfig.urlTemplate) {
return;
}
const artifacts = await this.getArtifactsForRevision(revision);
if (artifacts.length === 0) {
logger.warn('No artifacts found, not adding any links to the manifest');
return;
}
const fileUrls: { [_: string]: string } = {};
for (const artifact of artifacts) {
fileUrls[artifact.filename] = renderTemplateSafe(
this.registryConfig.urlTemplate,
{
file: artifact.filename,
revision,
version,
}
);
}
logger.debug(
`Writing file urls to the manifest, files found: ${artifacts.length}`
);
manifest.file_urls = fileUrls;
}
/**
* Extends the artifact entry with additional information
*
* Information and checksums and download URLs are added here
*
* @param artifact Artifact
* @param version The new version
* @param revision Git commit SHA to be published
*
*/
public async getArtifactData(
artifact: RemoteArtifact,
version: string,
revision: string
): Promise<any> {
const artifactData: any = {};
if (this.registryConfig.urlTemplate) {
artifactData.url = renderTemplateSafe(this.registryConfig.urlTemplate, {
file: artifact.filename,
revision,
version,
});
}
if (this.registryConfig.checksums.length > 0) {
const fileChecksums: { [key: string]: string } = {};
for (const checksumType of this.registryConfig.checksums) {
const { algorithm, format } = checksumType;
const checksum = await this.artifactProvider.getChecksum(
artifact,
algorithm,
format
);
fileChecksums[`${algorithm}-${format}`] = checksum;
}
artifactData.checksums = fileChecksums;
}
return artifactData;
}
/**
* Extends the artifact entries with additional information
*
* @param packageManifest Package manifest
* @param version The new version
* @param revision Git commit SHA to be published
*/
public async addFilesData(
packageManifest: { [key: string]: any },
version: string,
revision: string
): Promise<void> {
// Clear existing data
delete packageManifest.files;
const artifacts = await this.getArtifactsForRevision(revision);
if (artifacts.length === 0) {
logger.warn('No artifacts found, not adding any file data');
return;
}
logger.info(
'Adding extra data (checksums, download links) for available artifacts...'
);
const files: { [key: string]: any } = {};
// tslint:disable-next-line:await-promise
await mapLimit(artifacts, MAX_DOWNLOAD_CONCURRENCY, async artifact => {
const fileData = await this.getArtifactData(artifact, version, revision);
if (!_.isEmpty(fileData)) {
files[artifact.filename] = fileData;
}
});
if (!_.isEmpty(files)) {
packageManifest.files = files;
}
}
/**
* Updates the local copy of the release registry
*
* @param packageManifest The package's manifest object
* @param canonical The package's canonical name
* @param version The new version
* @param revision Git commit SHA to be published
*/
public async getUpdatedManifest(
packageManifest: { [key: string]: any },
canonical: string,
version: string,
revision: string
): Promise<any> {
// Additional check
if (canonical !== packageManifest.canonical) {
reportError(
`Canonical name in "craft" config ("${canonical}") is inconsistent with ` +
`the one in package manifest ("${packageManifest.canonical}")`
);
}
// Update the manifest
const updatedManifest = { ...packageManifest, version };
// Add file links if it's a generic app (legacy)
if (this.registryConfig.type === RegistryPackageType.APP) {
await this.addFileLinks(updatedManifest, version, revision);
}
// Add various file-related data
await this.addFilesData(updatedManifest, version, revision);
return updatedManifest;
}
/**
* Updates the local copy of the release registry
*
* @param directory The directory with the checkout out registry
* @param canonical The package's canonical name
* @param version The new version
* @param revision Git commit SHA to be published
*/
public async addVersionToRegistry(
directory: string,
canonical: string,
version: string,
revision: string
): Promise<void> {
logger.info(
`Adding the version file to the registry for canonical name "${canonical}"...`
);
const packageDirPath = this.getPackageDirPath(directory, canonical);
const versionFilePath = path.join(packageDirPath, `${version}.json`);
if (fs.existsSync(versionFilePath)) {
reportError(`Version file for "${version}" already exists. Aborting.`);
}
const packageManifestPath = path.join(packageDirPath, 'latest.json');
logger.debug('Reading the current configuration from "latest.json"...');
const packageManifest =
JSON.parse(fs.readFileSync(packageManifestPath).toString()) || {};
const previousVersion = packageManifest.version || undefined;
const updatedManifest = await this.getUpdatedManifest(
packageManifest,
canonical,
version,
revision
);
// tslint:disable-next-line:prefer-template
const manifestString = JSON.stringify(updatedManifest, undefined, 2) + '\n';
logger.debug('Updated manifest', manifestString);
logger.debug(`Writing updated manifest to "${versionFilePath}"...`);
fs.writeFileSync(versionFilePath, manifestString);
this.createSymlinks(versionFilePath, version, previousVersion);
}
/**
* Commits and pushes the new version of the package to the release registry
*
* @param directory The directory with the checkout out registry
* @param remote The GitHub remote object
* @param version The new version
* @param revision Git commit SHA to be published
*/
public async pushVersionToRegistry(
directory: string,
remote: GithubRemote,
version: string,
revision: string
): Promise<void> {
logger.info(`Cloning "${remote.getRemoteString()}" to "${directory}"...`);
await simpleGit()
.silent(true)
.clone(remote.getRemoteStringWithAuth(), directory);
const canonical = this.registryConfig.canonicalName;
if (!canonical) {
throw new ConfigurationError(
'"canonical" value not found in the registry configuration'
);
}
await this.addVersionToRegistry(directory, canonical, version, revision);
const git = simpleGit(directory).silent(true);
await git.checkout('master');
// Commit
await git.add(['.']);
await git.commit(`craft: release "${canonical}", version "${version}"`);
// Push!
logger.info(`Pushing the changes...`);
if (!isDryRun()) {
await git.push('origin', 'master');
} else {
logger.info('[dry-run] Not pushing the branch.');
}
}
/**
* Pushes an archive with static HTML web assets to the configured branch
*/
public async publish(version: string, revision: string): Promise<any> {
if (!this.registryConfig.linkPrereleases && isPreviewRelease(version)) {
logger.info('Preview release detected, skipping the target');
return undefined;
}
// If we have onlyIfPresent specified, check that we have any of matched files
const onlyIfPresentPattern = this.registryConfig.onlyIfPresent;
if (onlyIfPresentPattern) {
const artifacts = await this.artifactProvider.filterArtifactsForRevision(
revision,
{
includeNames: onlyIfPresentPattern,
}
);
if (artifacts.length === 0) {
logger.warn(
`No files found that match "${onlyIfPresentPattern.toString()}", skipping the target.`
);
return undefined;
}
}
const remote = this.registryConfig.registryRemote;
const username = await getAuthUsername(this.github);
remote.setAuth(username, getGithubApiToken());
await withTempDir(
async directory =>
this.pushVersionToRegistry(directory, remote, version, revision),
true,
'craft-release-registry-'
);
logger.info('Release registry updated');
}
}
/**
* Parses registry canonical name to a list of registry directories
*
* Example: "npm:@sentry/browser" -> ["npm", "@sentry", "browser"]
*
* @param canonicalName Registry canonical name
* @returns A list of directories
*/
export function parseCanonical(canonicalName: string): string[] {
const [registry, packageName] = canonicalName.split(':');
if (!registry || !packageName) {
throw new ConfigurationError(
`Cannot parse canonical name for the package: ${canonicalName}`
);
}
const packageDirs = packageName.split('/');
if (packageDirs.some(x => !x)) {
throw new ConfigurationError(
`Cannot parse canonical name for the package: ${canonicalName}`
);
}
return [registry].concat(packageName.split('/'));
} | random_line_split | |
Ngrams.py | # -*- coding: utf-8
URL = 1
SMILEY = 2
DATE = 3
VERSION = 4
MENTION = 5
HASHTAG = 6
WORD = 7
MONEY = 8
def _ngrams(input_list, n):
return zip(*[input_list[i:] for i in range(n)])
def _remove_numbers(question):
import re
question = re.sub('[^\w]\d+[^\w]', ' ', question)
question = re.sub('[^\w]\d+$', ' ', question)
question = re.sub('^\d+[^\w]', ' ', question)
question = re.sub('^\d+$', ' ', question)
return question
def _remove_uids(question):
import re
question = re.sub('[^\w][a-zA-Z0-9]*([0-9]+[a-zA-Z]+)|([a-zA-Z]+[0-9]+)[a-zA-Z0-9]*[^\w]', ' ', question)
question = re.sub('[^\w][a-zA-Z0-9]*([0-9]+[a-zA-Z]+)|([a-zA-Z]+[0-9]+)[a-zA-Z0-9]*$', ' ', question)
question = re.sub('^[a-zA-Z0-9]*([0-9]+[a-zA-Z]+)|([a-zA-Z]+[0-9]+)[a-zA-Z0-9]*[^\w]', ' ', question)
question = re.sub('^[a-zA-Z0-9]*([0-9]+[a-zA-Z]+)|([a-zA-Z]+[0-9]+)[a-zA-Z0-9]*$', ' ', question)
return question
def _remove_duplicate_chars(question):
import re
question = question.replace('...', '. ')
return re.sub(r'([a-zA-Z\.\?!;\'\",\:\>\<])\1{2,}', r'\1', question)
def _remove_phone_numbers(question):
import re
cleansed_question = re.sub(r'(\d{2}\W*\d{2}\W*\d{2}\W*\d{2}\W*\d{2})', ' ', question, flags=re.MULTILINE)
return cleansed_question.strip()
def _remove_emails(question):
import re
cleansed_question = re.sub(r'([A-Za-z0-9\.\+_-]+@[A-Za-z0-9\._-]+\.[a-zA-Z]+)', '', question, flags=re.MULTILINE)
return cleansed_question.strip()
def _remove_twitter_at(question):
ats = [at for at in question.split() if at.startswith('@')]
cleansed_question = question
for at in ats:
cleansed_question = cleansed_question.replace(at, '')
return cleansed_question.strip()
def _remove_twitter_rw(question):
import re
twitter_rw = ['rt', 'dm']
cleansed_question = ' ' + question + ' '
for rw in twitter_rw:
cleansed_question = re.sub(r'\W(?:%s)\W' % rw, ' ', cleansed_question, flags=re.IGNORECASE)
return cleansed_question.strip()
def _remove_urls(question):
import re
cleansed_question = re.sub(r'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+', ' ', question, flags=re.MULTILINE)
return cleansed_question.strip()
def _remove_smilies(question):
import re
smileys = [': /+','>:[+', ':-(+', ':(+', ':-c', ':-<+', ':-[+', ':{', '>.>', ':o',
'<.<', '>.<', ':\\+', ':\\\'(+', ':\\\'(+', ':\'(+', ';\'(+', '>:\\', '>:/+',
':-/+', ':-.', ':\\+', '=/+', '=\\+', ':S', ':/+', ':$', '\*cry\*', '-_-',
';-)+', ':D', ';D', ';;)+', ':-x', ':x', '^-^', 'x)', 'è_é','haha','hehe', 'u_u',
'0:-)', ':o)', ':*', ':-*', '8-}', '=P~', '>:D<', '<:-P', ';))',
'xD', '>.<','u_u',
'>:]+', ':3', ':c', ':>', '=]', '8)+', '=)+', ':}+', ':^)+', '>:D', ':-D', '8-D', 'x-D', 'X-D', '=-D', '=D', '=-3', '8-)+',
'>;]+', '*-)+', '*)+', ';-]+', ';]+', ';^)+', '>:P', ':-P', ':P', 'X-P', ':-p', '=p', ':-Þ', ':Þ', ':-b', '>:o', '>:O', ':-O',
'°o°', '°O°', 'o_O', 'o.O', '8-0', 'o_O', 'x)+', '^^+', ':=)', '((?::|;|=)(?:-)?(?:\)|D|P))']
regex = '|'.join(['(\s%s\s)' % i for i in [re.escape(smiley) for smiley in smileys]])
cleansed_question = re.sub(regex, ' ', question, flags=re.MULTILINE|re.I)
return cleansed_question.strip()
def _remove_hashtags(question):
import re
question = question.strip()
hash_tags = [tag for tag in question.split() if tag.startswith('#')]
if not hash_tags:
return question
# check the position of the hash tags within the question and decide whether to clean and keep it
# or to remove it
import sys
last_question_mark_pos = sys.maxint if question.rfind('?') == -1 else question.rfind('?')
# remove all hash tags being after the last question mark
for tag in hash_tags:
tag_pos = question.find(tag)
if tag_pos > last_question_mark_pos:
# completely remove hashtag if they are after the last qm or just at the beginning of the tweet
question = question.replace(tag, ' ')
question = question.strip()
last_question_mark_pos = question.rfind('?')
else:
# just remove the hashtag sign
question = question.replace(tag, tag[1:])
last_question_mark_pos = sys.maxint if question.rfind('?') == -1 else question.rfind('?')
# remove extra spaces and return
pattern = re.compile(r'\s+')
question = re.sub(pattern, ' ', question.strip())
return question
def find_urls(content, scheme):
current = content.find(scheme)
while current != -1:
end = content.find(' ', current)
if end == -1:
end = co | while content[end-1] in [',', '.', ';', ':', '?']:
end -= 1
yield (current, end)
current = end
current = content.find(scheme, current)
class SmileyParser:
def __init__(self):
import re
western_smileys = [
# from http://en.wikipedia.org/wiki/List_of_emoticons
':-)', ':)', ':o)', ':]', ':3', ':c)', ':>', '=]', '8)', '=)', ':}', ':^)',
':-D', ':D', '8-D', '8D', 'x-D', 'xD', 'X-D', 'XD', '=-D', '=D', '=-3', '=3', 'B^D',
':-))',
'>:[', ':-(', ':(', ':-c', ':c', ':-<', ':<', ':-[', ':[', ':{',
':-||', ':@', '>:(',
":'-(", ":'(",
":'-)", ":')",
'D:<', 'D:', 'D8', 'D;', 'D=', 'DX', 'v.v', "D-':",
'>:O', ':-O', ':O', '8-0',
':*', ':^*',
';-)', ';)', '*-)', '*)', ';-]', ';]', ';D', ';^)', ':-,',
'>:P', ':-P', ':P', 'X-P', 'x-p', 'xp', 'XP', ':-p', ':p', '=p', ':-b', ':b',
'>:\\', '>:/', ':-/', ':-.', ':/', ':\\', '=/', '=\\', ':L', '=L', ':S', '>.<',
':|', ':-|',
':$',
':-X', ':X', ':-#', ':#',
'O:-)', '0:-3', '0:3', '0:-)', '0:)', '0;^)',
'>:)', '>;)', '>:-)',
'}:-)', '}:)', '3:-)', '3:)',
'o/\o', '^5', '>_>^', '^<_<',
'|;-)', '|-O',
':-&', ':&',
'#-)',
'%-)', '%)',
':-###..', ':###..',
'\\o/', '/o\\'
'*\\0/*',
# additional
':=)', ';=)', ';))', ':))', ';;)'
]
eastern_smileys = [
# from http://en.wikipedia.org/wiki/List_of_emoticons
]
unicode_smileys = [
# from http://en.wikipedia.org/wiki/List_of_emoticons
u'\u2639', u'\u263a', u'\u263b',
u'\u1f600', u'\u1f601', u'\u1f602', u'\u1f603', u'\u1f604', u'\u1f605', u'\u1f606', u'\u1f607',
u'\u1f608', u'\u1f609', u'\u1f60a', u'\u1f60b', u'\u1f60c', u'\u1f60d', u'\u1f60e', u'\u1f60f',
u'\u1f610', u'\u1f611', u'\u1f612', u'\u1f603', u'\u1f614', u'\u1f615', u'\u1f616', u'\u1f617',
u'\u1f618', u'\u1f619', u'\u1f61a', u'\u1f60b', u'\u1f61c', u'\u1f61d', u'\u1f61e', u'\u1f61f',
u'\u1f620', u'\u1f621', u'\u1f622', u'\u1f603', u'\u1f624', u'\u1f625', u'\u1f626', u'\u1f627',
u'\u1f628', u'\u1f629', u'\u1f62a', u'\u1f60b', u'\u1f62c', u'\u1f62d', u'\u1f62e', u'\u1f62f',
u'\u1f630', u'\u1f631', u'\u1f632', u'\u1f633', u'\u1f634', u'\u1f635', u'\u1f636', u'\u1f637',
u'\u1f638', u'\u1f639', u'\u1f63a', u'\u1f63b', u'\u1f63c', u'\u1f63d', u'\u1f63e', u'\u1f63f',
u'\u1f640', u'\u1f645', u'\u1f646', u'\u1f647',
u'\u1f648', u'\u1f649', u'\u1f64a', u'\u1f64b', u'\u1f64c', u'\u1f64d', u'\u1f64e', u'\u1f64f',
]
# try to generate rotated smileys
invert = {
# ltr to rtl
')' : '(',
'(' : ')',
']' : '[',
'[' : ']',
'{' : '}',
'}' : '{',
'<' : '>',
'>' : '<',
'/' : '\\',
'\\' : '/',
# does not change
'o' : 'o',
'x' : 'x',
'v' : 'v',
'0' : '0',
'8' : '8',
'|' : '|',
':' : ':',
';' : ';',
',' : ',',
"'" : "'",
'*' : '*',
'%' : '%'
}
def reverse_smiley(smiley):
output = []
for c in reversed(smiley):
if c in invert:
output.append(invert[c])
else:
return None
return ''.join(output)
smileys = western_smileys + \
filter(lambda x:not x is None, [reverse_smiley(smiley) for smiley in western_smileys]) + \
eastern_smileys + \
unicode_smileys
self._regex = re.compile('|'.join(['(?:%s)' % i for i in [re.escape(smiley) for smiley in smileys]]), re.I | re.U)
def parse(self, content):
for i in self._regex.finditer(content):
start, end = i.span()
yield (start, end)
def _tokenize(content):
import re
tokens = []
# urls
for scheme in ['http', 'https', 'ftp']:
for start,end in find_urls(content, '%s://' % scheme):
tokens.append((start, end, URL))
def untokenized(tokens):
current = 0
for start, end, type in sorted(tokens):
yield (current, start)
current = end
yield (current, -1)
# smileys
smileys = SmileyParser()
for start, end in untokenized(tokens):
for s,e in smileys.parse(content[start:end]):
tokens.append((start+s, start+e, SMILEY))
# dates
for start, end in untokenized(tokens):
for m in re.finditer(r'\b([0-9]{1,2}/[0-9]{1,2}(/[0-9]{2,4})?)\b', content[start:end]):
s, e = m.span()
tokens.append((start + s, start + e, DATE))
# Money
for start, end in untokenized(tokens):
#if content[start:end].find(u'€') != -1:
# print 'MONEY: ', content[start:end]
for m in re.finditer(ur'([0-9.,]+[£\$€])', content[start:end], re.U):
s, e = m.span()
tokens.append((start + s, start + e, MONEY))
# versions
for start, end in untokenized(tokens):
for m in re.finditer(r'\b((?:[0-9.])+\w?)\b', content[start:end], re.U):
s, e = m.span()
tokens.append((start + s, start + e, VERSION))
# mentions
for start, end in untokenized(tokens):
for m in re.finditer(r'(@[a-zA-Z0-9_]{1,15})\b', content[start:end]):
s, e = m.span()
tokens.append((start + s, start + e, MENTION))
# hashtags
for start, end in untokenized(tokens):
for m in re.finditer(r'(#[a-zA-Z0-9_]+)\b', content[start:end]):
s, e = m.span()
tokens.append((start + s, start + e, HASHTAG))
# whitespace-separated words
for start, end in untokenized(tokens):
for m in re.finditer(r'\b\w+\b', content[start:end], re.U):
s, e = m.span()
tokens.append((start + s, start + e, WORD))
return sorted(tokens)
def _clean(content):
content = content.lower()
content = _remove_smilies(content)
content = _remove_hashtags(content)
content = _remove_urls(content)
content = _remove_twitter_rw(content)
content = _remove_twitter_at(content)
content = _remove_emails(content)
content = _remove_phone_numbers(content)
content = _remove_duplicate_chars(content)
content = _remove_numbers(content)
content = _remove_uids(content)
tokens = _tokenize(content)
content = ' '.join([content[start:end] for start, end, type in tokens])
return content
def generate(content):
import collections
freq_dist = collections.defaultdict(int)
content = _clean(content)
if content == '':
return []
content_chars = [' ']
content_chars += list(content)
content_chars.append(' ')
ngrams = [x for x in _ngrams(content_chars, 1) if ''.join(list(x)) != ' ']
ngrams += _ngrams(content_chars, 2)
ngrams += _ngrams(content_chars, 3)
ngrams += _ngrams(content_chars, 4)
ngrams += _ngrams(content_chars, 5)
for ngram in ngrams:
freq_dist[''.join(list(ngram))] += 1
import operator
return sorted(freq_dist.iteritems(), key=operator.itemgetter(1), reverse=True)
| ntent.find('\n', current)
if end == -1:
end = len(content)
| conditional_block |
Ngrams.py | # -*- coding: utf-8
URL = 1
SMILEY = 2
DATE = 3
VERSION = 4
MENTION = 5
HASHTAG = 6
WORD = 7
MONEY = 8
def _ngrams(input_list, n):
return zip(*[input_list[i:] for i in range(n)])
def _remove_numbers(question):
import re
question = re.sub('[^\w]\d+[^\w]', ' ', question)
question = re.sub('[^\w]\d+$', ' ', question)
question = re.sub('^\d+[^\w]', ' ', question)
question = re.sub('^\d+$', ' ', question)
return question
def _remove_uids(question):
import re
question = re.sub('[^\w][a-zA-Z0-9]*([0-9]+[a-zA-Z]+)|([a-zA-Z]+[0-9]+)[a-zA-Z0-9]*[^\w]', ' ', question)
question = re.sub('[^\w][a-zA-Z0-9]*([0-9]+[a-zA-Z]+)|([a-zA-Z]+[0-9]+)[a-zA-Z0-9]*$', ' ', question)
question = re.sub('^[a-zA-Z0-9]*([0-9]+[a-zA-Z]+)|([a-zA-Z]+[0-9]+)[a-zA-Z0-9]*[^\w]', ' ', question)
question = re.sub('^[a-zA-Z0-9]*([0-9]+[a-zA-Z]+)|([a-zA-Z]+[0-9]+)[a-zA-Z0-9]*$', ' ', question)
return question
def _remove_duplicate_chars(question):
import re
question = question.replace('...', '. ')
return re.sub(r'([a-zA-Z\.\?!;\'\",\:\>\<])\1{2,}', r'\1', question)
def _remove_phone_numbers(question):
import re
cleansed_question = re.sub(r'(\d{2}\W*\d{2}\W*\d{2}\W*\d{2}\W*\d{2})', ' ', question, flags=re.MULTILINE)
return cleansed_question.strip()
def _remove_emails(question):
import re
cleansed_question = re.sub(r'([A-Za-z0-9\.\+_-]+@[A-Za-z0-9\._-]+\.[a-zA-Z]+)', '', question, flags=re.MULTILINE)
return cleansed_question.strip()
def _remove_twitter_at(question):
ats = [at for at in question.split() if at.startswith('@')]
cleansed_question = question
for at in ats:
cleansed_question = cleansed_question.replace(at, '')
return cleansed_question.strip()
def | (question):
import re
twitter_rw = ['rt', 'dm']
cleansed_question = ' ' + question + ' '
for rw in twitter_rw:
cleansed_question = re.sub(r'\W(?:%s)\W' % rw, ' ', cleansed_question, flags=re.IGNORECASE)
return cleansed_question.strip()
def _remove_urls(question):
import re
cleansed_question = re.sub(r'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+', ' ', question, flags=re.MULTILINE)
return cleansed_question.strip()
def _remove_smilies(question):
import re
smileys = [': /+','>:[+', ':-(+', ':(+', ':-c', ':-<+', ':-[+', ':{', '>.>', ':o',
'<.<', '>.<', ':\\+', ':\\\'(+', ':\\\'(+', ':\'(+', ';\'(+', '>:\\', '>:/+',
':-/+', ':-.', ':\\+', '=/+', '=\\+', ':S', ':/+', ':$', '\*cry\*', '-_-',
';-)+', ':D', ';D', ';;)+', ':-x', ':x', '^-^', 'x)', 'è_é','haha','hehe', 'u_u',
'0:-)', ':o)', ':*', ':-*', '8-}', '=P~', '>:D<', '<:-P', ';))',
'xD', '>.<','u_u',
'>:]+', ':3', ':c', ':>', '=]', '8)+', '=)+', ':}+', ':^)+', '>:D', ':-D', '8-D', 'x-D', 'X-D', '=-D', '=D', '=-3', '8-)+',
'>;]+', '*-)+', '*)+', ';-]+', ';]+', ';^)+', '>:P', ':-P', ':P', 'X-P', ':-p', '=p', ':-Þ', ':Þ', ':-b', '>:o', '>:O', ':-O',
'°o°', '°O°', 'o_O', 'o.O', '8-0', 'o_O', 'x)+', '^^+', ':=)', '((?::|;|=)(?:-)?(?:\)|D|P))']
regex = '|'.join(['(\s%s\s)' % i for i in [re.escape(smiley) for smiley in smileys]])
cleansed_question = re.sub(regex, ' ', question, flags=re.MULTILINE|re.I)
return cleansed_question.strip()
def _remove_hashtags(question):
import re
question = question.strip()
hash_tags = [tag for tag in question.split() if tag.startswith('#')]
if not hash_tags:
return question
# check the position of the hash tags within the question and decide whether to clean and keep it
# or to remove it
import sys
last_question_mark_pos = sys.maxint if question.rfind('?') == -1 else question.rfind('?')
# remove all hash tags being after the last question mark
for tag in hash_tags:
tag_pos = question.find(tag)
if tag_pos > last_question_mark_pos:
# completely remove hashtag if they are after the last qm or just at the beginning of the tweet
question = question.replace(tag, ' ')
question = question.strip()
last_question_mark_pos = question.rfind('?')
else:
# just remove the hashtag sign
question = question.replace(tag, tag[1:])
last_question_mark_pos = sys.maxint if question.rfind('?') == -1 else question.rfind('?')
# remove extra spaces and return
pattern = re.compile(r'\s+')
question = re.sub(pattern, ' ', question.strip())
return question
def find_urls(content, scheme):
current = content.find(scheme)
while current != -1:
end = content.find(' ', current)
if end == -1:
end = content.find('\n', current)
if end == -1:
end = len(content)
while content[end-1] in [',', '.', ';', ':', '?']:
end -= 1
yield (current, end)
current = end
current = content.find(scheme, current)
class SmileyParser:
def __init__(self):
import re
western_smileys = [
# from http://en.wikipedia.org/wiki/List_of_emoticons
':-)', ':)', ':o)', ':]', ':3', ':c)', ':>', '=]', '8)', '=)', ':}', ':^)',
':-D', ':D', '8-D', '8D', 'x-D', 'xD', 'X-D', 'XD', '=-D', '=D', '=-3', '=3', 'B^D',
':-))',
'>:[', ':-(', ':(', ':-c', ':c', ':-<', ':<', ':-[', ':[', ':{',
':-||', ':@', '>:(',
":'-(", ":'(",
":'-)", ":')",
'D:<', 'D:', 'D8', 'D;', 'D=', 'DX', 'v.v', "D-':",
'>:O', ':-O', ':O', '8-0',
':*', ':^*',
';-)', ';)', '*-)', '*)', ';-]', ';]', ';D', ';^)', ':-,',
'>:P', ':-P', ':P', 'X-P', 'x-p', 'xp', 'XP', ':-p', ':p', '=p', ':-b', ':b',
'>:\\', '>:/', ':-/', ':-.', ':/', ':\\', '=/', '=\\', ':L', '=L', ':S', '>.<',
':|', ':-|',
':$',
':-X', ':X', ':-#', ':#',
'O:-)', '0:-3', '0:3', '0:-)', '0:)', '0;^)',
'>:)', '>;)', '>:-)',
'}:-)', '}:)', '3:-)', '3:)',
'o/\o', '^5', '>_>^', '^<_<',
'|;-)', '|-O',
':-&', ':&',
'#-)',
'%-)', '%)',
':-###..', ':###..',
'\\o/', '/o\\'
'*\\0/*',
# additional
':=)', ';=)', ';))', ':))', ';;)'
]
eastern_smileys = [
# from http://en.wikipedia.org/wiki/List_of_emoticons
]
unicode_smileys = [
# from http://en.wikipedia.org/wiki/List_of_emoticons
u'\u2639', u'\u263a', u'\u263b',
u'\u1f600', u'\u1f601', u'\u1f602', u'\u1f603', u'\u1f604', u'\u1f605', u'\u1f606', u'\u1f607',
u'\u1f608', u'\u1f609', u'\u1f60a', u'\u1f60b', u'\u1f60c', u'\u1f60d', u'\u1f60e', u'\u1f60f',
u'\u1f610', u'\u1f611', u'\u1f612', u'\u1f603', u'\u1f614', u'\u1f615', u'\u1f616', u'\u1f617',
u'\u1f618', u'\u1f619', u'\u1f61a', u'\u1f60b', u'\u1f61c', u'\u1f61d', u'\u1f61e', u'\u1f61f',
u'\u1f620', u'\u1f621', u'\u1f622', u'\u1f603', u'\u1f624', u'\u1f625', u'\u1f626', u'\u1f627',
u'\u1f628', u'\u1f629', u'\u1f62a', u'\u1f60b', u'\u1f62c', u'\u1f62d', u'\u1f62e', u'\u1f62f',
u'\u1f630', u'\u1f631', u'\u1f632', u'\u1f633', u'\u1f634', u'\u1f635', u'\u1f636', u'\u1f637',
u'\u1f638', u'\u1f639', u'\u1f63a', u'\u1f63b', u'\u1f63c', u'\u1f63d', u'\u1f63e', u'\u1f63f',
u'\u1f640', u'\u1f645', u'\u1f646', u'\u1f647',
u'\u1f648', u'\u1f649', u'\u1f64a', u'\u1f64b', u'\u1f64c', u'\u1f64d', u'\u1f64e', u'\u1f64f',
]
# try to generate rotated smileys
invert = {
# ltr to rtl
')' : '(',
'(' : ')',
']' : '[',
'[' : ']',
'{' : '}',
'}' : '{',
'<' : '>',
'>' : '<',
'/' : '\\',
'\\' : '/',
# does not change
'o' : 'o',
'x' : 'x',
'v' : 'v',
'0' : '0',
'8' : '8',
'|' : '|',
':' : ':',
';' : ';',
',' : ',',
"'" : "'",
'*' : '*',
'%' : '%'
}
def reverse_smiley(smiley):
output = []
for c in reversed(smiley):
if c in invert:
output.append(invert[c])
else:
return None
return ''.join(output)
smileys = western_smileys + \
filter(lambda x:not x is None, [reverse_smiley(smiley) for smiley in western_smileys]) + \
eastern_smileys + \
unicode_smileys
self._regex = re.compile('|'.join(['(?:%s)' % i for i in [re.escape(smiley) for smiley in smileys]]), re.I | re.U)
def parse(self, content):
for i in self._regex.finditer(content):
start, end = i.span()
yield (start, end)
def _tokenize(content):
import re
tokens = []
# urls
for scheme in ['http', 'https', 'ftp']:
for start,end in find_urls(content, '%s://' % scheme):
tokens.append((start, end, URL))
def untokenized(tokens):
current = 0
for start, end, type in sorted(tokens):
yield (current, start)
current = end
yield (current, -1)
# smileys
smileys = SmileyParser()
for start, end in untokenized(tokens):
for s,e in smileys.parse(content[start:end]):
tokens.append((start+s, start+e, SMILEY))
# dates
for start, end in untokenized(tokens):
for m in re.finditer(r'\b([0-9]{1,2}/[0-9]{1,2}(/[0-9]{2,4})?)\b', content[start:end]):
s, e = m.span()
tokens.append((start + s, start + e, DATE))
# Money
for start, end in untokenized(tokens):
#if content[start:end].find(u'€') != -1:
# print 'MONEY: ', content[start:end]
for m in re.finditer(ur'([0-9.,]+[£\$€])', content[start:end], re.U):
s, e = m.span()
tokens.append((start + s, start + e, MONEY))
# versions
for start, end in untokenized(tokens):
for m in re.finditer(r'\b((?:[0-9.])+\w?)\b', content[start:end], re.U):
s, e = m.span()
tokens.append((start + s, start + e, VERSION))
# mentions
for start, end in untokenized(tokens):
for m in re.finditer(r'(@[a-zA-Z0-9_]{1,15})\b', content[start:end]):
s, e = m.span()
tokens.append((start + s, start + e, MENTION))
# hashtags
for start, end in untokenized(tokens):
for m in re.finditer(r'(#[a-zA-Z0-9_]+)\b', content[start:end]):
s, e = m.span()
tokens.append((start + s, start + e, HASHTAG))
# whitespace-separated words
for start, end in untokenized(tokens):
for m in re.finditer(r'\b\w+\b', content[start:end], re.U):
s, e = m.span()
tokens.append((start + s, start + e, WORD))
return sorted(tokens)
def _clean(content):
content = content.lower()
content = _remove_smilies(content)
content = _remove_hashtags(content)
content = _remove_urls(content)
content = _remove_twitter_rw(content)
content = _remove_twitter_at(content)
content = _remove_emails(content)
content = _remove_phone_numbers(content)
content = _remove_duplicate_chars(content)
content = _remove_numbers(content)
content = _remove_uids(content)
tokens = _tokenize(content)
content = ' '.join([content[start:end] for start, end, type in tokens])
return content
def generate(content):
import collections
freq_dist = collections.defaultdict(int)
content = _clean(content)
if content == '':
return []
content_chars = [' ']
content_chars += list(content)
content_chars.append(' ')
ngrams = [x for x in _ngrams(content_chars, 1) if ''.join(list(x)) != ' ']
ngrams += _ngrams(content_chars, 2)
ngrams += _ngrams(content_chars, 3)
ngrams += _ngrams(content_chars, 4)
ngrams += _ngrams(content_chars, 5)
for ngram in ngrams:
freq_dist[''.join(list(ngram))] += 1
import operator
return sorted(freq_dist.iteritems(), key=operator.itemgetter(1), reverse=True)
| _remove_twitter_rw | identifier_name |
Ngrams.py | # -*- coding: utf-8
URL = 1
SMILEY = 2
DATE = 3
VERSION = 4
MENTION = 5
HASHTAG = 6
WORD = 7
MONEY = 8
def _ngrams(input_list, n):
return zip(*[input_list[i:] for i in range(n)])
def _remove_numbers(question):
import re
question = re.sub('[^\w]\d+[^\w]', ' ', question)
question = re.sub('[^\w]\d+$', ' ', question)
question = re.sub('^\d+[^\w]', ' ', question)
question = re.sub('^\d+$', ' ', question)
return question
def _remove_uids(question):
import re
question = re.sub('[^\w][a-zA-Z0-9]*([0-9]+[a-zA-Z]+)|([a-zA-Z]+[0-9]+)[a-zA-Z0-9]*[^\w]', ' ', question)
question = re.sub('[^\w][a-zA-Z0-9]*([0-9]+[a-zA-Z]+)|([a-zA-Z]+[0-9]+)[a-zA-Z0-9]*$', ' ', question)
question = re.sub('^[a-zA-Z0-9]*([0-9]+[a-zA-Z]+)|([a-zA-Z]+[0-9]+)[a-zA-Z0-9]*[^\w]', ' ', question)
question = re.sub('^[a-zA-Z0-9]*([0-9]+[a-zA-Z]+)|([a-zA-Z]+[0-9]+)[a-zA-Z0-9]*$', ' ', question)
return question
def _remove_duplicate_chars(question):
import re
question = question.replace('...', '. ')
return re.sub(r'([a-zA-Z\.\?!;\'\",\:\>\<])\1{2,}', r'\1', question)
def _remove_phone_numbers(question):
import re
cleansed_question = re.sub(r'(\d{2}\W*\d{2}\W*\d{2}\W*\d{2}\W*\d{2})', ' ', question, flags=re.MULTILINE)
return cleansed_question.strip()
def _remove_emails(question):
import re
cleansed_question = re.sub(r'([A-Za-z0-9\.\+_-]+@[A-Za-z0-9\._-]+\.[a-zA-Z]+)', '', question, flags=re.MULTILINE)
return cleansed_question.strip()
def _remove_twitter_at(question):
ats = [at for at in question.split() if at.startswith('@')]
cleansed_question = question
for at in ats:
cleansed_question = cleansed_question.replace(at, '')
return cleansed_question.strip()
def _remove_twitter_rw(question):
import re
twitter_rw = ['rt', 'dm']
cleansed_question = ' ' + question + ' '
for rw in twitter_rw:
cleansed_question = re.sub(r'\W(?:%s)\W' % rw, ' ', cleansed_question, flags=re.IGNORECASE)
return cleansed_question.strip()
def _remove_urls(question):
import re
cleansed_question = re.sub(r'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+', ' ', question, flags=re.MULTILINE)
return cleansed_question.strip()
def _remove_smilies(question):
import re
smileys = [': /+','>:[+', ':-(+', ':(+', ':-c', ':-<+', ':-[+', ':{', '>.>', ':o',
'<.<', '>.<', ':\\+', ':\\\'(+', ':\\\'(+', ':\'(+', ';\'(+', '>:\\', '>:/+',
':-/+', ':-.', ':\\+', '=/+', '=\\+', ':S', ':/+', ':$', '\*cry\*', '-_-',
';-)+', ':D', ';D', ';;)+', ':-x', ':x', '^-^', 'x)', 'è_é','haha','hehe', 'u_u',
'0:-)', ':o)', ':*', ':-*', '8-}', '=P~', '>:D<', '<:-P', ';))',
'xD', '>.<','u_u',
'>:]+', ':3', ':c', ':>', '=]', '8)+', '=)+', ':}+', ':^)+', '>:D', ':-D', '8-D', 'x-D', 'X-D', '=-D', '=D', '=-3', '8-)+',
'>;]+', '*-)+', '*)+', ';-]+', ';]+', ';^)+', '>:P', ':-P', ':P', 'X-P', ':-p', '=p', ':-Þ', ':Þ', ':-b', '>:o', '>:O', ':-O',
'°o°', '°O°', 'o_O', 'o.O', '8-0', 'o_O', 'x)+', '^^+', ':=)', '((?::|;|=)(?:-)?(?:\)|D|P))']
regex = '|'.join(['(\s%s\s)' % i for i in [re.escape(smiley) for smiley in smileys]])
cleansed_question = re.sub(regex, ' ', question, flags=re.MULTILINE|re.I)
return cleansed_question.strip()
def _remove_hashtags(question):
import re
question = question.strip()
hash_tags = [tag for tag in question.split() if tag.startswith('#')]
if not hash_tags:
return question
# check the position of the hash tags within the question and decide whether to clean and keep it
# or to remove it
import sys
last_question_mark_pos = sys.maxint if question.rfind('?') == -1 else question.rfind('?')
# remove all hash tags being after the last question mark
for tag in hash_tags:
tag_pos = question.find(tag)
if tag_pos > last_question_mark_pos:
# completely remove hashtag if they are after the last qm or just at the beginning of the tweet
question = question.replace(tag, ' ')
question = question.strip()
last_question_mark_pos = question.rfind('?')
else:
# just remove the hashtag sign
question = question.replace(tag, tag[1:])
last_question_mark_pos = sys.maxint if question.rfind('?') == -1 else question.rfind('?')
# remove extra spaces and return
pattern = re.compile(r'\s+')
question = re.sub(pattern, ' ', question.strip())
return question
def find_urls(content, scheme):
current = content.find(scheme)
while current != -1:
end = content.find(' ', current)
if end == -1:
end = content.find('\n', current)
if end == -1:
end = len(content)
while content[end-1] in [',', '.', ';', ':', '?']:
end -= 1
yield (current, end)
current = end
current = content.find(scheme, current)
class SmileyParser:
def __init__(self):
import re
western_smileys = [
# from http://en.wikipedia.org/wiki/List_of_emoticons
':-)', ':)', ':o)', ':]', ':3', ':c)', ':>', '=]', '8)', '=)', ':}', ':^)',
':-D', ':D', '8-D', '8D', 'x-D', 'xD', 'X-D', 'XD', '=-D', '=D', '=-3', '=3', 'B^D',
':-))',
'>:[', ':-(', ':(', ':-c', ':c', ':-<', ':<', ':-[', ':[', ':{',
':-||', ':@', '>:(',
":'-(", ":'(",
":'-)", ":')",
'D:<', 'D:', 'D8', 'D;', 'D=', 'DX', 'v.v', "D-':",
'>:O', ':-O', ':O', '8-0',
':*', ':^*',
';-)', ';)', '*-)', '*)', ';-]', ';]', ';D', ';^)', ':-,',
'>:P', ':-P', ':P', 'X-P', 'x-p', 'xp', 'XP', ':-p', ':p', '=p', ':-b', ':b',
'>:\\', '>:/', ':-/', ':-.', ':/', ':\\', '=/', '=\\', ':L', '=L', ':S', '>.<',
':|', ':-|',
':$',
':-X', ':X', ':-#', ':#',
'O:-)', '0:-3', '0:3', '0:-)', '0:)', '0;^)',
'>:)', '>;)', '>:-)',
'}:-)', '}:)', '3:-)', '3:)',
'o/\o', '^5', '>_>^', '^<_<',
'|;-)', '|-O',
':-&', ':&',
'#-)',
'%-)', '%)',
':-###..', ':###..',
'\\o/', '/o\\'
'*\\0/*',
# additional
':=)', ';=)', ';))', ':))', ';;)'
]
eastern_smileys = [
# from http://en.wikipedia.org/wiki/List_of_emoticons
]
unicode_smileys = [
# from http://en.wikipedia.org/wiki/List_of_emoticons
u'\u2639', u'\u263a', u'\u263b',
u'\u1f600', u'\u1f601', u'\u1f602', u'\u1f603', u'\u1f604', u'\u1f605', u'\u1f606', u'\u1f607',
u'\u1f608', u'\u1f609', u'\u1f60a', u'\u1f60b', u'\u1f60c', u'\u1f60d', u'\u1f60e', u'\u1f60f',
u'\u1f610', u'\u1f611', u'\u1f612', u'\u1f603', u'\u1f614', u'\u1f615', u'\u1f616', u'\u1f617',
u'\u1f618', u'\u1f619', u'\u1f61a', u'\u1f60b', u'\u1f61c', u'\u1f61d', u'\u1f61e', u'\u1f61f',
u'\u1f620', u'\u1f621', u'\u1f622', u'\u1f603', u'\u1f624', u'\u1f625', u'\u1f626', u'\u1f627',
u'\u1f628', u'\u1f629', u'\u1f62a', u'\u1f60b', u'\u1f62c', u'\u1f62d', u'\u1f62e', u'\u1f62f',
u'\u1f630', u'\u1f631', u'\u1f632', u'\u1f633', u'\u1f634', u'\u1f635', u'\u1f636', u'\u1f637',
u'\u1f638', u'\u1f639', u'\u1f63a', u'\u1f63b', u'\u1f63c', u'\u1f63d', u'\u1f63e', u'\u1f63f',
u'\u1f640', u'\u1f645', u'\u1f646', u'\u1f647',
u'\u1f648', u'\u1f649', u'\u1f64a', u'\u1f64b', u'\u1f64c', u'\u1f64d', u'\u1f64e', u'\u1f64f',
]
# try to generate rotated smileys
invert = {
# ltr to rtl
')' : '(',
'(' : ')',
']' : '[',
'[' : ']',
'{' : '}',
'}' : '{',
'<' : '>',
'>' : '<',
'/' : '\\',
'\\' : '/',
# does not change
'o' : 'o',
'x' : 'x',
'v' : 'v',
'0' : '0',
'8' : '8',
'|' : '|',
':' : ':',
';' : ';',
',' : ',',
"'" : "'",
'*' : '*',
'%' : '%'
}
def reverse_smiley(smiley):
output = []
for c in reversed(smiley):
if c in invert: | smileys = western_smileys + \
filter(lambda x:not x is None, [reverse_smiley(smiley) for smiley in western_smileys]) + \
eastern_smileys + \
unicode_smileys
self._regex = re.compile('|'.join(['(?:%s)' % i for i in [re.escape(smiley) for smiley in smileys]]), re.I | re.U)
def parse(self, content):
for i in self._regex.finditer(content):
start, end = i.span()
yield (start, end)
def _tokenize(content):
import re
tokens = []
# urls
for scheme in ['http', 'https', 'ftp']:
for start,end in find_urls(content, '%s://' % scheme):
tokens.append((start, end, URL))
def untokenized(tokens):
current = 0
for start, end, type in sorted(tokens):
yield (current, start)
current = end
yield (current, -1)
# smileys
smileys = SmileyParser()
for start, end in untokenized(tokens):
for s,e in smileys.parse(content[start:end]):
tokens.append((start+s, start+e, SMILEY))
# dates
for start, end in untokenized(tokens):
for m in re.finditer(r'\b([0-9]{1,2}/[0-9]{1,2}(/[0-9]{2,4})?)\b', content[start:end]):
s, e = m.span()
tokens.append((start + s, start + e, DATE))
# Money
for start, end in untokenized(tokens):
#if content[start:end].find(u'€') != -1:
# print 'MONEY: ', content[start:end]
for m in re.finditer(ur'([0-9.,]+[£\$€])', content[start:end], re.U):
s, e = m.span()
tokens.append((start + s, start + e, MONEY))
# versions
for start, end in untokenized(tokens):
for m in re.finditer(r'\b((?:[0-9.])+\w?)\b', content[start:end], re.U):
s, e = m.span()
tokens.append((start + s, start + e, VERSION))
# mentions
for start, end in untokenized(tokens):
for m in re.finditer(r'(@[a-zA-Z0-9_]{1,15})\b', content[start:end]):
s, e = m.span()
tokens.append((start + s, start + e, MENTION))
# hashtags
for start, end in untokenized(tokens):
for m in re.finditer(r'(#[a-zA-Z0-9_]+)\b', content[start:end]):
s, e = m.span()
tokens.append((start + s, start + e, HASHTAG))
# whitespace-separated words
for start, end in untokenized(tokens):
for m in re.finditer(r'\b\w+\b', content[start:end], re.U):
s, e = m.span()
tokens.append((start + s, start + e, WORD))
return sorted(tokens)
def _clean(content):
content = content.lower()
content = _remove_smilies(content)
content = _remove_hashtags(content)
content = _remove_urls(content)
content = _remove_twitter_rw(content)
content = _remove_twitter_at(content)
content = _remove_emails(content)
content = _remove_phone_numbers(content)
content = _remove_duplicate_chars(content)
content = _remove_numbers(content)
content = _remove_uids(content)
tokens = _tokenize(content)
content = ' '.join([content[start:end] for start, end, type in tokens])
return content
def generate(content):
import collections
freq_dist = collections.defaultdict(int)
content = _clean(content)
if content == '':
return []
content_chars = [' ']
content_chars += list(content)
content_chars.append(' ')
ngrams = [x for x in _ngrams(content_chars, 1) if ''.join(list(x)) != ' ']
ngrams += _ngrams(content_chars, 2)
ngrams += _ngrams(content_chars, 3)
ngrams += _ngrams(content_chars, 4)
ngrams += _ngrams(content_chars, 5)
for ngram in ngrams:
freq_dist[''.join(list(ngram))] += 1
import operator
return sorted(freq_dist.iteritems(), key=operator.itemgetter(1), reverse=True) | output.append(invert[c])
else:
return None
return ''.join(output)
| random_line_split |
Ngrams.py | # -*- coding: utf-8
URL = 1
SMILEY = 2
DATE = 3
VERSION = 4
MENTION = 5
HASHTAG = 6
WORD = 7
MONEY = 8
def _ngrams(input_list, n):
return zip(*[input_list[i:] for i in range(n)])
def _remove_numbers(question):
import re
question = re.sub('[^\w]\d+[^\w]', ' ', question)
question = re.sub('[^\w]\d+$', ' ', question)
question = re.sub('^\d+[^\w]', ' ', question)
question = re.sub('^\d+$', ' ', question)
return question
def _remove_uids(question):
import re
question = re.sub('[^\w][a-zA-Z0-9]*([0-9]+[a-zA-Z]+)|([a-zA-Z]+[0-9]+)[a-zA-Z0-9]*[^\w]', ' ', question)
question = re.sub('[^\w][a-zA-Z0-9]*([0-9]+[a-zA-Z]+)|([a-zA-Z]+[0-9]+)[a-zA-Z0-9]*$', ' ', question)
question = re.sub('^[a-zA-Z0-9]*([0-9]+[a-zA-Z]+)|([a-zA-Z]+[0-9]+)[a-zA-Z0-9]*[^\w]', ' ', question)
question = re.sub('^[a-zA-Z0-9]*([0-9]+[a-zA-Z]+)|([a-zA-Z]+[0-9]+)[a-zA-Z0-9]*$', ' ', question)
return question
def _remove_duplicate_chars(question):
import re
question = question.replace('...', '. ')
return re.sub(r'([a-zA-Z\.\?!;\'\",\:\>\<])\1{2,}', r'\1', question)
def _remove_phone_numbers(question):
import re
cleansed_question = re.sub(r'(\d{2}\W*\d{2}\W*\d{2}\W*\d{2}\W*\d{2})', ' ', question, flags=re.MULTILINE)
return cleansed_question.strip()
def _remove_emails(question):
import re
cleansed_question = re.sub(r'([A-Za-z0-9\.\+_-]+@[A-Za-z0-9\._-]+\.[a-zA-Z]+)', '', question, flags=re.MULTILINE)
return cleansed_question.strip()
def _remove_twitter_at(question):
ats = [at for at in question.split() if at.startswith('@')]
cleansed_question = question
for at in ats:
cleansed_question = cleansed_question.replace(at, '')
return cleansed_question.strip()
def _remove_twitter_rw(question):
import re
twitter_rw = ['rt', 'dm']
cleansed_question = ' ' + question + ' '
for rw in twitter_rw:
cleansed_question = re.sub(r'\W(?:%s)\W' % rw, ' ', cleansed_question, flags=re.IGNORECASE)
return cleansed_question.strip()
def _remove_urls(question):
import re
cleansed_question = re.sub(r'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+', ' ', question, flags=re.MULTILINE)
return cleansed_question.strip()
def _remove_smilies(question):
import re
smileys = [': /+','>:[+', ':-(+', ':(+', ':-c', ':-<+', ':-[+', ':{', '>.>', ':o',
'<.<', '>.<', ':\\+', ':\\\'(+', ':\\\'(+', ':\'(+', ';\'(+', '>:\\', '>:/+',
':-/+', ':-.', ':\\+', '=/+', '=\\+', ':S', ':/+', ':$', '\*cry\*', '-_-',
';-)+', ':D', ';D', ';;)+', ':-x', ':x', '^-^', 'x)', 'è_é','haha','hehe', 'u_u',
'0:-)', ':o)', ':*', ':-*', '8-}', '=P~', '>:D<', '<:-P', ';))',
'xD', '>.<','u_u',
'>:]+', ':3', ':c', ':>', '=]', '8)+', '=)+', ':}+', ':^)+', '>:D', ':-D', '8-D', 'x-D', 'X-D', '=-D', '=D', '=-3', '8-)+',
'>;]+', '*-)+', '*)+', ';-]+', ';]+', ';^)+', '>:P', ':-P', ':P', 'X-P', ':-p', '=p', ':-Þ', ':Þ', ':-b', '>:o', '>:O', ':-O',
'°o°', '°O°', 'o_O', 'o.O', '8-0', 'o_O', 'x)+', '^^+', ':=)', '((?::|;|=)(?:-)?(?:\)|D|P))']
regex = '|'.join(['(\s%s\s)' % i for i in [re.escape(smiley) for smiley in smileys]])
cleansed_question = re.sub(regex, ' ', question, flags=re.MULTILINE|re.I)
return cleansed_question.strip()
def _remove_hashtags(question):
import re
question = question.strip()
hash_tags = [tag for tag in question.split() if tag.startswith('#')]
if not hash_tags:
return question
# check the position of the hash tags within the question and decide whether to clean and keep it
# or to remove it
import sys
last_question_mark_pos = sys.maxint if question.rfind('?') == -1 else question.rfind('?')
# remove all hash tags being after the last question mark
for tag in hash_tags:
tag_pos = question.find(tag)
if tag_pos > last_question_mark_pos:
# completely remove hashtag if they are after the last qm or just at the beginning of the tweet
question = question.replace(tag, ' ')
question = question.strip()
last_question_mark_pos = question.rfind('?')
else:
# just remove the hashtag sign
question = question.replace(tag, tag[1:])
last_question_mark_pos = sys.maxint if question.rfind('?') == -1 else question.rfind('?')
# remove extra spaces and return
pattern = re.compile(r'\s+')
question = re.sub(pattern, ' ', question.strip())
return question
def find_urls(content, scheme):
current = content.find(scheme)
while current != -1:
end = content.find(' ', current)
if end == -1:
end = content.find('\n', current)
if end == -1:
end = len(content)
while content[end-1] in [',', '.', ';', ':', '?']:
end -= 1
yield (current, end)
current = end
current = content.find(scheme, current)
class SmileyParser:
def __init__(self):
import re
western_smileys = [
# from http://en.wikipedia.org/wiki/List_of_emoticons
':-)', ':)', ':o)', ':]', ':3', ':c)', ':>', '=]', '8)', '=)', ':}', ':^)',
':-D', ':D', '8-D', '8D', 'x-D', 'xD', 'X-D', 'XD', '=-D', '=D', '=-3', '=3', 'B^D',
':-))',
'>:[', ':-(', ':(', ':-c', ':c', ':-<', ':<', ':-[', ':[', ':{',
':-||', ':@', '>:(',
":'-(", ":'(",
":'-)", ":')",
'D:<', 'D:', 'D8', 'D;', 'D=', 'DX', 'v.v', "D-':",
'>:O', ':-O', ':O', '8-0',
':*', ':^*',
';-)', ';)', '*-)', '*)', ';-]', ';]', ';D', ';^)', ':-,',
'>:P', ':-P', ':P', 'X-P', 'x-p', 'xp', 'XP', ':-p', ':p', '=p', ':-b', ':b',
'>:\\', '>:/', ':-/', ':-.', ':/', ':\\', '=/', '=\\', ':L', '=L', ':S', '>.<',
':|', ':-|',
':$',
':-X', ':X', ':-#', ':#',
'O:-)', '0:-3', '0:3', '0:-)', '0:)', '0;^)',
'>:)', '>;)', '>:-)',
'}:-)', '}:)', '3:-)', '3:)',
'o/\o', '^5', '>_>^', '^<_<',
'|;-)', '|-O',
':-&', ':&',
'#-)',
'%-)', '%)',
':-###..', ':###..',
'\\o/', '/o\\'
'*\\0/*',
# additional
':=)', ';=)', ';))', ':))', ';;)'
]
eastern_smileys = [
# from http://en.wikipedia.org/wiki/List_of_emoticons
]
unicode_smileys = [
# from http://en.wikipedia.org/wiki/List_of_emoticons
u'\u2639', u'\u263a', u'\u263b',
u'\u1f600', u'\u1f601', u'\u1f602', u'\u1f603', u'\u1f604', u'\u1f605', u'\u1f606', u'\u1f607',
u'\u1f608', u'\u1f609', u'\u1f60a', u'\u1f60b', u'\u1f60c', u'\u1f60d', u'\u1f60e', u'\u1f60f',
u'\u1f610', u'\u1f611', u'\u1f612', u'\u1f603', u'\u1f614', u'\u1f615', u'\u1f616', u'\u1f617',
u'\u1f618', u'\u1f619', u'\u1f61a', u'\u1f60b', u'\u1f61c', u'\u1f61d', u'\u1f61e', u'\u1f61f',
u'\u1f620', u'\u1f621', u'\u1f622', u'\u1f603', u'\u1f624', u'\u1f625', u'\u1f626', u'\u1f627',
u'\u1f628', u'\u1f629', u'\u1f62a', u'\u1f60b', u'\u1f62c', u'\u1f62d', u'\u1f62e', u'\u1f62f',
u'\u1f630', u'\u1f631', u'\u1f632', u'\u1f633', u'\u1f634', u'\u1f635', u'\u1f636', u'\u1f637',
u'\u1f638', u'\u1f639', u'\u1f63a', u'\u1f63b', u'\u1f63c', u'\u1f63d', u'\u1f63e', u'\u1f63f',
u'\u1f640', u'\u1f645', u'\u1f646', u'\u1f647',
u'\u1f648', u'\u1f649', u'\u1f64a', u'\u1f64b', u'\u1f64c', u'\u1f64d', u'\u1f64e', u'\u1f64f',
]
# try to generate rotated smileys
invert = {
# ltr to rtl
')' : '(',
'(' : ')',
']' : '[',
'[' : ']',
'{' : '}',
'}' : '{',
'<' : '>',
'>' : '<',
'/' : '\\',
'\\' : '/',
# does not change
'o' : 'o',
'x' : 'x',
'v' : 'v',
'0' : '0',
'8' : '8',
'|' : '|',
':' : ':',
';' : ';',
',' : ',',
"'" : "'",
'*' : '*',
'%' : '%'
}
def reverse_smiley(smiley):
output = []
for c in reversed(smiley):
if c in invert:
output.append(invert[c])
else:
return None
return ''.join(output)
smileys = western_smileys + \
filter(lambda x:not x is None, [reverse_smiley(smiley) for smiley in western_smileys]) + \
eastern_smileys + \
unicode_smileys
self._regex = re.compile('|'.join(['(?:%s)' % i for i in [re.escape(smiley) for smiley in smileys]]), re.I | re.U)
def parse(self, content):
for i in | tokenize(content):
import re
tokens = []
# urls
for scheme in ['http', 'https', 'ftp']:
for start,end in find_urls(content, '%s://' % scheme):
tokens.append((start, end, URL))
def untokenized(tokens):
current = 0
for start, end, type in sorted(tokens):
yield (current, start)
current = end
yield (current, -1)
# smileys
smileys = SmileyParser()
for start, end in untokenized(tokens):
for s,e in smileys.parse(content[start:end]):
tokens.append((start+s, start+e, SMILEY))
# dates
for start, end in untokenized(tokens):
for m in re.finditer(r'\b([0-9]{1,2}/[0-9]{1,2}(/[0-9]{2,4})?)\b', content[start:end]):
s, e = m.span()
tokens.append((start + s, start + e, DATE))
# Money
for start, end in untokenized(tokens):
#if content[start:end].find(u'€') != -1:
# print 'MONEY: ', content[start:end]
for m in re.finditer(ur'([0-9.,]+[£\$€])', content[start:end], re.U):
s, e = m.span()
tokens.append((start + s, start + e, MONEY))
# versions
for start, end in untokenized(tokens):
for m in re.finditer(r'\b((?:[0-9.])+\w?)\b', content[start:end], re.U):
s, e = m.span()
tokens.append((start + s, start + e, VERSION))
# mentions
for start, end in untokenized(tokens):
for m in re.finditer(r'(@[a-zA-Z0-9_]{1,15})\b', content[start:end]):
s, e = m.span()
tokens.append((start + s, start + e, MENTION))
# hashtags
for start, end in untokenized(tokens):
for m in re.finditer(r'(#[a-zA-Z0-9_]+)\b', content[start:end]):
s, e = m.span()
tokens.append((start + s, start + e, HASHTAG))
# whitespace-separated words
for start, end in untokenized(tokens):
for m in re.finditer(r'\b\w+\b', content[start:end], re.U):
s, e = m.span()
tokens.append((start + s, start + e, WORD))
return sorted(tokens)
def _clean(content):
content = content.lower()
content = _remove_smilies(content)
content = _remove_hashtags(content)
content = _remove_urls(content)
content = _remove_twitter_rw(content)
content = _remove_twitter_at(content)
content = _remove_emails(content)
content = _remove_phone_numbers(content)
content = _remove_duplicate_chars(content)
content = _remove_numbers(content)
content = _remove_uids(content)
tokens = _tokenize(content)
content = ' '.join([content[start:end] for start, end, type in tokens])
return content
def generate(content):
import collections
freq_dist = collections.defaultdict(int)
content = _clean(content)
if content == '':
return []
content_chars = [' ']
content_chars += list(content)
content_chars.append(' ')
ngrams = [x for x in _ngrams(content_chars, 1) if ''.join(list(x)) != ' ']
ngrams += _ngrams(content_chars, 2)
ngrams += _ngrams(content_chars, 3)
ngrams += _ngrams(content_chars, 4)
ngrams += _ngrams(content_chars, 5)
for ngram in ngrams:
freq_dist[''.join(list(ngram))] += 1
import operator
return sorted(freq_dist.iteritems(), key=operator.itemgetter(1), reverse=True)
| self._regex.finditer(content):
start, end = i.span()
yield (start, end)
def _ | identifier_body |
index.d.ts | // Type definitions for pulsar-client 1.2
// Project: https://github.com/apache/pulsar-client-node
// Definitions by: Brian Walendzinski <https://github.com/bwalendz>
// Definitions: https://github.com/DefinitelyTyped/DefinitelyTyped
// TypeScript Version: 3.0
/// <reference types="node" />
export interface MessageProperties {
[key: string ]: string;
}
export type MessageRoutingModes =
'RoundRobinPartition' |
'SinglePartition' |
'CustomPartition';
export type HashingScheme =
'BoostHash' |
'JavaStringHash' |
'Murmur3_32Hash';
export type CompressionType =
'LZ4' |
'Zlib';
export type SubscriptionType =
'Exclusive' |
'Shared' |
'Failover' |
'KeyShared';
export class | {
constructor(authTlsOpts: { certificatePath: string; privateKeyPath: string });
certificatePath: string;
privateKeyPath: string;
}
export class AuthenticationToken {
constructor(authTokenOpts: { token: string });
token: string;
}
export interface ClientOpts {
/**
* The connection URL for the Pulsar cluster.
*/
serviceUrl: string;
/**
* Configure the authentication provider.
* Default: No Authentication
*/
authentication?: AuthenticationTls | AuthenticationToken;
/**
* The timeout for Node.js client operations (creating producers, subscribing to and unsubscribing from topics).
* Retries will occur until this threshold is reached, at which point the operation will fail.
* Default: 30
*/
operationTimeoutSeconds?: number;
/**
* The number of threads to use for handling connections to Pulsar brokers.
* Default: 1
*/
ioThreads?: number;
/**
* The number of threads used by message listeners (consumers and readers).
* Default: 1
*/
messageListenerThreads?: number;
/**
* The number of concurrent lookup requests that can be sent on each broker connection.
* Setting a maximum helps to keep from overloading brokers.
* You should set values over the default only if the client needs to produce and/or subscribe to thousands of Pulsar topics.
* Default: 50000
*/
concurrentLookupRequest?: number;
/**
* The file path for the trusted TLS certificate.
*/
tlsTrustCertsFilePath?: string;
/**
* The boolean value of setup whether to enable TLS hostname verification.
* Default: false
*/
tlsValidateHostname?: boolean;
/**
* The boolean value of setup whether the Pulsar client accepts untrusted TLS certificate from broker.
* Default: false
*/
tlsAllowInsecureConnection?: boolean;
/**
* Interval between each stat info. Stats is activated with positive statsInterval. The value should be set to >= 1 second.
* Default: 600
*/
statsIntervalInSeconds?: number;
}
export class Client {
constructor(opts: ClientOpts);
createProducer(data: ProducerOpts): Promise<Producer>;
createReader(data: ReaderOpts): Promise<Reader>;
subscribe(data: SubscribeOpts): Promise<Consumer>;
close(): Promise<null>;
}
export class MessageId {
/**
* MessageId representing the earliest, or oldest available message stored in the topic.
*/
static earliest(): MessageId;
/**
* MessageId representing the latest, or last published message in the topic.
*/
static latest(): MessageId;
/**
* Deserialize a message id object from a Buffer.
* @param data
*/
static deserialize(data: Buffer): MessageId;
/**
* Serialize the message id into a Buffer for storing.
*/
serialize(): Buffer;
/**
* Get message id as String.
*/
toString(): string;
}
export class Message {
/**
* Getter method of topic name.
*/
getTopicName(): string;
/**
* Getter method of properties.
*/
getProperties(): MessageProperties;
/**
* Getter method of message data.
*/
getData(): Buffer;
/**
* Getter method of message id object.
*/
getMessageId(): MessageId;
/**
* Getter method of publish timestamp.
*/
getPublishTimestamp(): number;
/**
* Getter method of event timestamp.
*/
getEventTimestamp(): number;
/**
* Getter method of partition key.
*/
getPartitionKey(): string;
}
export interface ProducerOpts {
/**
* The Pulsar topic to which the producer will publish messages.
*/
topic: string;
/**
* A name for the producer. If you do not explicitly assign a name, Pulsar will automatically generate a globally unique name.
* If you choose to explicitly assign a name, it will need to be unique across all Pulsar clusters, otherwise the creation operation will throw an error.
*/
producerName?: string;
/**
* When publishing a message to a topic, the producer will wait for an acknowledgment from the responsible Pulsar broker.
* If a message is not acknowledged within the threshold set by this parameter, an error will be thrown. If you set sendTimeoutMs to -1,
* the timeout will be set to infinity (and thus removed). Removing the send timeout is recommended when using Pulsar's message de-duplication feature.
* Default: 30000
*/
sendTimeoutMs?: number;
/**
* The initial sequence ID of the message. When producer send message, add sequence ID to message. The ID is increased each time to send.
*/
initialSequenceId?: number;
/**
* The maximum size of the queue holding pending messages (i.e. messages waiting to receive an acknowledgment from the broker).
* By default, when the queue is full all calls to the send method will fail unless blockIfQueueFull is set to true.
* Default: 1000
*/
maxPendingMessages?: number;
/**
* The maximum size of the sum of partition's pending queue.
* Default: 50000
*/
maxPendingMessagesAcrossPartitions?: number;
/**
* If set to true, the producer's send method will wait when the outgoing message queue is full rather than failing and throwing an error
* (the size of that queue is dictated by the maxPendingMessages parameter); if set to false (the default),send operations will fail and
* throw a error when the queue is full.
* Default: false
*/
blockIfQueueFull?: boolean;
/**
* The message routing logic (for producers on partitioned topics). This logic is applied only when no key is set on messages. The available
* options are: round robin (RoundRobinDistribution), or publishing all messages to a single partition (UseSinglePartition).
* Default: UseSinglePartition
*/
messageRoutingMode?: MessageRoutingModes;
/**
* The hashing function that determines the partition on which a particular message is published (partitioned topics only).
* The available options are: JavaStringHash (the equivalent of String.hashCode() in Java), Murmur3_32Hash (applies the Murmur3 hashing function),
* or BoostHash (applies the hashing function from C++'s Boost library).
* Default: BoostHash
*/
hashingScheme?: HashingScheme;
/**
* The message data compression type used by the producer. The available options are LZ4, and Zlib.
* Default: No Compression
*/
compressionType?: CompressionType;
/**
* If set to true, the producer send message as batch.
* Default: true
*/
batchingEnabled?: boolean;
/**
* The maximum time of delay sending message in batching.
* Default: 10
*/
batchingMaxPublishDelayMs?: number;
/**
* The maximum size of sending message in each time of batching.
* Default: 1000
*/
batchingMaxMessages?: number;
/**
* The metadata of producer.
*/
properties?: MessageProperties;
}
export interface ProducerMessage {
/**
* The actual data payload of the message.
*/
data: Buffer;
/**
* A Object for any application-specific metadata attached to the message.
*/
properties?: MessageProperties;
/**
* The timestamp associated with the message.
*/
eventTimestamp?: number;
/**
* The sequence ID of the message.
*/
sequenceId?: number;
/**
* The optional key associated with the message (particularly useful for things like topic compaction).
*/
partitionKey?: string;
/**
* The clusters to which this message will be replicated. Pulsar brokers handle message replication automatically;
* you should only change this setting if you want to override the broker default.
*/
replicationClusters?: string[];
}
export class Producer {
/**
* Publishes a message to the producer's topic. When the message is successfully acknowledged by the Pulsar broker,
* or an error will be thrown, the Promise object run executor function.
* @param message Message to be published.
*/
send(message: ProducerMessage): Promise<null>;
/**
* Sends message from send queue to Pulser broker. When the message is successfully acknowledged by the Pulsar broker,
* or an error will be thrown, the Promise object run executor function.
*/
flush(): Promise<null>;
/**
* Closes the producer and releases all resources allocated to it. If close() is called then no more messages will be accepted from the publisher.
* This method will return Promise object, and when all pending publish requests have been persisted by Pulsar then run executor function.
* If an error is thrown, no pending writes will be retried.
*/
close(): Promise<null>;
}
export interface SubscribeOpts {
/**
* The Pulsar topic on which the consumer will establish a subscription and listen for messages.
*/
topic: string;
/**
* The subscription name for this consumer.
*/
subscription: string;
/**
* Available options are Exclusive, Shared, and Failover.
* Default: Exclusive
*/
subscriptionType?: SubscriptionType;
/**
* Acknowledge timeout in milliseconds.
* Default: 0
*/
ackTimeoutMs?: number;
/**
* Sets the size of the consumer's receiver queue, i.e. the number of messages that can be accumulated by the consumer before the application calls receive.
* A value higher than the default could increase consumer throughput, though at the expense of more memory utilization.
* Default: 1000
*/
receiverQueueSize?: number;
/**
* Set the max total receiver queue size across partitions.
* This setting will be used to reduce the receiver queue size for individual partitions if the total exceeds this value.
* Default: 50000
*/
receiverQueueSizeAcrossPartitions?: number;
/**
* The name of consumer. Currently, failover mode uses consumer name for ordering.
*/
consumerName?: string;
/**
* The metadata of consumer.
*/
properties?: MessageProperties;
/**
* The message listener of consumer.
*/
listener?: (message: Message, consumer: Consumer) => void;
}
export class Consumer {
/**
* Receives a single message from the topic with optional specific timeout in milliseconds.
* @param timeout Wait timeout in milliseconds.
*/
receive(timeout?: number): Promise<Message>;
/**
* Acknowledges a message to the Pulsar broker by message object.
* @param message Message to acknowledge.
*/
acknowledge(message: Message): void;
/**
* Acknowledges a message to the Pulsar broker by message ID object.
* @param messageId Message ID to acknowledge.
*/
acknowledgeId(messageId: MessageId): void;
/**
* Negatively acknowledges a message to the Pulsar broker by message object.
* @param message Message to acknowledge.
*/
negativeAcknowledge(message: Message): void;
/**
* Negatively acknowledges a message to the Pulsar broker by message ID object.
* @param messageId Message ID to acknowledge.
*/
negativeAcknowledgeId(messageId: MessageId): void;
/**
* Acknowledges all the messages in the stream, up to and including the specified message.
* The acknowledgeCumulative method will return void, and send the ack to the broker asynchronously.
* After that, the messages will not be redelivered to the consumer. Cumulative acking can not be used with a shared subscription type.
* @param message Message to acknowledge cumulatively.
*/
acknowledgeCumulative(message: Message): void;
/**
* Acknowledges all the messages in the stream, up to and including the specified message ID.
* @param messageId Message ID to acknowledge cumulatively.
*/
acknowledgeCumulativeId(messageId: MessageId): void;
/**
* Closes the consumer, disabling its ability to receive messages from the broker.
*/
close(): Promise<null>;
}
export interface ReaderOpts {
/**
* The Pulsar topic on which the reader will establish a subscription and listen for messages.
*/
topic: string;
/**
* The initial reader position, i.e. the message at which the reader begins processing messages.
* The options are Pulsar.MessageId.earliest (the earliest available message on the topic), Pulsar.MessageId.latest (the latest available message on the topic),
* or a message ID object for a position that is not earliest or latest.
*/
startMessageId: MessageId;
/**
* Sets the size of the reader's receiver queue, i.e. the number of messages that can be accumulated by the reader before the application calls readNext.
* A value higher than the default of 1000 could increase reader throughput, though at the expense of more memory utilization.
* Default 1000
*/
receiverQueueSize?: number;
/**
* The name of the reader.
*/
readerName?: string;
/**
* The subscription role prefix.
*/
subscriptionRolePrefix?: string;
}
export class Reader {
/**
* Receives the next message on the topic (analogous to the receive method for consumers)
* with optional specific timeout in milliseconds.
* @param timeout Wait timeout in milliseconds.
*/
readNext(timeout?: number): Promise<Message>;
/**
* Return whether Broker has next message in target topic.
*/
hasNext(): boolean;
/**
* Closes the reader, disabling its ability to receive messages from the broker.
*/
close(): Promise<null>;
}
| AuthenticationTls | identifier_name |
index.d.ts | // Type definitions for pulsar-client 1.2
// Project: https://github.com/apache/pulsar-client-node
// Definitions by: Brian Walendzinski <https://github.com/bwalendz>
// Definitions: https://github.com/DefinitelyTyped/DefinitelyTyped
// TypeScript Version: 3.0
/// <reference types="node" />
export interface MessageProperties {
[key: string ]: string;
}
export type MessageRoutingModes =
'RoundRobinPartition' |
'SinglePartition' |
'CustomPartition';
export type HashingScheme =
'BoostHash' |
'JavaStringHash' |
'Murmur3_32Hash';
export type CompressionType =
'LZ4' |
'Zlib';
export type SubscriptionType =
'Exclusive' |
'Shared' |
'Failover' |
'KeyShared';
export class AuthenticationTls {
constructor(authTlsOpts: { certificatePath: string; privateKeyPath: string });
certificatePath: string;
privateKeyPath: string;
}
export class AuthenticationToken {
constructor(authTokenOpts: { token: string });
token: string;
}
export interface ClientOpts {
/**
* The connection URL for the Pulsar cluster.
*/
serviceUrl: string;
/**
* Configure the authentication provider.
* Default: No Authentication
*/
authentication?: AuthenticationTls | AuthenticationToken;
/**
* The timeout for Node.js client operations (creating producers, subscribing to and unsubscribing from topics).
* Retries will occur until this threshold is reached, at which point the operation will fail.
* Default: 30
*/
operationTimeoutSeconds?: number;
/**
* The number of threads to use for handling connections to Pulsar brokers.
* Default: 1
*/
ioThreads?: number;
/**
* The number of threads used by message listeners (consumers and readers).
* Default: 1
*/
messageListenerThreads?: number;
/**
* The number of concurrent lookup requests that can be sent on each broker connection.
* Setting a maximum helps to keep from overloading brokers.
* You should set values over the default only if the client needs to produce and/or subscribe to thousands of Pulsar topics.
* Default: 50000
*/
concurrentLookupRequest?: number;
/**
* The file path for the trusted TLS certificate.
*/
tlsTrustCertsFilePath?: string;
/**
* The boolean value of setup whether to enable TLS hostname verification.
* Default: false
*/
tlsValidateHostname?: boolean;
/**
* The boolean value of setup whether the Pulsar client accepts untrusted TLS certificate from broker.
* Default: false |
/**
* Interval between each stat info. Stats is activated with positive statsInterval. The value should be set to >= 1 second.
* Default: 600
*/
statsIntervalInSeconds?: number;
}
export class Client {
constructor(opts: ClientOpts);
createProducer(data: ProducerOpts): Promise<Producer>;
createReader(data: ReaderOpts): Promise<Reader>;
subscribe(data: SubscribeOpts): Promise<Consumer>;
close(): Promise<null>;
}
export class MessageId {
/**
* MessageId representing the earliest, or oldest available message stored in the topic.
*/
static earliest(): MessageId;
/**
* MessageId representing the latest, or last published message in the topic.
*/
static latest(): MessageId;
/**
* Deserialize a message id object from a Buffer.
* @param data
*/
static deserialize(data: Buffer): MessageId;
/**
* Serialize the message id into a Buffer for storing.
*/
serialize(): Buffer;
/**
* Get message id as String.
*/
toString(): string;
}
export class Message {
/**
* Getter method of topic name.
*/
getTopicName(): string;
/**
* Getter method of properties.
*/
getProperties(): MessageProperties;
/**
* Getter method of message data.
*/
getData(): Buffer;
/**
* Getter method of message id object.
*/
getMessageId(): MessageId;
/**
* Getter method of publish timestamp.
*/
getPublishTimestamp(): number;
/**
* Getter method of event timestamp.
*/
getEventTimestamp(): number;
/**
* Getter method of partition key.
*/
getPartitionKey(): string;
}
export interface ProducerOpts {
/**
* The Pulsar topic to which the producer will publish messages.
*/
topic: string;
/**
* A name for the producer. If you do not explicitly assign a name, Pulsar will automatically generate a globally unique name.
* If you choose to explicitly assign a name, it will need to be unique across all Pulsar clusters, otherwise the creation operation will throw an error.
*/
producerName?: string;
/**
* When publishing a message to a topic, the producer will wait for an acknowledgment from the responsible Pulsar broker.
* If a message is not acknowledged within the threshold set by this parameter, an error will be thrown. If you set sendTimeoutMs to -1,
* the timeout will be set to infinity (and thus removed). Removing the send timeout is recommended when using Pulsar's message de-duplication feature.
* Default: 30000
*/
sendTimeoutMs?: number;
/**
* The initial sequence ID of the message. When producer send message, add sequence ID to message. The ID is increased each time to send.
*/
initialSequenceId?: number;
/**
* The maximum size of the queue holding pending messages (i.e. messages waiting to receive an acknowledgment from the broker).
* By default, when the queue is full all calls to the send method will fail unless blockIfQueueFull is set to true.
* Default: 1000
*/
maxPendingMessages?: number;
/**
* The maximum size of the sum of partition's pending queue.
* Default: 50000
*/
maxPendingMessagesAcrossPartitions?: number;
/**
* If set to true, the producer's send method will wait when the outgoing message queue is full rather than failing and throwing an error
* (the size of that queue is dictated by the maxPendingMessages parameter); if set to false (the default),send operations will fail and
* throw a error when the queue is full.
* Default: false
*/
blockIfQueueFull?: boolean;
/**
* The message routing logic (for producers on partitioned topics). This logic is applied only when no key is set on messages. The available
* options are: round robin (RoundRobinDistribution), or publishing all messages to a single partition (UseSinglePartition).
* Default: UseSinglePartition
*/
messageRoutingMode?: MessageRoutingModes;
/**
* The hashing function that determines the partition on which a particular message is published (partitioned topics only).
* The available options are: JavaStringHash (the equivalent of String.hashCode() in Java), Murmur3_32Hash (applies the Murmur3 hashing function),
* or BoostHash (applies the hashing function from C++'s Boost library).
* Default: BoostHash
*/
hashingScheme?: HashingScheme;
/**
* The message data compression type used by the producer. The available options are LZ4, and Zlib.
* Default: No Compression
*/
compressionType?: CompressionType;
/**
* If set to true, the producer send message as batch.
* Default: true
*/
batchingEnabled?: boolean;
/**
* The maximum time of delay sending message in batching.
* Default: 10
*/
batchingMaxPublishDelayMs?: number;
/**
* The maximum size of sending message in each time of batching.
* Default: 1000
*/
batchingMaxMessages?: number;
/**
* The metadata of producer.
*/
properties?: MessageProperties;
}
export interface ProducerMessage {
/**
* The actual data payload of the message.
*/
data: Buffer;
/**
* A Object for any application-specific metadata attached to the message.
*/
properties?: MessageProperties;
/**
* The timestamp associated with the message.
*/
eventTimestamp?: number;
/**
* The sequence ID of the message.
*/
sequenceId?: number;
/**
* The optional key associated with the message (particularly useful for things like topic compaction).
*/
partitionKey?: string;
/**
* The clusters to which this message will be replicated. Pulsar brokers handle message replication automatically;
* you should only change this setting if you want to override the broker default.
*/
replicationClusters?: string[];
}
export class Producer {
/**
* Publishes a message to the producer's topic. When the message is successfully acknowledged by the Pulsar broker,
* or an error will be thrown, the Promise object run executor function.
* @param message Message to be published.
*/
send(message: ProducerMessage): Promise<null>;
/**
* Sends message from send queue to Pulser broker. When the message is successfully acknowledged by the Pulsar broker,
* or an error will be thrown, the Promise object run executor function.
*/
flush(): Promise<null>;
/**
* Closes the producer and releases all resources allocated to it. If close() is called then no more messages will be accepted from the publisher.
* This method will return Promise object, and when all pending publish requests have been persisted by Pulsar then run executor function.
* If an error is thrown, no pending writes will be retried.
*/
close(): Promise<null>;
}
export interface SubscribeOpts {
/**
* The Pulsar topic on which the consumer will establish a subscription and listen for messages.
*/
topic: string;
/**
* The subscription name for this consumer.
*/
subscription: string;
/**
* Available options are Exclusive, Shared, and Failover.
* Default: Exclusive
*/
subscriptionType?: SubscriptionType;
/**
* Acknowledge timeout in milliseconds.
* Default: 0
*/
ackTimeoutMs?: number;
/**
* Sets the size of the consumer's receiver queue, i.e. the number of messages that can be accumulated by the consumer before the application calls receive.
* A value higher than the default could increase consumer throughput, though at the expense of more memory utilization.
* Default: 1000
*/
receiverQueueSize?: number;
/**
* Set the max total receiver queue size across partitions.
* This setting will be used to reduce the receiver queue size for individual partitions if the total exceeds this value.
* Default: 50000
*/
receiverQueueSizeAcrossPartitions?: number;
/**
* The name of consumer. Currently, failover mode uses consumer name for ordering.
*/
consumerName?: string;
/**
* The metadata of consumer.
*/
properties?: MessageProperties;
/**
* The message listener of consumer.
*/
listener?: (message: Message, consumer: Consumer) => void;
}
export class Consumer {
/**
* Receives a single message from the topic with optional specific timeout in milliseconds.
* @param timeout Wait timeout in milliseconds.
*/
receive(timeout?: number): Promise<Message>;
/**
* Acknowledges a message to the Pulsar broker by message object.
* @param message Message to acknowledge.
*/
acknowledge(message: Message): void;
/**
* Acknowledges a message to the Pulsar broker by message ID object.
* @param messageId Message ID to acknowledge.
*/
acknowledgeId(messageId: MessageId): void;
/**
* Negatively acknowledges a message to the Pulsar broker by message object.
* @param message Message to acknowledge.
*/
negativeAcknowledge(message: Message): void;
/**
* Negatively acknowledges a message to the Pulsar broker by message ID object.
* @param messageId Message ID to acknowledge.
*/
negativeAcknowledgeId(messageId: MessageId): void;
/**
* Acknowledges all the messages in the stream, up to and including the specified message.
* The acknowledgeCumulative method will return void, and send the ack to the broker asynchronously.
* After that, the messages will not be redelivered to the consumer. Cumulative acking can not be used with a shared subscription type.
* @param message Message to acknowledge cumulatively.
*/
acknowledgeCumulative(message: Message): void;
/**
* Acknowledges all the messages in the stream, up to and including the specified message ID.
* @param messageId Message ID to acknowledge cumulatively.
*/
acknowledgeCumulativeId(messageId: MessageId): void;
/**
* Closes the consumer, disabling its ability to receive messages from the broker.
*/
close(): Promise<null>;
}
export interface ReaderOpts {
/**
* The Pulsar topic on which the reader will establish a subscription and listen for messages.
*/
topic: string;
/**
* The initial reader position, i.e. the message at which the reader begins processing messages.
* The options are Pulsar.MessageId.earliest (the earliest available message on the topic), Pulsar.MessageId.latest (the latest available message on the topic),
* or a message ID object for a position that is not earliest or latest.
*/
startMessageId: MessageId;
/**
* Sets the size of the reader's receiver queue, i.e. the number of messages that can be accumulated by the reader before the application calls readNext.
* A value higher than the default of 1000 could increase reader throughput, though at the expense of more memory utilization.
* Default 1000
*/
receiverQueueSize?: number;
/**
* The name of the reader.
*/
readerName?: string;
/**
* The subscription role prefix.
*/
subscriptionRolePrefix?: string;
}
export class Reader {
/**
* Receives the next message on the topic (analogous to the receive method for consumers)
* with optional specific timeout in milliseconds.
* @param timeout Wait timeout in milliseconds.
*/
readNext(timeout?: number): Promise<Message>;
/**
* Return whether Broker has next message in target topic.
*/
hasNext(): boolean;
/**
* Closes the reader, disabling its ability to receive messages from the broker.
*/
close(): Promise<null>;
} | */
tlsAllowInsecureConnection?: boolean; | random_line_split |
main.rs | #[macro_use]
extern crate derive_new;
use std::fmt::{self, Debug, Display};
use serde::{Deserialize, Serialize};
const PROOF: &str = "0";
#[derive(Serialize, Deserialize, Debug, Clone, new)]
struct Block {
index: u64,
previus_hash: String,
timestamp: String,
data: Vec<Transaction>,
hash: String,
proof: Option<u128>,
}
#[derive(Serialize, Deserialize, Debug, Clone)]
struct Wallet {
pub_key: String,
priv_key: String,
}
impl Wallet {
fn get(&self) -> String {
self.pub_key.clone()
}
}
#[derive(Serialize, Deserialize, Debug, Clone)]
struct BlockChain {
blocks: Vec<Block>,
pending_transactions: Vec<Transaction>,
}
impl Block {
fn test_block(&self) -> String {
calculate_hash_proof(
self.index.clone(),
self.previus_hash.clone(),
self.timestamp.clone(),
self.data.clone(),
PROOF.clone(),
)
.0
}
}
#[derive(Serialize, Deserialize, Debug, Clone)]
struct Transaction {
sender: String,
reciver: String,
amount: u64,
hash: Option<String>,
}
impl Transaction {
fn new(sender: Wallet, reciver: Wallet, amount: u64) -> Transaction {
let sender = sender.pub_key.clone();
let reciver = reciver.pub_key.clone();
let x = Transaction {
sender,
reciver,
amount,
hash: None,
};
calculate_hash_transaction(x)
}
}
fn calculate_hash_transaction(transaction: Transaction) -> Transaction {
use sha3::{Digest, Sha3_512};
let mut hasher = Sha3_512::new();
hasher.update(transaction.sender.clone());
hasher.update(transaction.reciver.clone());
hasher.update(transaction.amount.to_string().as_bytes());
let hash = format!("{:20x}", hasher.finalize());
let x = Transaction {
sender: transaction.sender,
reciver: transaction.reciver,
amount: transaction.amount,
hash: Some(hash),
};
x
}
impl Display for Transaction {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{:?}-{:?}-{}", self.sender, self.reciver, self.amount)
}
}
trait Createblock {
fn new(
index: u64,
previus_hash: String,
timestamp: String,
data: Vec<Transaction>,
hash: String,
proof: u128,
) -> Self;
}
impl Createblock for Block {
fn new(
index: u64,
previus_hash: String,
timestamp: String,
data: Vec<Transaction>,
hash: String,
proof: u128,
) -> Block {
Block {
index,
previus_hash,
timestamp,
data,
hash,
proof: Some(proof),
}
}
}
trait Blockchain {
fn new() -> Self;
}
impl Blockchain for BlockChain {
fn new() -> BlockChain {
BlockChain {
blocks: Vec::new(),
pending_transactions: Vec::new(),
}
}
}
impl BlockChain {
fn add_block_thirst(&mut self, block: Block) {
self.blocks.push(block);
}
fn add_transaction(&mut self, transaction: Transaction) {
self.pending_transactions.push(transaction);
}
fn add_block(&mut self, data: Vec<Transaction>, proof: &str) {
let (calculate_hash, proof) = calculate_hash_proof(
self.blocks
.last()
.expect("Can't get previous block index")
.index
+ 1,
self.blocks
.last()
.expect("Can't get previous block hash")
.hash
.clone(),
chrono::offset::Utc::now()
.timestamp_millis()
.to_string()
.to_string(),
data.clone(),
proof,
);
self.add_block_thirst(Block {
index: self
.blocks
.last()
.expect("Can't get previous block index")
.index
+ 1,
previus_hash: self
.blocks
.last()
.expect("Can't get previous block hash")
.hash
.clone(),
timestamp: chrono::offset::Utc::now().timestamp_millis().to_string(),
data,
hash: calculate_hash,
proof: Some(proof),
})
}
fn get_pendding_transactions(&self) -> Vec<Transaction> {
self.pending_transactions.clone()
}
fn clear_pendding_transactions(&mut self) {
self.pending_transactions.clear();
}
fn is_good(&self) -> bool {
let blocksss = self.blocks.clone();
for x in 1..self.blocks.len() {
// println!("{} || {}", self.blocks[x].test_block(),self.blocks[x + 1].previus_hash);
let test = blocksss[x].test_block();
let prev = blocksss[x - 1].previus_hash.clone();
if test != prev {
// println!("||||||||||||||| {:?} |||||||||||||||||||| {:?} ||||||||||||||||||", self.blocks[x].test_block(), self.blocks[x + 1].previus_hash);
return false;
}
}
return true;
}
// funcion for mining pending transactions
fn mine(&mut self, proof: &str) {
let mut data = self.get_pendding_transactions();
if data.len() > 0 {
self.add_block(data, proof);
self.clear_pendding_transactions();
}
}
// how much money the user has using wallet
fn get_balance(&self, wallet: Wallet) -> u128 {
let mut balance: u128 = 0;
for x in self.blocks.clone() {
for y in x.data.clone() {
if y.sender == wallet.pub_key |
if y.reciver == wallet.pub_key {
let amount = y.amount as u128;
balance += amount;
}
}
}
println!("{}", balance);
balance
}
}
impl Display for Block {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(
f,
"{} {:?} {:?} {:?} {:?}",
self.index, self.previus_hash, self.timestamp, self.data, self.hash
)
}
}
fn calculate_hash_proof(
index: u64,
previus_hash: String,
timestamp: String,
data: Vec<Transaction>,
proof: &str,
) -> (String, u128) {
use sha3::{Digest, Sha3_512};
let proof = proof.to_owned();
let mut hasher = Sha3_512::new();
let before = index.to_string().parse::<String>().unwrap()
+ &previus_hash
+ &format!("{:?}", timestamp)
+ &format!("{:?}", data);
hasher.update(before.as_bytes());
let steps: u128 = std::u128::MAX;
let mut i = 0;
for x in 0..steps {
if format!("{:02x}", hasher.clone().finalize())[..proof.len()] == proof {
println!(
"Mined! : {} difficulty: {}",
format!("{:02x}", hasher.clone().finalize()),
x
);
i = x;
break;
} else {
hasher.update(x.to_string().as_bytes());
}
}
(format!("{:02x}", hasher.finalize()), i)
}
fn main() {
let olek = generate_wallet();
let anna = generate_wallet();
let mut blockchin: BlockChain = Blockchain::new();
let s: Transaction = Transaction::new(olek.clone(), anna, 22);
let time = chrono::offset::Utc::now()
.timestamp_millis()
.to_string()
.to_string();
let calc = calculate_hash_proof(0, "".to_string(), time.clone(), vec![s.clone()], PROOF);
let start: Block = Block::new(
0,
"".to_string(),
time,
vec![s.clone()],
calc.0,
Some(calc.1),
);
blockchin.add_block_thirst(start);
// end of starrt code
// let mut transactions = vec![];
// for x in 0..=33 {
// let a: Transaction = Transaction::new(x.to_string(), (x + 10).to_string(), x + 100);
// transactions.push(a);
// }
// for x in transactions {
// blockchin.add_transaction(x);
// }
// let mut transaction = vec![];
// for _ in 0..10 {
// std::thread::sleep(std::time::Duration::from_millis(1000));
// let one = generate_wallet();
// // sleep for 1 second
// std::thread::sleep(std::time::Duration::from_millis(1000));
// let two = generate_wallet();
// let s: Transaction = Transaction::new(one, two, 100);
// blockchin.add_transaction(s.clone());
// transaction.push(s);
// }
blockchin.mine(PROOF);
// create_pending(&mut blockchin, PROOF);
let json = serde_json::to_string_pretty(&blockchin).unwrap();
println!("{}", json);
std::fs::write("json.json", json).expect("Unable to write file");
if blockchin.is_good() {
println!("XD")
}
let nic = generate_wallet();
// check user balance
println!(" is {}", blockchin.get_balance(olek));
// blockchin
// let contents =
// std::fs::read_to_string("json.json").expect("Something went wrong reading the file");
// let bc: BlockChain = serde_json::from_str(&contents).unwrap();
// if bc.is_good() {
// panic!("oh no");
// }
// use rsa::{PaddingScheme, PublicKey, RsaPrivateKey};
// let mut rng = rand::rngs::OsRng;
// let padding = PaddingScheme::new_pkcs1v15_encrypt();
// let privet_key =
// rsa::RsaPrivateKey::new(&mut rng, 333).expect("Oh nie nie da sie privata stworzyc");
// let public = rsa::RsaPublicKey::from(&privet_key);
// let enc_data = public
// .encrypt(&mut rng, padding, b"s")
// .expect("can't encrypt data");
// println!("{:?}", enc_data);
// let padding = PaddingScheme::new_pkcs1v15_encrypt();
// let decode = privet_key.decrypt(padding, &enc_data).unwrap();
// println!("{}", String::from_utf8_lossy(&decode));
}
fn create_pending(blockchin: &mut BlockChain, proof: &str) {
let mut tran: Vec<Transaction> = Vec::new();
for x in blockchin.clone().get_pendding_transactions() {
tran.push(x.clone());
if tran.len() == 5 {
blockchin.add_block(tran.clone(), proof);
tran.clear();
} else if blockchin.clone().get_pendding_transactions().len() < 5 {
blockchin.add_block(tran.clone(), proof);
tran.clear();
}
}
blockchin.clear_pendding_transactions();
}
fn generate_wallet() -> Wallet {
let key = openssl::rsa::Rsa::generate(1024).expect("Failed to generate key"); //2048
let priv_key = key.private_key_to_pem().unwrap();
let pub_key = key.public_key_to_pem().unwrap();
let priv_key = hex::encode(priv_key);
let pub_key = hex::encode(pub_key);
Wallet { pub_key, priv_key }
}
| {
let amount = y.amount as u128;
balance += amount;
} | conditional_block |
main.rs | #[macro_use]
extern crate derive_new;
use std::fmt::{self, Debug, Display};
use serde::{Deserialize, Serialize};
const PROOF: &str = "0";
#[derive(Serialize, Deserialize, Debug, Clone, new)]
struct Block {
index: u64,
previus_hash: String,
timestamp: String,
data: Vec<Transaction>,
hash: String,
proof: Option<u128>,
}
#[derive(Serialize, Deserialize, Debug, Clone)]
struct Wallet {
pub_key: String,
priv_key: String,
}
impl Wallet {
fn get(&self) -> String {
self.pub_key.clone()
}
}
#[derive(Serialize, Deserialize, Debug, Clone)]
struct BlockChain {
blocks: Vec<Block>,
pending_transactions: Vec<Transaction>,
}
impl Block {
fn test_block(&self) -> String {
calculate_hash_proof(
self.index.clone(),
self.previus_hash.clone(),
self.timestamp.clone(),
self.data.clone(),
PROOF.clone(),
)
.0
}
}
#[derive(Serialize, Deserialize, Debug, Clone)]
struct | {
sender: String,
reciver: String,
amount: u64,
hash: Option<String>,
}
impl Transaction {
fn new(sender: Wallet, reciver: Wallet, amount: u64) -> Transaction {
let sender = sender.pub_key.clone();
let reciver = reciver.pub_key.clone();
let x = Transaction {
sender,
reciver,
amount,
hash: None,
};
calculate_hash_transaction(x)
}
}
fn calculate_hash_transaction(transaction: Transaction) -> Transaction {
use sha3::{Digest, Sha3_512};
let mut hasher = Sha3_512::new();
hasher.update(transaction.sender.clone());
hasher.update(transaction.reciver.clone());
hasher.update(transaction.amount.to_string().as_bytes());
let hash = format!("{:20x}", hasher.finalize());
let x = Transaction {
sender: transaction.sender,
reciver: transaction.reciver,
amount: transaction.amount,
hash: Some(hash),
};
x
}
impl Display for Transaction {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{:?}-{:?}-{}", self.sender, self.reciver, self.amount)
}
}
trait Createblock {
fn new(
index: u64,
previus_hash: String,
timestamp: String,
data: Vec<Transaction>,
hash: String,
proof: u128,
) -> Self;
}
impl Createblock for Block {
fn new(
index: u64,
previus_hash: String,
timestamp: String,
data: Vec<Transaction>,
hash: String,
proof: u128,
) -> Block {
Block {
index,
previus_hash,
timestamp,
data,
hash,
proof: Some(proof),
}
}
}
trait Blockchain {
fn new() -> Self;
}
impl Blockchain for BlockChain {
fn new() -> BlockChain {
BlockChain {
blocks: Vec::new(),
pending_transactions: Vec::new(),
}
}
}
impl BlockChain {
fn add_block_thirst(&mut self, block: Block) {
self.blocks.push(block);
}
fn add_transaction(&mut self, transaction: Transaction) {
self.pending_transactions.push(transaction);
}
fn add_block(&mut self, data: Vec<Transaction>, proof: &str) {
let (calculate_hash, proof) = calculate_hash_proof(
self.blocks
.last()
.expect("Can't get previous block index")
.index
+ 1,
self.blocks
.last()
.expect("Can't get previous block hash")
.hash
.clone(),
chrono::offset::Utc::now()
.timestamp_millis()
.to_string()
.to_string(),
data.clone(),
proof,
);
self.add_block_thirst(Block {
index: self
.blocks
.last()
.expect("Can't get previous block index")
.index
+ 1,
previus_hash: self
.blocks
.last()
.expect("Can't get previous block hash")
.hash
.clone(),
timestamp: chrono::offset::Utc::now().timestamp_millis().to_string(),
data,
hash: calculate_hash,
proof: Some(proof),
})
}
fn get_pendding_transactions(&self) -> Vec<Transaction> {
self.pending_transactions.clone()
}
fn clear_pendding_transactions(&mut self) {
self.pending_transactions.clear();
}
fn is_good(&self) -> bool {
let blocksss = self.blocks.clone();
for x in 1..self.blocks.len() {
// println!("{} || {}", self.blocks[x].test_block(),self.blocks[x + 1].previus_hash);
let test = blocksss[x].test_block();
let prev = blocksss[x - 1].previus_hash.clone();
if test != prev {
// println!("||||||||||||||| {:?} |||||||||||||||||||| {:?} ||||||||||||||||||", self.blocks[x].test_block(), self.blocks[x + 1].previus_hash);
return false;
}
}
return true;
}
// funcion for mining pending transactions
fn mine(&mut self, proof: &str) {
let mut data = self.get_pendding_transactions();
if data.len() > 0 {
self.add_block(data, proof);
self.clear_pendding_transactions();
}
}
// how much money the user has using wallet
fn get_balance(&self, wallet: Wallet) -> u128 {
let mut balance: u128 = 0;
for x in self.blocks.clone() {
for y in x.data.clone() {
if y.sender == wallet.pub_key {
let amount = y.amount as u128;
balance += amount;
}
if y.reciver == wallet.pub_key {
let amount = y.amount as u128;
balance += amount;
}
}
}
println!("{}", balance);
balance
}
}
impl Display for Block {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(
f,
"{} {:?} {:?} {:?} {:?}",
self.index, self.previus_hash, self.timestamp, self.data, self.hash
)
}
}
fn calculate_hash_proof(
index: u64,
previus_hash: String,
timestamp: String,
data: Vec<Transaction>,
proof: &str,
) -> (String, u128) {
use sha3::{Digest, Sha3_512};
let proof = proof.to_owned();
let mut hasher = Sha3_512::new();
let before = index.to_string().parse::<String>().unwrap()
+ &previus_hash
+ &format!("{:?}", timestamp)
+ &format!("{:?}", data);
hasher.update(before.as_bytes());
let steps: u128 = std::u128::MAX;
let mut i = 0;
for x in 0..steps {
if format!("{:02x}", hasher.clone().finalize())[..proof.len()] == proof {
println!(
"Mined! : {} difficulty: {}",
format!("{:02x}", hasher.clone().finalize()),
x
);
i = x;
break;
} else {
hasher.update(x.to_string().as_bytes());
}
}
(format!("{:02x}", hasher.finalize()), i)
}
fn main() {
let olek = generate_wallet();
let anna = generate_wallet();
let mut blockchin: BlockChain = Blockchain::new();
let s: Transaction = Transaction::new(olek.clone(), anna, 22);
let time = chrono::offset::Utc::now()
.timestamp_millis()
.to_string()
.to_string();
let calc = calculate_hash_proof(0, "".to_string(), time.clone(), vec![s.clone()], PROOF);
let start: Block = Block::new(
0,
"".to_string(),
time,
vec![s.clone()],
calc.0,
Some(calc.1),
);
blockchin.add_block_thirst(start);
// end of starrt code
// let mut transactions = vec![];
// for x in 0..=33 {
// let a: Transaction = Transaction::new(x.to_string(), (x + 10).to_string(), x + 100);
// transactions.push(a);
// }
// for x in transactions {
// blockchin.add_transaction(x);
// }
// let mut transaction = vec![];
// for _ in 0..10 {
// std::thread::sleep(std::time::Duration::from_millis(1000));
// let one = generate_wallet();
// // sleep for 1 second
// std::thread::sleep(std::time::Duration::from_millis(1000));
// let two = generate_wallet();
// let s: Transaction = Transaction::new(one, two, 100);
// blockchin.add_transaction(s.clone());
// transaction.push(s);
// }
blockchin.mine(PROOF);
// create_pending(&mut blockchin, PROOF);
let json = serde_json::to_string_pretty(&blockchin).unwrap();
println!("{}", json);
std::fs::write("json.json", json).expect("Unable to write file");
if blockchin.is_good() {
println!("XD")
}
let nic = generate_wallet();
// check user balance
println!(" is {}", blockchin.get_balance(olek));
// blockchin
// let contents =
// std::fs::read_to_string("json.json").expect("Something went wrong reading the file");
// let bc: BlockChain = serde_json::from_str(&contents).unwrap();
// if bc.is_good() {
// panic!("oh no");
// }
// use rsa::{PaddingScheme, PublicKey, RsaPrivateKey};
// let mut rng = rand::rngs::OsRng;
// let padding = PaddingScheme::new_pkcs1v15_encrypt();
// let privet_key =
// rsa::RsaPrivateKey::new(&mut rng, 333).expect("Oh nie nie da sie privata stworzyc");
// let public = rsa::RsaPublicKey::from(&privet_key);
// let enc_data = public
// .encrypt(&mut rng, padding, b"s")
// .expect("can't encrypt data");
// println!("{:?}", enc_data);
// let padding = PaddingScheme::new_pkcs1v15_encrypt();
// let decode = privet_key.decrypt(padding, &enc_data).unwrap();
// println!("{}", String::from_utf8_lossy(&decode));
}
fn create_pending(blockchin: &mut BlockChain, proof: &str) {
let mut tran: Vec<Transaction> = Vec::new();
for x in blockchin.clone().get_pendding_transactions() {
tran.push(x.clone());
if tran.len() == 5 {
blockchin.add_block(tran.clone(), proof);
tran.clear();
} else if blockchin.clone().get_pendding_transactions().len() < 5 {
blockchin.add_block(tran.clone(), proof);
tran.clear();
}
}
blockchin.clear_pendding_transactions();
}
fn generate_wallet() -> Wallet {
let key = openssl::rsa::Rsa::generate(1024).expect("Failed to generate key"); //2048
let priv_key = key.private_key_to_pem().unwrap();
let pub_key = key.public_key_to_pem().unwrap();
let priv_key = hex::encode(priv_key);
let pub_key = hex::encode(pub_key);
Wallet { pub_key, priv_key }
}
| Transaction | identifier_name |
main.rs | #[macro_use]
extern crate derive_new;
use std::fmt::{self, Debug, Display};
use serde::{Deserialize, Serialize};
const PROOF: &str = "0";
#[derive(Serialize, Deserialize, Debug, Clone, new)]
struct Block {
index: u64,
previus_hash: String,
timestamp: String,
data: Vec<Transaction>,
hash: String,
proof: Option<u128>,
}
#[derive(Serialize, Deserialize, Debug, Clone)]
struct Wallet {
pub_key: String,
priv_key: String,
}
impl Wallet {
fn get(&self) -> String {
self.pub_key.clone()
}
}
#[derive(Serialize, Deserialize, Debug, Clone)]
struct BlockChain {
blocks: Vec<Block>,
pending_transactions: Vec<Transaction>,
}
impl Block {
fn test_block(&self) -> String {
calculate_hash_proof(
self.index.clone(),
self.previus_hash.clone(),
self.timestamp.clone(),
self.data.clone(),
PROOF.clone(),
)
.0
}
}
#[derive(Serialize, Deserialize, Debug, Clone)]
struct Transaction {
sender: String,
reciver: String,
amount: u64,
hash: Option<String>,
}
impl Transaction {
fn new(sender: Wallet, reciver: Wallet, amount: u64) -> Transaction {
let sender = sender.pub_key.clone();
let reciver = reciver.pub_key.clone();
let x = Transaction {
sender,
reciver,
amount,
hash: None,
};
calculate_hash_transaction(x)
}
}
/// Computes the SHA3-512 digest over a transaction's (sender, reciver,
/// amount) triple and returns the transaction with its `hash` populated.
fn calculate_hash_transaction(transaction: Transaction) -> Transaction {
    use sha3::{Digest, Sha3_512};
    let mut hasher = Sha3_512::new();
    // Feed the fields by reference; the previous `.clone()` calls copied
    // each String only to hash it.
    hasher.update(transaction.sender.as_bytes());
    hasher.update(transaction.reciver.as_bytes());
    hasher.update(transaction.amount.to_string().as_bytes());
    // NOTE(review): `{:20x}` only sets a *minimum* width of 20; a SHA3-512
    // digest is already 128 hex chars, so this behaves like plain `{:x}`.
    // If zero-padding was intended the spec would be `{:02x}` - confirm.
    let hash = format!("{:20x}", hasher.finalize());
    // Struct-update syntax: keep every field, just fill in the hash.
    Transaction {
        hash: Some(hash),
        ..transaction
    }
}
impl Display for Transaction {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{:?}-{:?}-{}", self.sender, self.reciver, self.amount)
}
}
/// Constructor abstraction for `Block`.
///
/// NOTE(review): `#[derive(new)]` on `Block` also generates an *inherent*
/// `Block::new` whose `proof` parameter is `Option<u128>`; inherent methods
/// take precedence over trait methods, so call sites such as `main` resolve
/// to the derived one, not this trait.
trait Createblock {
    /// Builds a block from its parts; `proof` is the mined counter.
    fn new(
        index: u64,
        previus_hash: String,
        timestamp: String,
        data: Vec<Transaction>,
        hash: String,
        proof: u128,
    ) -> Self;
}
impl Createblock for Block {
    /// Assembles a `Block` from its parts, wrapping the mined proof in
    /// `Some`.
    fn new(
        index: u64,
        previus_hash: String,
        timestamp: String,
        data: Vec<Transaction>,
        hash: String,
        proof: u128,
    ) -> Block {
        let proof = Some(proof);
        Block {
            index,
            previus_hash,
            timestamp,
            data,
            hash,
            proof,
        }
    }
}
/// Constructor abstraction for `BlockChain`.
trait Blockchain {
    /// Returns an empty chain.
    fn new() -> Self;
}
impl Blockchain for BlockChain {
    /// Starts an empty chain: no blocks, no pending transactions.
    fn new() -> BlockChain {
        BlockChain {
            blocks: vec![],
            pending_transactions: vec![],
        }
    }
}
impl BlockChain {
    /// Appends an already-built block without any validation.
    /// Used for the genesis block and internally by `add_block`.
    fn add_block_thirst(&mut self, block: Block) {
        self.blocks.push(block);
    }

    /// Queues a transaction to be included in the next mined block.
    fn add_transaction(&mut self, transaction: Transaction) {
        self.pending_transactions.push(transaction);
    }

    /// Mines a new block containing `data` and appends it to the chain.
    ///
    /// Panics (via `expect`) when the chain is empty: a genesis block must
    /// be added with `add_block_thirst` first.
    fn add_block(&mut self, data: Vec<Transaction>, proof: &str) {
        let last = self.blocks.last().expect("Can't get previous block index");
        let index = last.index + 1;
        let previus_hash = last.hash.clone();
        // BUG FIX: the timestamp used to be generated twice (once for the
        // hash, once for the stored block), so the stored hash was computed
        // over a *different* timestamp than the one persisted. Compute every
        // linkage field once and reuse it for both the hash and the block.
        let timestamp = chrono::offset::Utc::now().timestamp_millis().to_string();
        let (hash, proof) = calculate_hash_proof(
            index,
            previus_hash.clone(),
            timestamp.clone(),
            data.clone(),
            proof,
        );
        self.add_block_thirst(Block {
            index,
            previus_hash,
            timestamp,
            data,
            hash,
            proof: Some(proof),
        })
    }

    /// Returns a copy of the not-yet-mined transactions.
    fn get_pendding_transactions(&self) -> Vec<Transaction> {
        self.pending_transactions.clone()
    }

    /// Drops all queued transactions.
    fn clear_pendding_transactions(&mut self) {
        self.pending_transactions.clear();
    }

    /// Verifies chain integrity.
    ///
    /// BUG FIX: the old code compared block `x`'s recomputed hash against
    /// block `x - 1`'s `previus_hash` (two links back), which fails even for
    /// untampered chains. Each block is now checked two ways:
    ///   1. linkage:   blocks[x].previus_hash == blocks[x - 1].hash
    ///   2. integrity: recomputing blocks[x]'s hash reproduces blocks[x].hash
    fn is_good(&self) -> bool {
        for x in 1..self.blocks.len() {
            if self.blocks[x].previus_hash != self.blocks[x - 1].hash {
                return false;
            }
            if self.blocks[x].test_block() != self.blocks[x].hash {
                return false;
            }
        }
        true
    }

    /// Mines all pending transactions into a single new block, then clears
    /// the pending queue. No-op when nothing is pending.
    fn mine(&mut self, proof: &str) {
        // `data` was previously declared `mut` but never mutated.
        let data = self.get_pendding_transactions();
        if !data.is_empty() {
            self.add_block(data, proof);
            self.clear_pendding_transactions();
        }
    }

    /// Sums a wallet's balance over the whole chain and prints it.
    ///
    /// BUG FIX: amounts *sent* by the wallet used to be added to the balance
    /// as well; they are now subtracted. `saturating_sub` keeps the unsigned
    /// balance at zero instead of panicking if a wallet overspends.
    fn get_balance(&self, wallet: Wallet) -> u128 {
        let mut balance: u128 = 0;
        for block in &self.blocks {
            for tx in &block.data {
                let amount = tx.amount as u128;
                if tx.sender == wallet.pub_key {
                    balance = balance.saturating_sub(amount);
                }
                if tx.reciver == wallet.pub_key {
                    balance += amount;
                }
            }
        }
        println!("{}", balance);
        balance
    }
}
impl Display for Block { | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(
f,
"{} {:?} {:?} {:?} {:?}",
self.index, self.previus_hash, self.timestamp, self.data, self.hash
)
}
}
/// Searches for a proof-of-work over the given block fields.
///
/// A candidate digest is accepted when it starts with the `proof` prefix
/// (e.g. "0"). Each failed attempt feeds the attempt counter back into the
/// hasher, so the search is deterministic for a given input. Returns the
/// winning hex digest and the number of attempts it took.
///
/// NOTE(review): `timestamp` and `data` are hashed via their `{:?}` debug
/// representation (quotes included). That matches the original behaviour
/// and must not change, or previously mined chains stop validating.
fn calculate_hash_proof(
    index: u64,
    previus_hash: String,
    timestamp: String,
    data: Vec<Transaction>,
    proof: &str,
) -> (String, u128) {
    use sha3::{Digest, Sha3_512};
    let mut hasher = Sha3_512::new();
    // The old `index.to_string().parse::<String>().unwrap()` round-trip was
    // equivalent to a plain `to_string()`.
    let before = index.to_string()
        + &previus_hash
        + &format!("{:?}", timestamp)
        + &format!("{:?}", data);
    hasher.update(before.as_bytes());
    let mut attempts: u128 = 0;
    for x in 0..std::u128::MAX {
        // Render the candidate digest once per attempt; the old code
        // finalized the cloned hasher twice on a successful iteration.
        let digest = format!("{:02x}", hasher.clone().finalize());
        // `starts_with` replaces the `[..proof.len()]` slice comparison and
        // cannot panic on a char boundary.
        if digest.starts_with(proof) {
            println!("Mined! : {} difficulty: {}", digest, x);
            attempts = x;
            break;
        }
        hasher.update(x.to_string().as_bytes());
    }
    (format!("{:02x}", hasher.finalize()), attempts)
}
/// Demo driver: builds a genesis block, mines, dumps the chain to
/// `json.json`, validates it, and prints a wallet balance.
fn main() {
    let olek = generate_wallet();
    let anna = generate_wallet();
    let mut blockchin: BlockChain = Blockchain::new();
    let s: Transaction = Transaction::new(olek.clone(), anna, 22);
    // Millisecond timestamp for the genesis block. NOTE(review): the second
    // `.to_string()` is a no-op on an already-owned String.
    let time = chrono::offset::Utc::now()
        .timestamp_millis()
        .to_string()
        .to_string();
    // Hash/proof for the genesis block (index 0, empty previous hash).
    let calc = calculate_hash_proof(0, "".to_string(), time.clone(), vec![s.clone()], PROOF);
    // This resolves to the *inherent* `new` generated by `#[derive(new)]`
    // (whose proof parameter is `Option<u128>`), not `Createblock::new` -
    // inherent methods take precedence over trait methods.
    let start: Block = Block::new(
        0,
        "".to_string(),
        time,
        vec![s.clone()],
        calc.0,
        Some(calc.1),
    );
    blockchin.add_block_thirst(start);
    // end of starrt code
    // let mut transactions = vec![];
    // for x in 0..=33 {
    //     let a: Transaction = Transaction::new(x.to_string(), (x + 10).to_string(), x + 100);
    //     transactions.push(a);
    // }
    // for x in transactions {
    //     blockchin.add_transaction(x);
    // }
    // let mut transaction = vec![];
    // for _ in 0..10 {
    //     std::thread::sleep(std::time::Duration::from_millis(1000));
    //     let one = generate_wallet();
    //     // sleep for 1 second
    //     std::thread::sleep(std::time::Duration::from_millis(1000));
    //     let two = generate_wallet();
    //     let s: Transaction = Transaction::new(one, two, 100);
    //     blockchin.add_transaction(s.clone());
    //     transaction.push(s);
    // }
    // Nothing was queued above (the add_transaction calls are commented
    // out), so this mine() call is currently a no-op.
    blockchin.mine(PROOF);
    // create_pending(&mut blockchin, PROOF);
    let json = serde_json::to_string_pretty(&blockchin).unwrap();
    println!("{}", json);
    std::fs::write("json.json", json).expect("Unable to write file");
    if blockchin.is_good() {
        println!("XD")
    }
    // NOTE(review): `nic` is created but never used.
    let nic = generate_wallet();
    // check user balance
    println!(" is {}", blockchin.get_balance(olek));
    // blockchin
    // let contents =
    //     std::fs::read_to_string("json.json").expect("Something went wrong reading the file");
    // let bc: BlockChain = serde_json::from_str(&contents).unwrap();
    // if bc.is_good() {
    //     panic!("oh no");
    // }
    // use rsa::{PaddingScheme, PublicKey, RsaPrivateKey};
    // let mut rng = rand::rngs::OsRng;
    // let padding = PaddingScheme::new_pkcs1v15_encrypt();
    // let privet_key =
    //     rsa::RsaPrivateKey::new(&mut rng, 333).expect("Oh nie nie da sie privata stworzyc");
    // let public = rsa::RsaPublicKey::from(&privet_key);
    // let enc_data = public
    //     .encrypt(&mut rng, padding, b"s")
    //     .expect("can't encrypt data");
    // println!("{:?}", enc_data);
    // let padding = PaddingScheme::new_pkcs1v15_encrypt();
    // let decode = privet_key.decrypt(padding, &enc_data).unwrap();
    // println!("{}", String::from_utf8_lossy(&decode));
}
/// Mines the pending transactions into blocks of at most five.
///
/// BUG FIX: the old version created a single-transaction block for *every*
/// pending entry whenever fewer than five were queued, and silently dropped
/// the remainder (anything after the last full batch) when the count was not
/// a multiple of five. Full batches of five are now flushed as they fill up
/// and any leftover transactions are mined as one final smaller block.
fn create_pending(blockchin: &mut BlockChain, proof: &str) {
    let mut batch: Vec<Transaction> = Vec::new();
    // `get_pendding_transactions` returns an owned Vec, so the borrow of
    // `blockchin` ends before the loop body mutates it - no clone needed.
    for tx in blockchin.get_pendding_transactions() {
        batch.push(tx);
        if batch.len() == 5 {
            blockchin.add_block(batch.clone(), proof);
            batch.clear();
        }
    }
    if !batch.is_empty() {
        blockchin.add_block(batch, proof);
    }
    blockchin.clear_pendding_transactions();
}
fn generate_wallet() -> Wallet {
let key = openssl::rsa::Rsa::generate(1024).expect("Failed to generate key"); //2048
let priv_key = key.private_key_to_pem().unwrap();
let pub_key = key.public_key_to_pem().unwrap();
let priv_key = hex::encode(priv_key);
let pub_key = hex::encode(pub_key);
Wallet { pub_key, priv_key }
} | random_line_split | |
main.rs | #[macro_use]
extern crate derive_new;
use std::fmt::{self, Debug, Display};
use serde::{Deserialize, Serialize};
const PROOF: &str = "0";
#[derive(Serialize, Deserialize, Debug, Clone, new)]
struct Block {
index: u64,
previus_hash: String,
timestamp: String,
data: Vec<Transaction>,
hash: String,
proof: Option<u128>,
}
#[derive(Serialize, Deserialize, Debug, Clone)]
struct Wallet {
pub_key: String,
priv_key: String,
}
impl Wallet {
fn get(&self) -> String {
self.pub_key.clone()
}
}
#[derive(Serialize, Deserialize, Debug, Clone)]
struct BlockChain {
blocks: Vec<Block>,
pending_transactions: Vec<Transaction>,
}
impl Block {
fn test_block(&self) -> String {
calculate_hash_proof(
self.index.clone(),
self.previus_hash.clone(),
self.timestamp.clone(),
self.data.clone(),
PROOF.clone(),
)
.0
}
}
#[derive(Serialize, Deserialize, Debug, Clone)]
struct Transaction {
sender: String,
reciver: String,
amount: u64,
hash: Option<String>,
}
impl Transaction {
fn new(sender: Wallet, reciver: Wallet, amount: u64) -> Transaction {
let sender = sender.pub_key.clone();
let reciver = reciver.pub_key.clone();
let x = Transaction {
sender,
reciver,
amount,
hash: None,
};
calculate_hash_transaction(x)
}
}
fn calculate_hash_transaction(transaction: Transaction) -> Transaction {
use sha3::{Digest, Sha3_512};
let mut hasher = Sha3_512::new();
hasher.update(transaction.sender.clone());
hasher.update(transaction.reciver.clone());
hasher.update(transaction.amount.to_string().as_bytes());
let hash = format!("{:20x}", hasher.finalize());
let x = Transaction {
sender: transaction.sender,
reciver: transaction.reciver,
amount: transaction.amount,
hash: Some(hash),
};
x
}
impl Display for Transaction {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result |
}
trait Createblock {
fn new(
index: u64,
previus_hash: String,
timestamp: String,
data: Vec<Transaction>,
hash: String,
proof: u128,
) -> Self;
}
impl Createblock for Block {
fn new(
index: u64,
previus_hash: String,
timestamp: String,
data: Vec<Transaction>,
hash: String,
proof: u128,
) -> Block {
Block {
index,
previus_hash,
timestamp,
data,
hash,
proof: Some(proof),
}
}
}
trait Blockchain {
fn new() -> Self;
}
impl Blockchain for BlockChain {
fn new() -> BlockChain {
BlockChain {
blocks: Vec::new(),
pending_transactions: Vec::new(),
}
}
}
impl BlockChain {
fn add_block_thirst(&mut self, block: Block) {
self.blocks.push(block);
}
fn add_transaction(&mut self, transaction: Transaction) {
self.pending_transactions.push(transaction);
}
fn add_block(&mut self, data: Vec<Transaction>, proof: &str) {
let (calculate_hash, proof) = calculate_hash_proof(
self.blocks
.last()
.expect("Can't get previous block index")
.index
+ 1,
self.blocks
.last()
.expect("Can't get previous block hash")
.hash
.clone(),
chrono::offset::Utc::now()
.timestamp_millis()
.to_string()
.to_string(),
data.clone(),
proof,
);
self.add_block_thirst(Block {
index: self
.blocks
.last()
.expect("Can't get previous block index")
.index
+ 1,
previus_hash: self
.blocks
.last()
.expect("Can't get previous block hash")
.hash
.clone(),
timestamp: chrono::offset::Utc::now().timestamp_millis().to_string(),
data,
hash: calculate_hash,
proof: Some(proof),
})
}
fn get_pendding_transactions(&self) -> Vec<Transaction> {
self.pending_transactions.clone()
}
fn clear_pendding_transactions(&mut self) {
self.pending_transactions.clear();
}
fn is_good(&self) -> bool {
let blocksss = self.blocks.clone();
for x in 1..self.blocks.len() {
// println!("{} || {}", self.blocks[x].test_block(),self.blocks[x + 1].previus_hash);
let test = blocksss[x].test_block();
let prev = blocksss[x - 1].previus_hash.clone();
if test != prev {
// println!("||||||||||||||| {:?} |||||||||||||||||||| {:?} ||||||||||||||||||", self.blocks[x].test_block(), self.blocks[x + 1].previus_hash);
return false;
}
}
return true;
}
// funcion for mining pending transactions
fn mine(&mut self, proof: &str) {
let mut data = self.get_pendding_transactions();
if data.len() > 0 {
self.add_block(data, proof);
self.clear_pendding_transactions();
}
}
// how much money the user has using wallet
fn get_balance(&self, wallet: Wallet) -> u128 {
let mut balance: u128 = 0;
for x in self.blocks.clone() {
for y in x.data.clone() {
if y.sender == wallet.pub_key {
let amount = y.amount as u128;
balance += amount;
}
if y.reciver == wallet.pub_key {
let amount = y.amount as u128;
balance += amount;
}
}
}
println!("{}", balance);
balance
}
}
impl Display for Block {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(
f,
"{} {:?} {:?} {:?} {:?}",
self.index, self.previus_hash, self.timestamp, self.data, self.hash
)
}
}
fn calculate_hash_proof(
index: u64,
previus_hash: String,
timestamp: String,
data: Vec<Transaction>,
proof: &str,
) -> (String, u128) {
use sha3::{Digest, Sha3_512};
let proof = proof.to_owned();
let mut hasher = Sha3_512::new();
let before = index.to_string().parse::<String>().unwrap()
+ &previus_hash
+ &format!("{:?}", timestamp)
+ &format!("{:?}", data);
hasher.update(before.as_bytes());
let steps: u128 = std::u128::MAX;
let mut i = 0;
for x in 0..steps {
if format!("{:02x}", hasher.clone().finalize())[..proof.len()] == proof {
println!(
"Mined! : {} difficulty: {}",
format!("{:02x}", hasher.clone().finalize()),
x
);
i = x;
break;
} else {
hasher.update(x.to_string().as_bytes());
}
}
(format!("{:02x}", hasher.finalize()), i)
}
fn main() {
let olek = generate_wallet();
let anna = generate_wallet();
let mut blockchin: BlockChain = Blockchain::new();
let s: Transaction = Transaction::new(olek.clone(), anna, 22);
let time = chrono::offset::Utc::now()
.timestamp_millis()
.to_string()
.to_string();
let calc = calculate_hash_proof(0, "".to_string(), time.clone(), vec![s.clone()], PROOF);
let start: Block = Block::new(
0,
"".to_string(),
time,
vec![s.clone()],
calc.0,
Some(calc.1),
);
blockchin.add_block_thirst(start);
// end of starrt code
// let mut transactions = vec![];
// for x in 0..=33 {
// let a: Transaction = Transaction::new(x.to_string(), (x + 10).to_string(), x + 100);
// transactions.push(a);
// }
// for x in transactions {
// blockchin.add_transaction(x);
// }
// let mut transaction = vec![];
// for _ in 0..10 {
// std::thread::sleep(std::time::Duration::from_millis(1000));
// let one = generate_wallet();
// // sleep for 1 second
// std::thread::sleep(std::time::Duration::from_millis(1000));
// let two = generate_wallet();
// let s: Transaction = Transaction::new(one, two, 100);
// blockchin.add_transaction(s.clone());
// transaction.push(s);
// }
blockchin.mine(PROOF);
// create_pending(&mut blockchin, PROOF);
let json = serde_json::to_string_pretty(&blockchin).unwrap();
println!("{}", json);
std::fs::write("json.json", json).expect("Unable to write file");
if blockchin.is_good() {
println!("XD")
}
let nic = generate_wallet();
// check user balance
println!(" is {}", blockchin.get_balance(olek));
// blockchin
// let contents =
// std::fs::read_to_string("json.json").expect("Something went wrong reading the file");
// let bc: BlockChain = serde_json::from_str(&contents).unwrap();
// if bc.is_good() {
// panic!("oh no");
// }
// use rsa::{PaddingScheme, PublicKey, RsaPrivateKey};
// let mut rng = rand::rngs::OsRng;
// let padding = PaddingScheme::new_pkcs1v15_encrypt();
// let privet_key =
// rsa::RsaPrivateKey::new(&mut rng, 333).expect("Oh nie nie da sie privata stworzyc");
// let public = rsa::RsaPublicKey::from(&privet_key);
// let enc_data = public
// .encrypt(&mut rng, padding, b"s")
// .expect("can't encrypt data");
// println!("{:?}", enc_data);
// let padding = PaddingScheme::new_pkcs1v15_encrypt();
// let decode = privet_key.decrypt(padding, &enc_data).unwrap();
// println!("{}", String::from_utf8_lossy(&decode));
}
/// Mines the pending transactions into blocks of at most five.
///
/// BUG FIX: the old version created a single-transaction block for *every*
/// pending entry whenever fewer than five were queued, and silently dropped
/// the remainder (anything after the last full batch) when the count was not
/// a multiple of five. Full batches of five are now flushed as they fill up
/// and any leftover transactions are mined as one final smaller block.
fn create_pending(blockchin: &mut BlockChain, proof: &str) {
    let mut batch: Vec<Transaction> = Vec::new();
    // `get_pendding_transactions` returns an owned Vec, so the borrow of
    // `blockchin` ends before the loop body mutates it - no clone needed.
    for tx in blockchin.get_pendding_transactions() {
        batch.push(tx);
        if batch.len() == 5 {
            blockchin.add_block(batch.clone(), proof);
            batch.clear();
        }
    }
    if !batch.is_empty() {
        blockchin.add_block(batch, proof);
    }
    blockchin.clear_pendding_transactions();
}
/// Creates a wallet from a freshly generated RSA key pair; both PEM blobs
/// are hex-encoded before being stored.
fn generate_wallet() -> Wallet {
    // NOTE(review): 1024-bit RSA is weak; the trailing comment suggests
    // 2048 was the intended production size.
    let key = openssl::rsa::Rsa::generate(1024).expect("Failed to generate key"); //2048
    let priv_key = key.private_key_to_pem().unwrap();
    let pub_key = key.public_key_to_pem().unwrap();
    // Shadowing: re-bind the PEM bytes as hex strings.
    let priv_key = hex::encode(priv_key);
    let pub_key = hex::encode(pub_key);
    Wallet { pub_key, priv_key }
}
| {
write!(f, "{:?}-{:?}-{}", self.sender, self.reciver, self.amount)
} | identifier_body |
workloads.go | package illumioapi
import (
"encoding/json"
"errors"
"fmt"
"math"
"net/url"
"strconv"
"strings"
)
// An Agent is an Agent on a Workload
type Agent struct {
ActivePceFqdn string `json:"active_pce_fqdn,omitempty"`
Config *Config `json:"config,omitempty"`
Href string `json:"href,omitempty"`
SecureConnect *SecureConnect `json:"secure_connect,omitempty"`
Status *Status `json:"status,omitempty"`
TargetPceFqdn string `json:"target_pce_fqdn,omitempty"`
}
// AgentHealth represents the Agent Health of the Status of a Workload
type AgentHealth struct {
AuditEvent string `json:"audit_event,omitempty"`
Severity string `json:"severity,omitempty"`
Type string `json:"type,omitempty"`
}
// AgentHealthErrors represents the Agent Health Errors of the Status of a Workload
// This is depreciated - use AgentHealth
type AgentHealthErrors struct {
Errors []string `json:"errors,omitempty"`
Warnings []string `json:"warnings,omitempty"`
}
// Config represents the Configuration of an Agent on a Workload
type Config struct {
LogTraffic bool `json:"log_traffic,omitempty"`
Mode string `json:"mode,omitempty"`
SecurityPolicyUpdateMode string `json:"security_policy_update_mode,omitempty"`
}
// DeletedBy represents the Deleted By property of an object
type DeletedBy struct {
Href string `json:"href,omitempty"`
}
// An Interface represent the Interfaces of a Workload
type Interface struct {
Address string `json:"address,omitempty"`
CidrBlock int `json:"cidr_block,omitempty"`
DefaultGatewayAddress string `json:"default_gateway_address,omitempty"`
FriendlyName string `json:"friendly_name,omitempty"`
LinkState string `json:"link_state,omitempty"`
Name string `json:"name,omitempty"`
}
// OpenServicePorts represents open ports for a service running on a workload
type OpenServicePorts struct {
Address string `json:"address,omitempty"`
Package string `json:"package,omitempty"`
Port int `json:"port,omitempty"`
ProcessName string `json:"process_name,omitempty"`
Protocol int `json:"protocol,omitempty"`
User string `json:"user,omitempty"`
WinServiceName string `json:"win_service_name,omitempty"`
}
// A Workload represents a workload in the PCE.
// All fields use `omitempty`, so zero-valued fields are dropped from JSON
// payloads on create/update.
type Workload struct {
	// Agent is set only for managed workloads (those paired with a VEN).
	Agent *Agent `json:"agent,omitempty"`
	// Audit fields - read-only; stripped by SanitizeBulkUpdate before PUTs.
	CreatedAt string `json:"created_at,omitempty"`
	CreatedBy *CreatedBy `json:"created_by,omitempty"`
	DataCenter string `json:"data_center,omitempty"`
	DataCenterZone string `json:"data_center_zone,omitempty"`
	DeleteType string `json:"delete_type,omitempty"`
	Deleted *bool `json:"deleted,omitempty"`
	DeletedAt string `json:"deleted_at,omitempty"`
	DeletedBy *DeletedBy `json:"deleted_by,omitempty"`
	Description string `json:"description,omitempty"`
	ExternalDataReference string `json:"external_data_reference,omitempty"`
	ExternalDataSet string `json:"external_data_set,omitempty"`
	Hostname string `json:"hostname,omitempty"`
	// Href uniquely identifies the workload; required for updates.
	Href string `json:"href,omitempty"`
	Interfaces []*Interface `json:"interfaces,omitempty"`
	// Labels may hold href-only stubs or fully expanded key/value labels
	// (GetAllWorkloads expands them).
	Labels []*Label `json:"labels,omitempty"`
	Name string `json:"name,omitempty"`
	Online bool `json:"online,omitempty"`
	OsDetail string `json:"os_detail,omitempty"`
	OsID string `json:"os_id,omitempty"`
	PublicIP string `json:"public_ip,omitempty"`
	ServicePrincipalName string `json:"service_principal_name,omitempty"`
	ServiceProvider string `json:"service_provider,omitempty"`
	Services []*Services `json:"services,omitempty"`
	UpdatedAt string `json:"updated_at,omitempty"`
	UpdatedBy *UpdatedBy `json:"updated_by,omitempty"`
}
// SecureConnect represents SecureConnect for an Agent on a Workload
type SecureConnect struct {
MatchingIssuerName string `json:"matching_issuer_name,omitempty"`
}
// Services represent the Services running on a Workload
type Services struct {
CreatedAt string `json:"created_at,omitempty"`
OpenServicePorts []*OpenServicePorts `json:"open_service_ports,omitempty"`
UptimeSeconds int `json:"uptime_seconds,omitempty"`
}
// Status represents the Status of an Agent on a Workload
type Status struct {
AgentHealth []*AgentHealth `json:"agent_health,omitempty"`
AgentHealthErrors *AgentHealthErrors `json:"agent_health_errors,omitempty"`
AgentVersion string `json:"agent_version,omitempty"`
FirewallRuleCount int `json:"firewall_rule_count,omitempty"`
FwConfigCurrent bool `json:"fw_config_current,omitempty"`
LastHeartbeatOn string `json:"last_heartbeat_on,omitempty"`
ManagedSince string `json:"managed_since,omitempty"`
SecurityPolicyAppliedAt string `json:"security_policy_applied_at,omitempty"`
SecurityPolicyReceivedAt string `json:"security_policy_received_at,omitempty"`
SecurityPolicyRefreshAt string `json:"security_policy_refresh_at,omitempty"`
SecurityPolicySyncState string `json:"security_policy_sync_state,omitempty"`
UID string `json:"uid,omitempty"`
UptimeSeconds int `json:"uptime_seconds,omitempty"`
}
// GetAllWorkloads returns a slice of workloads in the Illumio PCE.
// The first API call to the PCE does not use the async option.
// If 500 or more workloads come back (the server-side page limit), it
// re-runs with async to fetch the complete list.
// Label HREFs on each workload are expanded into full key/value labels.
func GetAllWorkloads(pce PCE) ([]Workload, APIResponse, error) {
	var workloads []Workload
	var api APIResponse

	// Build the API URL.
	apiURL, err := url.Parse("https://" + pceSanitization(pce.FQDN) + ":" + strconv.Itoa(pce.Port) + "/api/v2/orgs/" + strconv.Itoa(pce.Org) + "/workloads")
	if err != nil {
		return nil, api, fmt.Errorf("get all workloads - %s", err)
	}

	// Call the API.
	api, err = apicall("GET", apiURL.String(), pce, nil, false)
	if err != nil {
		return nil, api, fmt.Errorf("get all workloads - %s", err)
	}
	// BUG FIX: the unmarshal error used to be silently discarded.
	if err := json.Unmarshal([]byte(api.RespBody), &workloads); err != nil {
		return nil, api, fmt.Errorf("get all workloads - unmarshal - %s", err)
	}

	// If length is >= 500, re-run with async.
	if len(workloads) >= 500 {
		api, err = apicall("GET", apiURL.String(), pce, nil, true)
		if err != nil {
			return nil, api, fmt.Errorf("get all workloads - %s", err)
		}
		if err := json.Unmarshal([]byte(api.RespBody), &workloads); err != nil {
			return nil, api, fmt.Errorf("get all workloads - unmarshal - %s", err)
		}
	}

	// Get all labels and build an href -> label lookup map.
	labels, _, err := GetAllLabels(pce)
	if err != nil {
		return nil, api, fmt.Errorf("get all workloads - get all labels - %s", err)
	}
	labelMap := make(map[string]Label, len(labels))
	for _, l := range labels {
		labelMap[l.Href] = l
	}

	// Replace each workload's label stubs (href only) with full label
	// values. NOTE(review): an href missing from the map zeroes the label.
	for _, w := range workloads {
		for _, l := range w.Labels {
			*l = labelMap[l.Href]
		}
	}
	return workloads, api, nil
}
// CreateWorkload creates a new unmanaged workload in the Illumio PCE
func CreateWorkload(pce PCE, workload Workload) (Workload, APIResponse, error) {
var newWL Workload
var api APIResponse
var err error
// Build the API URL
apiURL, err := url.Parse("https://" + pceSanitization(pce.FQDN) + ":" + strconv.Itoa(pce.Port) + "/api/v2/orgs/" + strconv.Itoa(pce.Org) + "/workloads")
if err != nil {
return newWL, api, fmt.Errorf("create workload - %s", err) |
// Call the API
workloadJSON, err := json.Marshal(workload)
if err != nil {
return newWL, api, fmt.Errorf("create workload - %s", err)
}
api, err = apicall("POST", apiURL.String(), pce, workloadJSON, false)
if err != nil {
return newWL, api, fmt.Errorf("create workload - %s", err)
}
// Marshal JSON
json.Unmarshal([]byte(api.RespBody), &newWL)
return newWL, api, nil
}
// UpdateWorkload updates an existing workload in the Illumio PCE.
// The provided workload struct must include an Href.
// Properties that cannot be included in the PUT method will be ignored.
func UpdateWorkload(pce PCE, workload Workload) (APIResponse, error) {
	var api APIResponse

	// Build the API URL from the workload's href.
	apiURL, err := url.Parse("https://" + pceSanitization(pce.FQDN) + ":" + strconv.Itoa(pce.Port) + "/api/v2" + workload.Href)
	if err != nil {
		return api, fmt.Errorf("update workload - %s", err)
	}

	// Strip the fields the PUT endpoint rejects, then serialize.
	workload.SanitizePut()
	payload, err := json.Marshal(workload)
	if err != nil {
		return api, fmt.Errorf("update workload - %s", err)
	}

	// Call the API.
	if api, err = apicall("PUT", apiURL.String(), pce, payload, false); err != nil {
		return api, fmt.Errorf("update workload - %s", err)
	}
	return api, nil
}
// UpdateLabel updates a workload struct with a new label href for the given
// key/value, creating the label in the PCE if it does not exist yet.
// It does not update the workload itself in the Illumio PCE. To reflect the
// change in your PCE, call UpdateLabel and then the UpdateWorkload function.
func (w *Workload) UpdateLabel(pce PCE, key, value string) error {
	var updatedLabels []*Label
	for _, l := range w.Labels {
		// Expand the href so we can see which key this label belongs to.
		x, _, err := GetLabelbyHref(pce, l.Href)
		if err != nil {
			return fmt.Errorf("error updating workload - %s", err)
		}
		// Labels on other keys are kept as-is.
		if x.Key != key {
			updatedLabels = append(updatedLabels, &Label{Href: l.Href})
			continue
		}
		// Look up the replacement label's href.
		newLabel, _, err := GetLabelbyKeyValue(pce, key, value)
		if err != nil {
			return fmt.Errorf("error updating workload - %s", err)
		}
		if newLabel.Href == "" {
			// The label doesn't exist in the PCE yet - create it.
			createdLabel, _, err := CreateLabel(pce, Label{Key: key, Value: value})
			if err != nil {
				return fmt.Errorf("error updating workload - %s", err)
			}
			updatedLabels = append(updatedLabels, &Label{Href: createdLabel.Href})
		} else {
			updatedLabels = append(updatedLabels, &Label{Href: newLabel.Href})
		}
	}
	// BUG FIX (cleanup): the label slice used to be reassigned on every loop
	// iteration while the loop was ranging over it; assign the rebuilt
	// slice once, after the loop. (range snapshots the slice header, so the
	// final state is the same - this just makes the intent explicit.)
	w.Labels = updatedLabels
	return nil
}
// BulkWorkload takes a bulk action on an array of workloads.
// Method must be create, update, or delete.
// Workloads are sent to the PCE in chunks of 1,000; one APIResponse is
// returned per chunk.
func BulkWorkload(pce PCE, workloads []Workload, method string) ([]APIResponse, error) {
	var apiResps []APIResponse
	var err error
	// Check on method
	method = strings.ToLower(method)
	if method != "create" && method != "update" && method != "delete" {
		return apiResps, errors.New("bulk workload error - method must be create, update, or delete")
	}
	// Sanitize update - strip fields the bulk update endpoint rejects.
	// (Ranging by value means each workload is sanitized on a copy; the
	// caller's slice is untouched.)
	if method == "update" {
		sanitizedWLs := []Workload{}
		for _, workload := range workloads {
			workload.SanitizeBulkUpdate()
			sanitizedWLs = append(sanitizedWLs, workload)
		}
		workloads = sanitizedWLs
	}
	// Build the API URL (e.g. .../workloads/bulk_update).
	apiURL, err := url.Parse("https://" + pceSanitization(pce.FQDN) + ":" + strconv.Itoa(pce.Port) + "/api/v2/orgs/" + strconv.Itoa(pce.Org) + "/workloads/bulk_" + method)
	if err != nil {
		return apiResps, fmt.Errorf("bulk workload error - %s", err)
	}
	// If the method is delete, we can only send Hrefs
	if method == "delete" {
		hrefWorkloads := []Workload{}
		for _, workload := range workloads {
			hrefWorkloads = append(hrefWorkloads, Workload{Href: workload.Href})
		}
		// Re-assign workloads to just the HREF
		workloads = hrefWorkloads
	}
	// Figure out how many API calls we need to make (1,000 per call; zero
	// workloads means zero calls and an empty response slice).
	numAPICalls := int(math.Ceil(float64(len(workloads)) / 1000))
	// Build the array to be passed to the API
	apiArrays := [][]Workload{}
	for i := 0; i < numAPICalls; i++ {
		// Get 1,000 elements if this is not the last array
		if (i + 1) != numAPICalls {
			apiArrays = append(apiArrays, workloads[i*1000:(1+i)*1000])
			// Get the rest on the last array
		} else {
			apiArrays = append(apiArrays, workloads[i*1000:])
		}
	}
	// Call the API for each array.
	// NOTE(review): every bulk method (including create and delete) is sent
	// as an HTTP PUT - confirm this matches the PCE bulk API contract.
	for _, apiArray := range apiArrays {
		workloadsJSON, err := json.Marshal(apiArray)
		if err != nil {
			return apiResps, fmt.Errorf("bulk workload error - %s", err)
		}
		// Uncomment this line if you want to print the JSON object
		// fmt.Println(string(workloadsJSON))
		api, err := apicall("PUT", apiURL.String(), pce, workloadsJSON, false)
		if err != nil {
			return apiResps, fmt.Errorf("bulk workload error - %s", err)
		}
		apiResps = append(apiResps, api)
	}
	return apiResps, nil
}
// SanitizeBulkUpdate removes the properties that may not be included when
// bulk-updating a workload via the PCE API.
func (w *Workload) SanitizeBulkUpdate() {
	// Read-only audit fields present on all workloads.
	w.CreatedAt = ""
	w.CreatedBy = nil
	w.DeleteType = ""
	w.Deleted = nil
	w.DeletedAt = ""
	w.DeletedBy = nil
	w.UpdatedAt = ""
	w.UpdatedBy = nil

	// Managed workloads: these fields are reported by the agent and cannot
	// be set through the API.
	if w.Agent != nil && w.Agent.Status != nil {
		w.Hostname = ""
		w.Interfaces = nil
		w.OsDetail = ""
		w.OsID = ""
		w.PublicIP = ""
		w.Agent.Status = nil
		w.Services = nil
		// BUG FIX (cleanup): Online was previously reset twice in this
		// branch; a single assignment is sufficient.
		w.Online = false
	}

	// Replace expanded labels with href-only stubs.
	newLabels := make([]*Label, 0, len(w.Labels))
	for _, l := range w.Labels {
		newLabels = append(newLabels, &Label{Href: l.Href})
	}
	w.Labels = newLabels
}
// SanitizePut removes the necessary properties to update an unmanaged and managed workload
func (w *Workload) SanitizePut() {
w.SanitizeBulkUpdate()
w.Href = ""
} | } | random_line_split |
workloads.go | package illumioapi
import (
"encoding/json"
"errors"
"fmt"
"math"
"net/url"
"strconv"
"strings"
)
// An Agent is an Agent on a Workload
type Agent struct {
ActivePceFqdn string `json:"active_pce_fqdn,omitempty"`
Config *Config `json:"config,omitempty"`
Href string `json:"href,omitempty"`
SecureConnect *SecureConnect `json:"secure_connect,omitempty"`
Status *Status `json:"status,omitempty"`
TargetPceFqdn string `json:"target_pce_fqdn,omitempty"`
}
// AgentHealth represents the Agent Health of the Status of a Workload
type AgentHealth struct {
AuditEvent string `json:"audit_event,omitempty"`
Severity string `json:"severity,omitempty"`
Type string `json:"type,omitempty"`
}
// AgentHealthErrors represents the Agent Health Errors of the Status of a Workload
// This is depreciated - use AgentHealth
type AgentHealthErrors struct {
Errors []string `json:"errors,omitempty"`
Warnings []string `json:"warnings,omitempty"`
}
// Config represents the Configuration of an Agent on a Workload
type Config struct {
LogTraffic bool `json:"log_traffic,omitempty"`
Mode string `json:"mode,omitempty"`
SecurityPolicyUpdateMode string `json:"security_policy_update_mode,omitempty"`
}
// DeletedBy represents the Deleted By property of an object
type DeletedBy struct {
Href string `json:"href,omitempty"`
}
// An Interface represent the Interfaces of a Workload
type Interface struct {
Address string `json:"address,omitempty"`
CidrBlock int `json:"cidr_block,omitempty"`
DefaultGatewayAddress string `json:"default_gateway_address,omitempty"`
FriendlyName string `json:"friendly_name,omitempty"`
LinkState string `json:"link_state,omitempty"`
Name string `json:"name,omitempty"`
}
// OpenServicePorts represents open ports for a service running on a workload
type OpenServicePorts struct {
Address string `json:"address,omitempty"`
Package string `json:"package,omitempty"`
Port int `json:"port,omitempty"`
ProcessName string `json:"process_name,omitempty"`
Protocol int `json:"protocol,omitempty"`
User string `json:"user,omitempty"`
WinServiceName string `json:"win_service_name,omitempty"`
}
// A Workload represents a workload in the PCE
type Workload struct {
Agent *Agent `json:"agent,omitempty"`
CreatedAt string `json:"created_at,omitempty"`
CreatedBy *CreatedBy `json:"created_by,omitempty"`
DataCenter string `json:"data_center,omitempty"`
DataCenterZone string `json:"data_center_zone,omitempty"`
DeleteType string `json:"delete_type,omitempty"`
Deleted *bool `json:"deleted,omitempty"`
DeletedAt string `json:"deleted_at,omitempty"`
DeletedBy *DeletedBy `json:"deleted_by,omitempty"`
Description string `json:"description,omitempty"`
ExternalDataReference string `json:"external_data_reference,omitempty"`
ExternalDataSet string `json:"external_data_set,omitempty"`
Hostname string `json:"hostname,omitempty"`
Href string `json:"href,omitempty"`
Interfaces []*Interface `json:"interfaces,omitempty"`
Labels []*Label `json:"labels,omitempty"`
Name string `json:"name,omitempty"`
Online bool `json:"online,omitempty"`
OsDetail string `json:"os_detail,omitempty"`
OsID string `json:"os_id,omitempty"`
PublicIP string `json:"public_ip,omitempty"`
ServicePrincipalName string `json:"service_principal_name,omitempty"`
ServiceProvider string `json:"service_provider,omitempty"`
Services []*Services `json:"services,omitempty"`
UpdatedAt string `json:"updated_at,omitempty"`
UpdatedBy *UpdatedBy `json:"updated_by,omitempty"`
}
// SecureConnect represents SecureConnect for an Agent on a Workload
type SecureConnect struct {
MatchingIssuerName string `json:"matching_issuer_name,omitempty"`
}
// Services represent the Services running on a Workload
type Services struct {
CreatedAt string `json:"created_at,omitempty"`
OpenServicePorts []*OpenServicePorts `json:"open_service_ports,omitempty"`
UptimeSeconds int `json:"uptime_seconds,omitempty"`
}
// Status represents the Status of an Agent on a Workload
type Status struct {
AgentHealth []*AgentHealth `json:"agent_health,omitempty"`
AgentHealthErrors *AgentHealthErrors `json:"agent_health_errors,omitempty"`
AgentVersion string `json:"agent_version,omitempty"`
FirewallRuleCount int `json:"firewall_rule_count,omitempty"`
FwConfigCurrent bool `json:"fw_config_current,omitempty"`
LastHeartbeatOn string `json:"last_heartbeat_on,omitempty"`
ManagedSince string `json:"managed_since,omitempty"`
SecurityPolicyAppliedAt string `json:"security_policy_applied_at,omitempty"`
SecurityPolicyReceivedAt string `json:"security_policy_received_at,omitempty"`
SecurityPolicyRefreshAt string `json:"security_policy_refresh_at,omitempty"`
SecurityPolicySyncState string `json:"security_policy_sync_state,omitempty"`
UID string `json:"uid,omitempty"`
UptimeSeconds int `json:"uptime_seconds,omitempty"`
}
// GetAllWorkloads returns an slice of workloads in the Illumio PCE.
// The first API call to the PCE does not use the async option.
// If the array length is >=500, it re-runs with async.
func GetAllWorkloads(pce PCE) ([]Workload, APIResponse, error) {
var workloads []Workload
var api APIResponse
// Build the API URL
apiURL, err := url.Parse("https://" + pceSanitization(pce.FQDN) + ":" + strconv.Itoa(pce.Port) + "/api/v2/orgs/" + strconv.Itoa(pce.Org) + "/workloads")
if err != nil {
return nil, api, fmt.Errorf("get all workloads - %s", err)
}
// Call the API
api, err = apicall("GET", apiURL.String(), pce, nil, false)
if err != nil {
return nil, api, fmt.Errorf("get all workloads - %s", err)
}
json.Unmarshal([]byte(api.RespBody), &workloads)
// If length is 500, re-run with async
if len(workloads) >= 500 {
api, err = apicall("GET", apiURL.String(), pce, nil, true)
if err != nil {
return nil, api, fmt.Errorf("get all workloads - %s", err)
}
// Unmarshal response to struct
json.Unmarshal([]byte(api.RespBody), &workloads)
}
// Get all labels and create a map
labels, _, err := GetAllLabels(pce)
if err != nil {
return nil, api, fmt.Errorf("get all workloads - get all labels - %s", err)
}
labelMap := make(map[string]Label)
for _, l := range labels {
labelMap[l.Href] = l
}
// Update the workloads array to label values and keys (not just HREFs)
for _, w := range workloads {
for _, l := range w.Labels {
*l = labelMap[l.Href]
}
}
return workloads, api, nil
}
// CreateWorkload creates a new unmanaged workload in the Illumio PCE
func CreateWorkload(pce PCE, workload Workload) (Workload, APIResponse, error) {
var newWL Workload
var api APIResponse
var err error
// Build the API URL
apiURL, err := url.Parse("https://" + pceSanitization(pce.FQDN) + ":" + strconv.Itoa(pce.Port) + "/api/v2/orgs/" + strconv.Itoa(pce.Org) + "/workloads")
if err != nil {
return newWL, api, fmt.Errorf("create workload - %s", err)
}
// Call the API
workloadJSON, err := json.Marshal(workload)
if err != nil {
return newWL, api, fmt.Errorf("create workload - %s", err)
}
api, err = apicall("POST", apiURL.String(), pce, workloadJSON, false)
if err != nil {
return newWL, api, fmt.Errorf("create workload - %s", err)
}
// Marshal JSON
json.Unmarshal([]byte(api.RespBody), &newWL)
return newWL, api, nil
}
// UpdateWorkload updates an existing workload in the Illumio PCE
// The provided workload struct must include an Href.
// Properties that cannot be included in the PUT method will be ignored.
func UpdateWorkload(pce PCE, workload Workload) (APIResponse, error) {
var api APIResponse
var err error
// Build the API URL
apiURL, err := url.Parse("https://" + pceSanitization(pce.FQDN) + ":" + strconv.Itoa(pce.Port) + "/api/v2" + workload.Href)
if err != nil |
workload.SanitizePut()
// Call the API
workloadJSON, err := json.Marshal(workload)
if err != nil {
return api, fmt.Errorf("update workload - %s", err)
}
api, err = apicall("PUT", apiURL.String(), pce, workloadJSON, false)
if err != nil {
return api, fmt.Errorf("update workload - %s", err)
}
return api, nil
}
// UpdateLabel updates a workload struct with new label href.
// It does not call the Illumio API. To reflect the change in your PCE,
// you'd use UpdateLabel method on the workload struct and then use the UpdateWorkload function
func (w *Workload) UpdateLabel(pce PCE, key, value string) error {
var updatedLabels []*Label
for _, l := range w.Labels {
x, _, err := GetLabelbyHref(pce, l.Href)
if err != nil {
return fmt.Errorf("error updating workload - %s", err)
}
if x.Key == key {
// Get our new label's href
newLabel, _, err := GetLabelbyKeyValue(pce, key, value)
if err != nil {
return fmt.Errorf("error updating workload - %s", err)
}
// Create the label if it doesn't exist
if newLabel.Href == "" {
createdLabel, _, err := CreateLabel(pce, Label{Key: key, Value: value})
if err != nil {
return fmt.Errorf("error updating workload - %s", err)
}
updatedLabels = append(updatedLabels, &Label{Href: createdLabel.Href})
// If the new label does exist, add it to the slice
} else {
updatedLabels = append(updatedLabels, &Label{Href: newLabel.Href})
}
} else {
updatedLabels = append(updatedLabels, &Label{Href: l.Href})
}
w.Labels = updatedLabels
}
return nil
}
// BulkWorkload takes a bulk action on an array of workloads.
// Method must be create, update, or delete
func BulkWorkload(pce PCE, workloads []Workload, method string) ([]APIResponse, error) {
var apiResps []APIResponse
var err error
// Check on method
method = strings.ToLower(method)
if method != "create" && method != "update" && method != "delete" {
return apiResps, errors.New("bulk workload error - method must be create, update, or delete")
}
// Sanitize update
if method == "update" {
sanitizedWLs := []Workload{}
for _, workload := range workloads {
workload.SanitizeBulkUpdate()
sanitizedWLs = append(sanitizedWLs, workload)
}
workloads = sanitizedWLs
}
// Build the API URL
apiURL, err := url.Parse("https://" + pceSanitization(pce.FQDN) + ":" + strconv.Itoa(pce.Port) + "/api/v2/orgs/" + strconv.Itoa(pce.Org) + "/workloads/bulk_" + method)
if err != nil {
return apiResps, fmt.Errorf("bulk workload error - %s", err)
}
// If the method is delete, we can only send Hrefs
if method == "delete" {
hrefWorkloads := []Workload{}
for _, workload := range workloads {
hrefWorkloads = append(hrefWorkloads, Workload{Href: workload.Href})
}
// Re-assign workloads to just the HREF
workloads = hrefWorkloads
}
// Figure out how many API calls we need to make
numAPICalls := int(math.Ceil(float64(len(workloads)) / 1000))
// Build the array to be passed to the API
apiArrays := [][]Workload{}
for i := 0; i < numAPICalls; i++ {
// Get 1,000 elements if this is not the last array
if (i + 1) != numAPICalls {
apiArrays = append(apiArrays, workloads[i*1000:(1+i)*1000])
// Get the rest on the last array
} else {
apiArrays = append(apiArrays, workloads[i*1000:])
}
}
// Call the API for each array
for _, apiArray := range apiArrays {
workloadsJSON, err := json.Marshal(apiArray)
if err != nil {
return apiResps, fmt.Errorf("bulk workload error - %s", err)
}
// Uncomment this line if you want to print the JSON object
// fmt.Println(string(workloadsJSON))
api, err := apicall("PUT", apiURL.String(), pce, workloadsJSON, false)
if err != nil {
return apiResps, fmt.Errorf("bulk workload error - %s", err)
}
apiResps = append(apiResps, api)
}
return apiResps, nil
}
// SanitizeBulkUpdate removes the properites necessary for a bulk update
func (w *Workload) SanitizeBulkUpdate() {
// All Workloads
w.CreatedAt = ""
w.CreatedBy = nil
w.DeleteType = ""
w.Deleted = nil
w.DeletedAt = ""
w.DeletedBy = nil
w.UpdatedAt = ""
w.UpdatedBy = nil
// Managed workloads
if w.Agent != nil && w.Agent.Status != nil {
w.Hostname = ""
w.Interfaces = nil
w.Online = false
w.OsDetail = ""
w.OsID = ""
w.PublicIP = ""
w.Agent.Status = nil
w.Services = nil
w.Online = false
}
// Replace Labels with Hrefs
newLabels := []*Label{}
for _, l := range w.Labels {
newLabel := Label{Href: l.Href}
newLabels = append(newLabels, &newLabel)
}
w.Labels = newLabels
}
// SanitizePut removes the necessary properties to update an unmanaged and managed workload
func (w *Workload) SanitizePut() {
w.SanitizeBulkUpdate()
w.Href = ""
}
| {
return api, fmt.Errorf("update workload - %s", err)
} | conditional_block |
workloads.go | package illumioapi
import (
"encoding/json"
"errors"
"fmt"
"math"
"net/url"
"strconv"
"strings"
)
// An Agent is an Agent on a Workload
type Agent struct {
ActivePceFqdn string `json:"active_pce_fqdn,omitempty"`
Config *Config `json:"config,omitempty"`
Href string `json:"href,omitempty"`
SecureConnect *SecureConnect `json:"secure_connect,omitempty"`
Status *Status `json:"status,omitempty"`
TargetPceFqdn string `json:"target_pce_fqdn,omitempty"`
}
// AgentHealth represents the Agent Health of the Status of a Workload
type AgentHealth struct {
AuditEvent string `json:"audit_event,omitempty"`
Severity string `json:"severity,omitempty"`
Type string `json:"type,omitempty"`
}
// AgentHealthErrors represents the Agent Health Errors of the Status of a Workload
// This is depreciated - use AgentHealth
type AgentHealthErrors struct {
Errors []string `json:"errors,omitempty"`
Warnings []string `json:"warnings,omitempty"`
}
// Config represents the Configuration of an Agent on a Workload
type Config struct {
LogTraffic bool `json:"log_traffic,omitempty"`
Mode string `json:"mode,omitempty"`
SecurityPolicyUpdateMode string `json:"security_policy_update_mode,omitempty"`
}
// DeletedBy represents the Deleted By property of an object
type DeletedBy struct {
Href string `json:"href,omitempty"`
}
// An Interface represent the Interfaces of a Workload
type Interface struct {
Address string `json:"address,omitempty"`
CidrBlock int `json:"cidr_block,omitempty"`
DefaultGatewayAddress string `json:"default_gateway_address,omitempty"`
FriendlyName string `json:"friendly_name,omitempty"`
LinkState string `json:"link_state,omitempty"`
Name string `json:"name,omitempty"`
}
// OpenServicePorts represents open ports for a service running on a workload
type OpenServicePorts struct {
Address string `json:"address,omitempty"`
Package string `json:"package,omitempty"`
Port int `json:"port,omitempty"`
ProcessName string `json:"process_name,omitempty"`
Protocol int `json:"protocol,omitempty"`
User string `json:"user,omitempty"`
WinServiceName string `json:"win_service_name,omitempty"`
}
// A Workload represents a workload in the PCE
type Workload struct {
Agent *Agent `json:"agent,omitempty"`
CreatedAt string `json:"created_at,omitempty"`
CreatedBy *CreatedBy `json:"created_by,omitempty"`
DataCenter string `json:"data_center,omitempty"`
DataCenterZone string `json:"data_center_zone,omitempty"`
DeleteType string `json:"delete_type,omitempty"`
Deleted *bool `json:"deleted,omitempty"`
DeletedAt string `json:"deleted_at,omitempty"`
DeletedBy *DeletedBy `json:"deleted_by,omitempty"`
Description string `json:"description,omitempty"`
ExternalDataReference string `json:"external_data_reference,omitempty"`
ExternalDataSet string `json:"external_data_set,omitempty"`
Hostname string `json:"hostname,omitempty"`
Href string `json:"href,omitempty"`
Interfaces []*Interface `json:"interfaces,omitempty"`
Labels []*Label `json:"labels,omitempty"`
Name string `json:"name,omitempty"`
Online bool `json:"online,omitempty"`
OsDetail string `json:"os_detail,omitempty"`
OsID string `json:"os_id,omitempty"`
PublicIP string `json:"public_ip,omitempty"`
ServicePrincipalName string `json:"service_principal_name,omitempty"`
ServiceProvider string `json:"service_provider,omitempty"`
Services []*Services `json:"services,omitempty"`
UpdatedAt string `json:"updated_at,omitempty"`
UpdatedBy *UpdatedBy `json:"updated_by,omitempty"`
}
// SecureConnect represents SecureConnect for an Agent on a Workload
type SecureConnect struct {
MatchingIssuerName string `json:"matching_issuer_name,omitempty"`
}
// Services represent the Services running on a Workload
type Services struct {
CreatedAt string `json:"created_at,omitempty"`
OpenServicePorts []*OpenServicePorts `json:"open_service_ports,omitempty"`
UptimeSeconds int `json:"uptime_seconds,omitempty"`
}
// Status represents the Status of an Agent on a Workload
type Status struct {
AgentHealth []*AgentHealth `json:"agent_health,omitempty"`
AgentHealthErrors *AgentHealthErrors `json:"agent_health_errors,omitempty"`
AgentVersion string `json:"agent_version,omitempty"`
FirewallRuleCount int `json:"firewall_rule_count,omitempty"`
FwConfigCurrent bool `json:"fw_config_current,omitempty"`
LastHeartbeatOn string `json:"last_heartbeat_on,omitempty"`
ManagedSince string `json:"managed_since,omitempty"`
SecurityPolicyAppliedAt string `json:"security_policy_applied_at,omitempty"`
SecurityPolicyReceivedAt string `json:"security_policy_received_at,omitempty"`
SecurityPolicyRefreshAt string `json:"security_policy_refresh_at,omitempty"`
SecurityPolicySyncState string `json:"security_policy_sync_state,omitempty"`
UID string `json:"uid,omitempty"`
UptimeSeconds int `json:"uptime_seconds,omitempty"`
}
// GetAllWorkloads returns an slice of workloads in the Illumio PCE.
// The first API call to the PCE does not use the async option.
// If the array length is >=500, it re-runs with async.
func GetAllWorkloads(pce PCE) ([]Workload, APIResponse, error) {
var workloads []Workload
var api APIResponse
// Build the API URL
apiURL, err := url.Parse("https://" + pceSanitization(pce.FQDN) + ":" + strconv.Itoa(pce.Port) + "/api/v2/orgs/" + strconv.Itoa(pce.Org) + "/workloads")
if err != nil {
return nil, api, fmt.Errorf("get all workloads - %s", err)
}
// Call the API
api, err = apicall("GET", apiURL.String(), pce, nil, false)
if err != nil {
return nil, api, fmt.Errorf("get all workloads - %s", err)
}
json.Unmarshal([]byte(api.RespBody), &workloads)
// If length is 500, re-run with async
if len(workloads) >= 500 {
api, err = apicall("GET", apiURL.String(), pce, nil, true)
if err != nil {
return nil, api, fmt.Errorf("get all workloads - %s", err)
}
// Unmarshal response to struct
json.Unmarshal([]byte(api.RespBody), &workloads)
}
// Get all labels and create a map
labels, _, err := GetAllLabels(pce)
if err != nil {
return nil, api, fmt.Errorf("get all workloads - get all labels - %s", err)
}
labelMap := make(map[string]Label)
for _, l := range labels {
labelMap[l.Href] = l
}
// Update the workloads array to label values and keys (not just HREFs)
for _, w := range workloads {
for _, l := range w.Labels {
*l = labelMap[l.Href]
}
}
return workloads, api, nil
}
// CreateWorkload creates a new unmanaged workload in the Illumio PCE
func CreateWorkload(pce PCE, workload Workload) (Workload, APIResponse, error) {
var newWL Workload
var api APIResponse
var err error
// Build the API URL
apiURL, err := url.Parse("https://" + pceSanitization(pce.FQDN) + ":" + strconv.Itoa(pce.Port) + "/api/v2/orgs/" + strconv.Itoa(pce.Org) + "/workloads")
if err != nil {
return newWL, api, fmt.Errorf("create workload - %s", err)
}
// Call the API
workloadJSON, err := json.Marshal(workload)
if err != nil {
return newWL, api, fmt.Errorf("create workload - %s", err)
}
api, err = apicall("POST", apiURL.String(), pce, workloadJSON, false)
if err != nil {
return newWL, api, fmt.Errorf("create workload - %s", err)
}
// Marshal JSON
json.Unmarshal([]byte(api.RespBody), &newWL)
return newWL, api, nil
}
// UpdateWorkload updates an existing workload in the Illumio PCE
// The provided workload struct must include an Href.
// Properties that cannot be included in the PUT method will be ignored.
func UpdateWorkload(pce PCE, workload Workload) (APIResponse, error) {
var api APIResponse
var err error
// Build the API URL
apiURL, err := url.Parse("https://" + pceSanitization(pce.FQDN) + ":" + strconv.Itoa(pce.Port) + "/api/v2" + workload.Href)
if err != nil {
return api, fmt.Errorf("update workload - %s", err)
}
workload.SanitizePut()
// Call the API
workloadJSON, err := json.Marshal(workload)
if err != nil {
return api, fmt.Errorf("update workload - %s", err)
}
api, err = apicall("PUT", apiURL.String(), pce, workloadJSON, false)
if err != nil {
return api, fmt.Errorf("update workload - %s", err)
}
return api, nil
}
// UpdateLabel updates a workload struct with new label href.
// It does not call the Illumio API. To reflect the change in your PCE,
// you'd use UpdateLabel method on the workload struct and then use the UpdateWorkload function
func (w *Workload) UpdateLabel(pce PCE, key, value string) error {
var updatedLabels []*Label
for _, l := range w.Labels {
x, _, err := GetLabelbyHref(pce, l.Href)
if err != nil {
return fmt.Errorf("error updating workload - %s", err)
}
if x.Key == key {
// Get our new label's href
newLabel, _, err := GetLabelbyKeyValue(pce, key, value)
if err != nil {
return fmt.Errorf("error updating workload - %s", err)
}
// Create the label if it doesn't exist
if newLabel.Href == "" {
createdLabel, _, err := CreateLabel(pce, Label{Key: key, Value: value})
if err != nil {
return fmt.Errorf("error updating workload - %s", err)
}
updatedLabels = append(updatedLabels, &Label{Href: createdLabel.Href})
// If the new label does exist, add it to the slice
} else {
updatedLabels = append(updatedLabels, &Label{Href: newLabel.Href})
}
} else {
updatedLabels = append(updatedLabels, &Label{Href: l.Href})
}
w.Labels = updatedLabels
}
return nil
}
// BulkWorkload takes a bulk action on an array of workloads.
// Method must be create, update, or delete
func BulkWorkload(pce PCE, workloads []Workload, method string) ([]APIResponse, error) {
var apiResps []APIResponse
var err error
// Check on method
method = strings.ToLower(method)
if method != "create" && method != "update" && method != "delete" {
return apiResps, errors.New("bulk workload error - method must be create, update, or delete")
}
// Sanitize update
if method == "update" {
sanitizedWLs := []Workload{}
for _, workload := range workloads {
workload.SanitizeBulkUpdate()
sanitizedWLs = append(sanitizedWLs, workload)
}
workloads = sanitizedWLs
}
// Build the API URL
apiURL, err := url.Parse("https://" + pceSanitization(pce.FQDN) + ":" + strconv.Itoa(pce.Port) + "/api/v2/orgs/" + strconv.Itoa(pce.Org) + "/workloads/bulk_" + method)
if err != nil {
return apiResps, fmt.Errorf("bulk workload error - %s", err)
}
// If the method is delete, we can only send Hrefs
if method == "delete" {
hrefWorkloads := []Workload{}
for _, workload := range workloads {
hrefWorkloads = append(hrefWorkloads, Workload{Href: workload.Href})
}
// Re-assign workloads to just the HREF
workloads = hrefWorkloads
}
// Figure out how many API calls we need to make
numAPICalls := int(math.Ceil(float64(len(workloads)) / 1000))
// Build the array to be passed to the API
apiArrays := [][]Workload{}
for i := 0; i < numAPICalls; i++ {
// Get 1,000 elements if this is not the last array
if (i + 1) != numAPICalls {
apiArrays = append(apiArrays, workloads[i*1000:(1+i)*1000])
// Get the rest on the last array
} else {
apiArrays = append(apiArrays, workloads[i*1000:])
}
}
// Call the API for each array
for _, apiArray := range apiArrays {
workloadsJSON, err := json.Marshal(apiArray)
if err != nil {
return apiResps, fmt.Errorf("bulk workload error - %s", err)
}
// Uncomment this line if you want to print the JSON object
// fmt.Println(string(workloadsJSON))
api, err := apicall("PUT", apiURL.String(), pce, workloadsJSON, false)
if err != nil {
return apiResps, fmt.Errorf("bulk workload error - %s", err)
}
apiResps = append(apiResps, api)
}
return apiResps, nil
}
// SanitizeBulkUpdate removes the properites necessary for a bulk update
func (w *Workload) SanitizeBulkUpdate() {
// All Workloads
w.CreatedAt = ""
w.CreatedBy = nil
w.DeleteType = ""
w.Deleted = nil
w.DeletedAt = ""
w.DeletedBy = nil
w.UpdatedAt = ""
w.UpdatedBy = nil
// Managed workloads
if w.Agent != nil && w.Agent.Status != nil {
w.Hostname = ""
w.Interfaces = nil
w.Online = false
w.OsDetail = ""
w.OsID = ""
w.PublicIP = ""
w.Agent.Status = nil
w.Services = nil
w.Online = false
}
// Replace Labels with Hrefs
newLabels := []*Label{}
for _, l := range w.Labels {
newLabel := Label{Href: l.Href}
newLabels = append(newLabels, &newLabel)
}
w.Labels = newLabels
}
// SanitizePut removes the necessary properties to update an unmanaged and managed workload
func (w *Workload) SanitizePut() | {
w.SanitizeBulkUpdate()
w.Href = ""
} | identifier_body | |
workloads.go | package illumioapi
import (
"encoding/json"
"errors"
"fmt"
"math"
"net/url"
"strconv"
"strings"
)
// An Agent is an Agent on a Workload
type Agent struct {
ActivePceFqdn string `json:"active_pce_fqdn,omitempty"`
Config *Config `json:"config,omitempty"`
Href string `json:"href,omitempty"`
SecureConnect *SecureConnect `json:"secure_connect,omitempty"`
Status *Status `json:"status,omitempty"`
TargetPceFqdn string `json:"target_pce_fqdn,omitempty"`
}
// AgentHealth represents the Agent Health of the Status of a Workload
type AgentHealth struct {
AuditEvent string `json:"audit_event,omitempty"`
Severity string `json:"severity,omitempty"`
Type string `json:"type,omitempty"`
}
// AgentHealthErrors represents the Agent Health Errors of the Status of a Workload
// This is depreciated - use AgentHealth
type AgentHealthErrors struct {
Errors []string `json:"errors,omitempty"`
Warnings []string `json:"warnings,omitempty"`
}
// Config represents the Configuration of an Agent on a Workload
type Config struct {
LogTraffic bool `json:"log_traffic,omitempty"`
Mode string `json:"mode,omitempty"`
SecurityPolicyUpdateMode string `json:"security_policy_update_mode,omitempty"`
}
// DeletedBy represents the Deleted By property of an object
type DeletedBy struct {
Href string `json:"href,omitempty"`
}
// An Interface represent the Interfaces of a Workload
type Interface struct {
Address string `json:"address,omitempty"`
CidrBlock int `json:"cidr_block,omitempty"`
DefaultGatewayAddress string `json:"default_gateway_address,omitempty"`
FriendlyName string `json:"friendly_name,omitempty"`
LinkState string `json:"link_state,omitempty"`
Name string `json:"name,omitempty"`
}
// OpenServicePorts represents open ports for a service running on a workload
type OpenServicePorts struct {
Address string `json:"address,omitempty"`
Package string `json:"package,omitempty"`
Port int `json:"port,omitempty"`
ProcessName string `json:"process_name,omitempty"`
Protocol int `json:"protocol,omitempty"`
User string `json:"user,omitempty"`
WinServiceName string `json:"win_service_name,omitempty"`
}
// A Workload represents a workload in the PCE
type Workload struct {
Agent *Agent `json:"agent,omitempty"`
CreatedAt string `json:"created_at,omitempty"`
CreatedBy *CreatedBy `json:"created_by,omitempty"`
DataCenter string `json:"data_center,omitempty"`
DataCenterZone string `json:"data_center_zone,omitempty"`
DeleteType string `json:"delete_type,omitempty"`
Deleted *bool `json:"deleted,omitempty"`
DeletedAt string `json:"deleted_at,omitempty"`
DeletedBy *DeletedBy `json:"deleted_by,omitempty"`
Description string `json:"description,omitempty"`
ExternalDataReference string `json:"external_data_reference,omitempty"`
ExternalDataSet string `json:"external_data_set,omitempty"`
Hostname string `json:"hostname,omitempty"`
Href string `json:"href,omitempty"`
Interfaces []*Interface `json:"interfaces,omitempty"`
Labels []*Label `json:"labels,omitempty"`
Name string `json:"name,omitempty"`
Online bool `json:"online,omitempty"`
OsDetail string `json:"os_detail,omitempty"`
OsID string `json:"os_id,omitempty"`
PublicIP string `json:"public_ip,omitempty"`
ServicePrincipalName string `json:"service_principal_name,omitempty"`
ServiceProvider string `json:"service_provider,omitempty"`
Services []*Services `json:"services,omitempty"`
UpdatedAt string `json:"updated_at,omitempty"`
UpdatedBy *UpdatedBy `json:"updated_by,omitempty"`
}
// SecureConnect represents SecureConnect for an Agent on a Workload
type SecureConnect struct {
MatchingIssuerName string `json:"matching_issuer_name,omitempty"`
}
// Services represent the Services running on a Workload
type Services struct {
CreatedAt string `json:"created_at,omitempty"`
OpenServicePorts []*OpenServicePorts `json:"open_service_ports,omitempty"`
UptimeSeconds int `json:"uptime_seconds,omitempty"`
}
// Status represents the Status of an Agent on a Workload
type Status struct {
AgentHealth []*AgentHealth `json:"agent_health,omitempty"`
AgentHealthErrors *AgentHealthErrors `json:"agent_health_errors,omitempty"`
AgentVersion string `json:"agent_version,omitempty"`
FirewallRuleCount int `json:"firewall_rule_count,omitempty"`
FwConfigCurrent bool `json:"fw_config_current,omitempty"`
LastHeartbeatOn string `json:"last_heartbeat_on,omitempty"`
ManagedSince string `json:"managed_since,omitempty"`
SecurityPolicyAppliedAt string `json:"security_policy_applied_at,omitempty"`
SecurityPolicyReceivedAt string `json:"security_policy_received_at,omitempty"`
SecurityPolicyRefreshAt string `json:"security_policy_refresh_at,omitempty"`
SecurityPolicySyncState string `json:"security_policy_sync_state,omitempty"`
UID string `json:"uid,omitempty"`
UptimeSeconds int `json:"uptime_seconds,omitempty"`
}
// GetAllWorkloads returns an slice of workloads in the Illumio PCE.
// The first API call to the PCE does not use the async option.
// If the array length is >=500, it re-runs with async.
func GetAllWorkloads(pce PCE) ([]Workload, APIResponse, error) {
var workloads []Workload
var api APIResponse
// Build the API URL
apiURL, err := url.Parse("https://" + pceSanitization(pce.FQDN) + ":" + strconv.Itoa(pce.Port) + "/api/v2/orgs/" + strconv.Itoa(pce.Org) + "/workloads")
if err != nil {
return nil, api, fmt.Errorf("get all workloads - %s", err)
}
// Call the API
api, err = apicall("GET", apiURL.String(), pce, nil, false)
if err != nil {
return nil, api, fmt.Errorf("get all workloads - %s", err)
}
json.Unmarshal([]byte(api.RespBody), &workloads)
// If length is 500, re-run with async
if len(workloads) >= 500 {
api, err = apicall("GET", apiURL.String(), pce, nil, true)
if err != nil {
return nil, api, fmt.Errorf("get all workloads - %s", err)
}
// Unmarshal response to struct
json.Unmarshal([]byte(api.RespBody), &workloads)
}
// Get all labels and create a map
labels, _, err := GetAllLabels(pce)
if err != nil {
return nil, api, fmt.Errorf("get all workloads - get all labels - %s", err)
}
labelMap := make(map[string]Label)
for _, l := range labels {
labelMap[l.Href] = l
}
// Update the workloads array to label values and keys (not just HREFs)
for _, w := range workloads {
for _, l := range w.Labels {
*l = labelMap[l.Href]
}
}
return workloads, api, nil
}
// CreateWorkload creates a new unmanaged workload in the Illumio PCE
func | (pce PCE, workload Workload) (Workload, APIResponse, error) {
var newWL Workload
var api APIResponse
var err error
// Build the API URL
apiURL, err := url.Parse("https://" + pceSanitization(pce.FQDN) + ":" + strconv.Itoa(pce.Port) + "/api/v2/orgs/" + strconv.Itoa(pce.Org) + "/workloads")
if err != nil {
return newWL, api, fmt.Errorf("create workload - %s", err)
}
// Call the API
workloadJSON, err := json.Marshal(workload)
if err != nil {
return newWL, api, fmt.Errorf("create workload - %s", err)
}
api, err = apicall("POST", apiURL.String(), pce, workloadJSON, false)
if err != nil {
return newWL, api, fmt.Errorf("create workload - %s", err)
}
// Marshal JSON
json.Unmarshal([]byte(api.RespBody), &newWL)
return newWL, api, nil
}
// UpdateWorkload updates an existing workload in the Illumio PCE
// The provided workload struct must include an Href.
// Properties that cannot be included in the PUT method will be ignored.
func UpdateWorkload(pce PCE, workload Workload) (APIResponse, error) {
var api APIResponse
var err error
// Build the API URL
apiURL, err := url.Parse("https://" + pceSanitization(pce.FQDN) + ":" + strconv.Itoa(pce.Port) + "/api/v2" + workload.Href)
if err != nil {
return api, fmt.Errorf("update workload - %s", err)
}
workload.SanitizePut()
// Call the API
workloadJSON, err := json.Marshal(workload)
if err != nil {
return api, fmt.Errorf("update workload - %s", err)
}
api, err = apicall("PUT", apiURL.String(), pce, workloadJSON, false)
if err != nil {
return api, fmt.Errorf("update workload - %s", err)
}
return api, nil
}
// UpdateLabel updates a workload struct with new label href.
// It does not call the Illumio API. To reflect the change in your PCE,
// you'd use UpdateLabel method on the workload struct and then use the UpdateWorkload function
func (w *Workload) UpdateLabel(pce PCE, key, value string) error {
var updatedLabels []*Label
for _, l := range w.Labels {
x, _, err := GetLabelbyHref(pce, l.Href)
if err != nil {
return fmt.Errorf("error updating workload - %s", err)
}
if x.Key == key {
// Get our new label's href
newLabel, _, err := GetLabelbyKeyValue(pce, key, value)
if err != nil {
return fmt.Errorf("error updating workload - %s", err)
}
// Create the label if it doesn't exist
if newLabel.Href == "" {
createdLabel, _, err := CreateLabel(pce, Label{Key: key, Value: value})
if err != nil {
return fmt.Errorf("error updating workload - %s", err)
}
updatedLabels = append(updatedLabels, &Label{Href: createdLabel.Href})
// If the new label does exist, add it to the slice
} else {
updatedLabels = append(updatedLabels, &Label{Href: newLabel.Href})
}
} else {
updatedLabels = append(updatedLabels, &Label{Href: l.Href})
}
w.Labels = updatedLabels
}
return nil
}
// BulkWorkload takes a bulk action on an array of workloads.
// Method must be create, update, or delete
func BulkWorkload(pce PCE, workloads []Workload, method string) ([]APIResponse, error) {
var apiResps []APIResponse
var err error
// Check on method
method = strings.ToLower(method)
if method != "create" && method != "update" && method != "delete" {
return apiResps, errors.New("bulk workload error - method must be create, update, or delete")
}
// Sanitize update
if method == "update" {
sanitizedWLs := []Workload{}
for _, workload := range workloads {
workload.SanitizeBulkUpdate()
sanitizedWLs = append(sanitizedWLs, workload)
}
workloads = sanitizedWLs
}
// Build the API URL
apiURL, err := url.Parse("https://" + pceSanitization(pce.FQDN) + ":" + strconv.Itoa(pce.Port) + "/api/v2/orgs/" + strconv.Itoa(pce.Org) + "/workloads/bulk_" + method)
if err != nil {
return apiResps, fmt.Errorf("bulk workload error - %s", err)
}
// If the method is delete, we can only send Hrefs
if method == "delete" {
hrefWorkloads := []Workload{}
for _, workload := range workloads {
hrefWorkloads = append(hrefWorkloads, Workload{Href: workload.Href})
}
// Re-assign workloads to just the HREF
workloads = hrefWorkloads
}
// Figure out how many API calls we need to make
numAPICalls := int(math.Ceil(float64(len(workloads)) / 1000))
// Build the array to be passed to the API
apiArrays := [][]Workload{}
for i := 0; i < numAPICalls; i++ {
// Get 1,000 elements if this is not the last array
if (i + 1) != numAPICalls {
apiArrays = append(apiArrays, workloads[i*1000:(1+i)*1000])
// Get the rest on the last array
} else {
apiArrays = append(apiArrays, workloads[i*1000:])
}
}
// Call the API for each array
for _, apiArray := range apiArrays {
workloadsJSON, err := json.Marshal(apiArray)
if err != nil {
return apiResps, fmt.Errorf("bulk workload error - %s", err)
}
// Uncomment this line if you want to print the JSON object
// fmt.Println(string(workloadsJSON))
api, err := apicall("PUT", apiURL.String(), pce, workloadsJSON, false)
if err != nil {
return apiResps, fmt.Errorf("bulk workload error - %s", err)
}
apiResps = append(apiResps, api)
}
return apiResps, nil
}
// SanitizeBulkUpdate removes the properites necessary for a bulk update
func (w *Workload) SanitizeBulkUpdate() {
// All Workloads
w.CreatedAt = ""
w.CreatedBy = nil
w.DeleteType = ""
w.Deleted = nil
w.DeletedAt = ""
w.DeletedBy = nil
w.UpdatedAt = ""
w.UpdatedBy = nil
// Managed workloads
if w.Agent != nil && w.Agent.Status != nil {
w.Hostname = ""
w.Interfaces = nil
w.Online = false
w.OsDetail = ""
w.OsID = ""
w.PublicIP = ""
w.Agent.Status = nil
w.Services = nil
w.Online = false
}
// Replace Labels with Hrefs
newLabels := []*Label{}
for _, l := range w.Labels {
newLabel := Label{Href: l.Href}
newLabels = append(newLabels, &newLabel)
}
w.Labels = newLabels
}
// SanitizePut removes the necessary properties to update an unmanaged and managed workload
func (w *Workload) SanitizePut() {
w.SanitizeBulkUpdate()
w.Href = ""
}
| CreateWorkload | identifier_name |
Context.js | import React, { Component } from 'react';
import parse from 'html-react-parser';
import { Link } from 'react-router-dom';
import SiteData from './assets/_data/_data';
import CatData from './assets/_data/_data-categories';
import SortFilterRangeData from './assets/_data/_data-filter-sort';
import { getDateToday, getExcerpt, apiGetItems, ConsoleLog, StripOpeningSlash } from './assets/js/Helpers';
const slugify = require('slugify');
const ItemContext = React.createContext();
//
export default class | extends Component {
state = {
dateToday: getDateToday(),
siteData: SiteData,
catData: CatData,
items: [],
filterIsActive: false,
categoryName: null,
categoryNameDefault: 'Live',
categoryArr: {},
brandArr: [],
sortedItems: [],
featuredItems: [],
featuredItemsArchive: [],
loading: true,
brand: 'all',
price: 0,
minPrice: 0,
maxPrice: 0,
maxPriceInit: 0,
minYear: 0,
minYearInit: 0,
maxYear: 0,
maxYearInit: 0,
priceRangeArr: [],
sortBy: 'DateDesc',
sortByArr: {},
sortRangeArr: []
};
/////////////////////////////////////////////////////////////////////////// GET data (homepage)
// Get data from API to show on the homepage
getData = async (getPageName) => {
if (!getPageName) return;
try {
this.setState({ loading: true });
const data = await fetch(CatData[this.state.categoryNameDefault].apiFeatured).then((data) => data.json());
const dataArchive = await fetch(CatData.Archive.apiFeatured).then((dataArchive) => dataArchive.json());
const dataOther = await fetch(CatData.General.apiFeatured).then((dataOther) => dataOther.json());
let items = this.formatData(data);
let itemsArchive = this.formatData(dataArchive);
let itemsOther = this.formatData(dataOther);
// ConsoleLog("[Context] getData > items..." + items);
//////////////
// FEATURED //
//////////////
// Featured items [Live]
let featuredItems = items.slice(0, SiteData.featuredItems.itemCount); // get first # items from main array
// Featured items [Archive]
let featuredItemsArchive = itemsArchive.slice(0, SiteData.featuredItems.itemCount);
let featuredItemsVideos = itemsOther.slice(2, 4);
let featuredItemsTestimonials = itemsOther.slice(0, 2);
ConsoleLog('[Context] featuredItemsVideos: ' + featuredItemsVideos);
///////////////
// SET STATE //
///////////////
this.setState({
featuredItems,
featuredItemsArchive,
featuredItemsVideos,
featuredItemsTestimonials,
loading: false
});
} catch (error) {
console.log('[Context] getData > error: ' + error);
}
};
// (END) getData
/////////////////////////////////////////////////////////////////////////// GET data (items)
// Get data from API to show on the items page(s)
// accept category and status parameter to determine api call
getDataItems = async (getCategoryName, getBrandSlug) => {
ConsoleLog('[Context] getDataItems() > getCategoryName: ' + getCategoryName);
ConsoleLog('[Context] getDataItems() > getBrandSlug: ' + getBrandSlug);
try {
this.setState({ loading: true });
let apiArr = {
base: CatData[getCategoryName].api,
brandName: getBrandSlug !== "all" ? getBrandSlug : null
}
const data = await fetch(apiGetItems(apiArr), {
method: 'GET'
}).then((data) => data.json());
const categoryName = getCategoryName; // ? getCategoryName : null;
const statusId = categoryName === 'Archive' ? 2 : 1;
const brandSlug = getBrandSlug ? getBrandSlug : null; // status determines Live or Archive
let allItems = this.formatData(data);
let items = allItems;
let sortedItems = [];
let brand = null;
let subcategoryArr = {};
if (brandSlug) {
subcategoryArr = allItems.find((x) => x.subcategoryArr.slug === brandSlug).subcategoryArr;
brand = subcategoryArr.id;
sortedItems = allItems.filter((item) => item.subcategoryArr.slug === brandSlug);
} else {
sortedItems = allItems;
}
////////////
// FILTER // properties based on items
////////////
// Price (2 dropdowns)
let minPrice = 0; //Math.min(...items.map(item => item.price));
let maxPrice = Math.max(...items.map((item) => item.price));
const maxPriceInit = Math.round((maxPrice / 100000).toFixed() * 100000);
const priceRangeArr = SiteData.priceRangeArr; // [0, 5000, 10000, ...]
// Year (2 numeric inputs)
let minYear = Math.min(...items.map((item) => item.year));
let maxYear = Math.max(...items.map((item) => item.year));
const minYearInit = minYear;
const maxYearInit = maxYear;
// Brand (dropdown)
const brandArr = this.setBrandArr(items);
const categoryArr = this.getCategoryArr(categoryName, statusId);
// setDocumentTitle(categoryArr.title);
//////////
// SORT // options based on items page type
//////////
// Sort (dropdown)
let sortRangeArr = [];
sortRangeArr.push(SortFilterRangeData.DateDesc);
sortRangeArr.push(SortFilterRangeData.DateAsc);
// CONDITION: Only show price option for Live pages
if (categoryName === 'Live') {
sortRangeArr.push(SortFilterRangeData.PriceDesc);
sortRangeArr.push(SortFilterRangeData.PriceAsc);
}
// CONDITION: Only show Years option for Live/Archive pages
sortRangeArr.push(SortFilterRangeData.YearDesc);
sortRangeArr.push(SortFilterRangeData.YearAsc);
// ConsoleLog("[Context] sortRangeArr..." + sortRangeArr);
const sortByArr = sortRangeArr[0];
const sortBy = sortByArr.name;
///////////////
// SET STATE //
///////////////
this.setState({
items,
categoryName,
categoryArr,
subcategoryArr,
brand,
brandArr,
sortedItems,
loading: false,
price: maxPrice,
minPrice,
maxPrice: maxPriceInit,
maxPriceInit,
minYear,
minYearInit,
maxYear,
maxYearInit,
priceRangeArr,
sortBy,
sortByArr,
sortRangeArr
});
} catch (error) {
console.log('[Context] getDataItems() > error...');
console.log(error);
}
};
// (END) getDataItems
componentDidMount() {
// ConsoleLog(
// "[Context] componentDidMount()... category = " +
// this.state.categoryName
// );
}
componentDidUpdate() {
ConsoleLog(
'[Context] componentDidUpdate() > categoryName: '
+ this.state.categoryName
+ ' | brand = ' + this.state.brand
);
}
/////////////////////////////////////////////////////////////////////////// SET brand array (items)
setBrandArr = (myObj) => {
// console.log('>>>', myObj[0].categoryArr);
const categorySlugBase = myObj[0].categoryArr.slug ? StripOpeningSlash(myObj[0].categoryArr.slug) : null;//get the base slug from first (any) item
const myArr = { list: myObj }; //put obj array into list for flatMap
const myUniqueBrandList = myArr.list
.flatMap((obj) => obj.subcategoryArr)
.filter((e, i, a) => a.findIndex(({ id, brand }) => id === e.id && brand === e.brand) === i);
// SORT alphabetically [A-Z]
myUniqueBrandList.sort(function(a, b) {
var nameA = a.brand.toLowerCase(),
nameB = b.brand.toLowerCase();
if (nameA < nameB)
//sort string ascending
return -1;
if (nameA > nameB) return 1;
return 0; //default return value (no sorting)
});
// COUNT items in subcategory
let myUniqueBrandListWithCount = this.countItemsInBrand(myUniqueBrandList, myObj);
myUniqueBrandListWithCount = [
{ id: 'all', brand: 'ALL', slug: categorySlugBase, itemCount: myObj.length },
...myUniqueBrandListWithCount
];
// console.log('[Context] myUniqueBrandList...', myUniqueBrandListWithCount);
return myUniqueBrandListWithCount;
};
/////////////////////////////////////////////////////////////////////////// COUNT items in each brand
countItemsInBrand(getBrandArr, getItemsArr) {
for (let i = 0; i < getBrandArr.length; i++) {
var tmp = getItemsArr.filter((item) => item.subcategoryArr.id === getBrandArr[i].id).length;
getBrandArr[i].itemCount = tmp;
}
return getBrandArr;
}
/////////////////////////////////////////////////////////////////////////// SORT used by filter
// REF: https://stackoverflow.com/questions/6913512/how-to-sort-an-array-of-objects-by-multiple-fields
fieldSorter = (fields) => {
return function(a, b) {
return fields
.map(function(o) {
var dir = 1;
if (o[0] === '-') {
dir = -1;
o = o.substring(1);
}
if (a[o] > b[o]) return dir;
if (a[o] < b[o]) return -dir;
return 0;
})
.reduce(function firstNonZeroValue(p, n) {
return p ? p : n;
}, 0);
};
};
/////////////////////////////////////////////////////////////////////////// FORMAT item data
formatData(getItemsData) {
let tempItems = getItemsData.map((dataItem) => {
let id = dataItem.id;
let name = parse(dataItem.name).toString();
let slug = this.generateSlugFromName(name);// 2do - get from API (dataItem.slug)
let status = dataItem.status;
let category = dataItem.category;
let categoryArr = this.getCategoryArr(category, dataItem.status);
let subcategoryArr = dataItem.catalogue_subcat;
if(!dataItem.catalogue_subcat.slug) dataItem.catalogue_subcat.slug = this.generateSlugFromName(dataItem.catalogue_subcat.brand);//if subcat slug is not defined
let price = dataItem.price;
let price_details = dataItem.price_details;
let source = dataItem.source !== '0' ? dataItem.source : null;
let brand = dataItem.brand;
let year = dataItem.year;
let date = dataItem.createdAt;
let excerpt = dataItem.excerpt ? getExcerpt(dataItem.excerpt) : '...';
let imageFilename = dataItem.image;
let image = dataItem.id > 801 ? `${process.env.REACT_APP_IMG_DIR}${imageFilename}` : `${process.env.REACT_APP_IMG_DIR_LARGE}${imageFilename}`;//show larger images for old items
let imageDir = dataItem.imageDir;
let imageHi = parseInt(dataItem.imageHi) === 1 ? true : false;
if(imageDir){//show images from new imageDir
image = `${process.env.REACT_APP_IMG_DDIR}${imageDir}/lg/${imageFilename}`;
// year += '.';//2do - remove this when all migration is complete
ConsoleLog(`[Context.js] IMG > ${id} > ${imageDir} + > ${image}`);
}else{
ConsoleLog(`[Context.js] IMG > ${id} >--- F ---> ${image}`);
}
let item = {
id,
status,
category,
categoryArr,
subcategoryArr,
name,
slug,
brand,
price,
price_details,
source,
year,
image,
imageDir,
imageHi,
imageFilename,
excerpt,
date
};
return item;
});
return tempItems;
}
/////////////////////////////////////////////////////////////////////////// FORMAT price
formatPrice = (price, status) => {
if (status === 2) return 'Sold';
if (status === 0) return 'This vehicle is no longer available';
if (price === 0) return '£0';
return price ? '£' + price.toString().replace(/(\d)(?=(\d{3})+(?!\d))/g, '$1,') : null;
};
/////////////////////////////////////////////////////////////////////////// FORMAT description
formatDescription = (str) => {
if (!str) return '';
var strPrep = str.replace(/\\/g, '');
var strParsed = parse(strPrep);
return strParsed;
};
/////////////////////////////////////////////////////////////////////////// FORMAT category link
// get slug from CatData based on categoryName
formatCategoryLink = (getCategoryName, getItemStatus) => {
let itemCategoryName = getCategoryName ? getCategoryName : this.state.categoryNameDefault;
const tmpCategoryArr = this.getCategoryArr(getCategoryName, getItemStatus);
if(tmpCategoryArr.slug) return tmpCategoryArr.slug;
// ConsoleLog(
// "[Context] formatCategoryLink > getCategoryName..." +
// getCategoryName + ", getItemStatus: " + getItemStatus
// );
return CatData[itemCategoryName].slug;
};
/////////////////////////////////////////////////////////////////////////// GET category link tag
// get slug from CatData based on categoryName
getCategoryLinkTag = (getCategoryArr) => {
ConsoleLog('[Context] getCategoryLinkTag...' + getCategoryArr);
return getCategoryArr.slug ? (
<Link to={getCategoryArr.slug} className="link-category">
{getCategoryArr.title}
</Link>
) : null;
};
/////////////////////////////////////////////////////////////////////////// FORMAT brand link
// get slug from CatData based on categoryName
formatBrandLink = ( getArr ) => {
ConsoleLog('[Context] formatBrandLink() | categoryName: ' + getArr.categoryName + ' | status: ' + getArr.status + ' | slug: ' + getArr.slug);
let apprendUrl = '';
if(getArr.categoryName === "Live") apprendUrl = CatData.Live.slugAppendBrand;//'/for-sale';
if(getArr.categoryName === "Archive") apprendUrl = CatData.Archive.slugAppendBrand;//'/sold';
if(getArr.categoryName === "Staff") apprendUrl = CatData.Staff.slugAppendBrand;//'/staff';
let slug = '/' + getArr.slug + apprendUrl;
ConsoleLog('[Context] formatBrandLink() slug: ' + slug + ' | apprendUrl: ' + apprendUrl);
return slug;
};
/////////////////////////////////////////////////////////////////////////// LOAD data (items)
// get all category data from CatData
getCategoryArr = (getCategoryName, getItemStatus) => {
let itemCategoryName = getCategoryName ? getCategoryName : this.state.categoryNameDefault;
if (itemCategoryName === 1) return CatData.General;
if (itemCategoryName === 2 && getItemStatus === 1) return CatData[this.state.categoryNameDefault];
if ((itemCategoryName === 2 || itemCategoryName === 'Archive') && getItemStatus === 2) return CatData.Archive;
if (itemCategoryName === 2 && getItemStatus === 0) return CatData.Archive;
if (itemCategoryName === 3) return CatData.Testimonials;
if (itemCategoryName === 4) return CatData.Press;
if (itemCategoryName === 5) return CatData.News;
if (itemCategoryName === 7) return CatData.PageText;
if (itemCategoryName === 10) return CatData.History;
if (itemCategoryName === 11) return CatData.Restoration;
if (itemCategoryName === 12) return CatData.Staff;
if (itemCategoryName === 13) return CatData.Videos;
// if (itemCategoryName === 2 && getItemStatus === 0) return CatData[this.state.categoryNameDefault];
// ConsoleLog(
// "[Context] getCategoryArr > getCategoryName...",
// getCategoryName + ", getItemStatus: " + getItemStatus
// );
return CatData[itemCategoryName];
};
/////////////////////////////////////////////////////////////////////////// FORMAT item link
// [domain]/item-slug/category-slug/item-id
formatItemLink = (getItem) => {
let { id, name, slug, status, category } = getItem;
// ConsoleLog("[Context] formatItemLink() > item this.state.categoryName: ", this.state.categoryName);
let itemSlug = slug;
if (!slug) itemSlug = slugify(name, { lower: true });
let itemLink = `/${itemSlug}`;
// if (this.state.categoryName && category === CatData[this.state.categoryName].id) {
// ConsoleLog("NO REPEAT CALL", CatData[this.state.categoryName].slug);
// itemLink += this.state.categoryArr.slug;
// } else {
itemLink += `${this.formatCategoryLink(category, status)}`; //this.state.categoryNameDefault
// }
itemLink += `/${id}`;
return itemLink;
};
/////////////////////////////////////////////////////////////////////////// GENERATE slug from name
// sanitize the name so it is URL friendly
// Porsche 911 GT3 3.8 PDK -> porsche-911-gt3-3.8-pdk
// 2do - have this done at the API or CMS (field) to save processing time
generateSlugFromName = (getName) => {
// console.log('[Context] generateSlugFromName: ', getName);
getName = getName ? slugify(getName.toString(), { lower: true }) : 'stock';
return getName;
};
/////////////////////////////////////////////////////////////////////////// STYLE append class
// accept default class (getDefault) ... append with any additional classes (getAppend)
// EXAMPLE: let classControl = styleAppendClass("form-control", "form-control-sm");
styleAppendClass = (getDefault, getAppend) => {
let classArr = [];
classArr.push(getDefault);
if (getAppend) {
classArr.push(getAppend);
}
let allClasses = classArr.join(' ');
return allClasses;
};
/////////////////////////////////////////////////////////////////////////// FILTER items
handleFilterChange = (event) => {
const target = event.target;
const value = target.type === 'checkbox' ? target.checked : target.value;
const name = event.target.name;
// const value = event.target.value;
// ConsoleLog("[Context] handleFilterChange > this is type..." + type);
ConsoleLog('[Context] handleFilterChange > ' + name + ' = ' + value);
// ConsoleLog("[Context] handleFilterChange > this is value..." + value);
this.setState(
{
[name]: value
},
this.filterItems
);
};
/////////////////////////////////////////////////////////////////////////// FILTER
// show/hide filter row
setFilterToggle = () => {
this.setState({ filterIsActive: this.state.filterIsActive ? false : true });
};
// filter items
filterItems = () => {
ConsoleLog('[Context] filterItems...');
let { items, categoryName, brand, minPrice, maxPrice, minYear, maxYear, sortBy, sortRangeArr } = this.state;
// all the items
let sortedItems = [ ...items ];
// filter by brand
if (brand && brand !== 'all') {
// ConsoleLog("[Context] filterItems() > BRAND CHANGED", brand);
sortedItems = sortedItems.filter((item) => item.brand === parseInt(brand));
}
// filter by price and year
sortedItems =
categoryName === 'Live'
? sortedItems
.filter((item) => item.price >= minPrice && item.price <= maxPrice)
.filter((item) => item.year >= minYear && item.year <= maxYear)
: sortedItems;
const sortByArr = sortRangeArr.find((item) => item.name === sortBy);
// ConsoleLog("[Context] filterItems > sortBy: " + sortBy + ", sortByArr: " + sortByArr);
const field = sortByArr.field;
const field2 = sortByArr.field2;
sortedItems.sort(this.fieldSorter([ field, field2 ]));
ConsoleLog('[Context] filterItems > sortedItems...' + sortedItems);
///////////////
// SET STATE //
///////////////
this.setState({
sortedItems
});
};
render() {
return (
<>
<ItemContext.Provider
value={{
...this.state,
getItem: this.getItem,
getData: this.getData,
getDataItems: this.getDataItems,
getCategoryArr: this.getCategoryArr,
formatPrice: this.formatPrice,
formatDescription: this.formatDescription,
formatItemLink: this.formatItemLink,
formatCategoryLink: this.formatCategoryLink,
getCategoryLinkTag: this.getCategoryLinkTag,
setBrandArr: this.setBrandArr,
setFilterToggle: this.setFilterToggle,
fieldSorter: this.fieldSorter,
handleFilterChange: this.handleFilterChange,
styleAppendClass: this.styleAppendClass,
formatBrandLink: this.formatBrandLink,
formatData: this.formatData
}}
>
{this.props.children}
</ItemContext.Provider>
</>
);
}
}
const ItemConsumer = ItemContext.Consumer;
export function withItemConsumer(Component) {
return function ConsumerWrapper(props) {
return <ItemConsumer>{(value) => <Component {...props} context={value} />}</ItemConsumer>;
};
}
export { ItemProvider, ItemConsumer, ItemContext };
| ItemProvider | identifier_name |
Context.js | import React, { Component } from 'react';
import parse from 'html-react-parser';
import { Link } from 'react-router-dom';
import SiteData from './assets/_data/_data';
import CatData from './assets/_data/_data-categories';
import SortFilterRangeData from './assets/_data/_data-filter-sort';
import { getDateToday, getExcerpt, apiGetItems, ConsoleLog, StripOpeningSlash } from './assets/js/Helpers';
const slugify = require('slugify');
const ItemContext = React.createContext();
//
export default class ItemProvider extends Component {
state = {
dateToday: getDateToday(),
siteData: SiteData,
catData: CatData,
items: [],
filterIsActive: false,
categoryName: null,
categoryNameDefault: 'Live',
categoryArr: {},
brandArr: [],
sortedItems: [],
featuredItems: [],
featuredItemsArchive: [],
loading: true,
brand: 'all',
price: 0,
minPrice: 0,
maxPrice: 0,
maxPriceInit: 0,
minYear: 0,
minYearInit: 0,
maxYear: 0,
maxYearInit: 0,
priceRangeArr: [],
sortBy: 'DateDesc',
sortByArr: {},
sortRangeArr: []
};
/////////////////////////////////////////////////////////////////////////// GET data (homepage)
// Get data from API to show on the homepage
getData = async (getPageName) => {
if (!getPageName) return;
try {
this.setState({ loading: true });
const data = await fetch(CatData[this.state.categoryNameDefault].apiFeatured).then((data) => data.json());
| let itemsArchive = this.formatData(dataArchive);
let itemsOther = this.formatData(dataOther);
// ConsoleLog("[Context] getData > items..." + items);
//////////////
// FEATURED //
//////////////
// Featured items [Live]
let featuredItems = items.slice(0, SiteData.featuredItems.itemCount); // get first # items from main array
// Featured items [Archive]
let featuredItemsArchive = itemsArchive.slice(0, SiteData.featuredItems.itemCount);
let featuredItemsVideos = itemsOther.slice(2, 4);
let featuredItemsTestimonials = itemsOther.slice(0, 2);
ConsoleLog('[Context] featuredItemsVideos: ' + featuredItemsVideos);
///////////////
// SET STATE //
///////////////
this.setState({
featuredItems,
featuredItemsArchive,
featuredItemsVideos,
featuredItemsTestimonials,
loading: false
});
} catch (error) {
console.log('[Context] getData > error: ' + error);
}
};
// (END) getData
/////////////////////////////////////////////////////////////////////////// GET data (items)
// Get data from API to show on the items page(s)
// accept category and status parameter to determine api call
getDataItems = async (getCategoryName, getBrandSlug) => {
ConsoleLog('[Context] getDataItems() > getCategoryName: ' + getCategoryName);
ConsoleLog('[Context] getDataItems() > getBrandSlug: ' + getBrandSlug);
try {
this.setState({ loading: true });
let apiArr = {
base: CatData[getCategoryName].api,
brandName: getBrandSlug !== "all" ? getBrandSlug : null
}
const data = await fetch(apiGetItems(apiArr), {
method: 'GET'
}).then((data) => data.json());
const categoryName = getCategoryName; // ? getCategoryName : null;
const statusId = categoryName === 'Archive' ? 2 : 1;
const brandSlug = getBrandSlug ? getBrandSlug : null; // status determines Live or Archive
let allItems = this.formatData(data);
let items = allItems;
let sortedItems = [];
let brand = null;
let subcategoryArr = {};
if (brandSlug) {
subcategoryArr = allItems.find((x) => x.subcategoryArr.slug === brandSlug).subcategoryArr;
brand = subcategoryArr.id;
sortedItems = allItems.filter((item) => item.subcategoryArr.slug === brandSlug);
} else {
sortedItems = allItems;
}
////////////
// FILTER // properties based on items
////////////
// Price (2 dropdowns)
let minPrice = 0; //Math.min(...items.map(item => item.price));
let maxPrice = Math.max(...items.map((item) => item.price));
const maxPriceInit = Math.round((maxPrice / 100000).toFixed() * 100000);
const priceRangeArr = SiteData.priceRangeArr; // [0, 5000, 10000, ...]
// Year (2 numeric inputs)
let minYear = Math.min(...items.map((item) => item.year));
let maxYear = Math.max(...items.map((item) => item.year));
const minYearInit = minYear;
const maxYearInit = maxYear;
// Brand (dropdown)
const brandArr = this.setBrandArr(items);
const categoryArr = this.getCategoryArr(categoryName, statusId);
// setDocumentTitle(categoryArr.title);
//////////
// SORT // options based on items page type
//////////
// Sort (dropdown)
let sortRangeArr = [];
sortRangeArr.push(SortFilterRangeData.DateDesc);
sortRangeArr.push(SortFilterRangeData.DateAsc);
// CONDITION: Only show price option for Live pages
if (categoryName === 'Live') {
sortRangeArr.push(SortFilterRangeData.PriceDesc);
sortRangeArr.push(SortFilterRangeData.PriceAsc);
}
// CONDITION: Only show Years option for Live/Archive pages
sortRangeArr.push(SortFilterRangeData.YearDesc);
sortRangeArr.push(SortFilterRangeData.YearAsc);
// ConsoleLog("[Context] sortRangeArr..." + sortRangeArr);
const sortByArr = sortRangeArr[0];
const sortBy = sortByArr.name;
///////////////
// SET STATE //
///////////////
this.setState({
items,
categoryName,
categoryArr,
subcategoryArr,
brand,
brandArr,
sortedItems,
loading: false,
price: maxPrice,
minPrice,
maxPrice: maxPriceInit,
maxPriceInit,
minYear,
minYearInit,
maxYear,
maxYearInit,
priceRangeArr,
sortBy,
sortByArr,
sortRangeArr
});
} catch (error) {
console.log('[Context] getDataItems() > error...');
console.log(error);
}
};
// (END) getDataItems
componentDidMount() {
// ConsoleLog(
// "[Context] componentDidMount()... category = " +
// this.state.categoryName
// );
}
componentDidUpdate() {
ConsoleLog(
'[Context] componentDidUpdate() > categoryName: '
+ this.state.categoryName
+ ' | brand = ' + this.state.brand
);
}
/////////////////////////////////////////////////////////////////////////// SET brand array (items)
setBrandArr = (myObj) => {
// console.log('>>>', myObj[0].categoryArr);
const categorySlugBase = myObj[0].categoryArr.slug ? StripOpeningSlash(myObj[0].categoryArr.slug) : null;//get the base slug from first (any) item
const myArr = { list: myObj }; //put obj array into list for flatMap
const myUniqueBrandList = myArr.list
.flatMap((obj) => obj.subcategoryArr)
.filter((e, i, a) => a.findIndex(({ id, brand }) => id === e.id && brand === e.brand) === i);
// SORT alphabetically [A-Z]
myUniqueBrandList.sort(function(a, b) {
var nameA = a.brand.toLowerCase(),
nameB = b.brand.toLowerCase();
if (nameA < nameB)
//sort string ascending
return -1;
if (nameA > nameB) return 1;
return 0; //default return value (no sorting)
});
// COUNT items in subcategory
let myUniqueBrandListWithCount = this.countItemsInBrand(myUniqueBrandList, myObj);
myUniqueBrandListWithCount = [
{ id: 'all', brand: 'ALL', slug: categorySlugBase, itemCount: myObj.length },
...myUniqueBrandListWithCount
];
// console.log('[Context] myUniqueBrandList...', myUniqueBrandListWithCount);
return myUniqueBrandListWithCount;
};
/////////////////////////////////////////////////////////////////////////// COUNT items in each brand
countItemsInBrand(getBrandArr, getItemsArr) {
for (let i = 0; i < getBrandArr.length; i++) {
var tmp = getItemsArr.filter((item) => item.subcategoryArr.id === getBrandArr[i].id).length;
getBrandArr[i].itemCount = tmp;
}
return getBrandArr;
}
/////////////////////////////////////////////////////////////////////////// SORT used by filter
// REF: https://stackoverflow.com/questions/6913512/how-to-sort-an-array-of-objects-by-multiple-fields
fieldSorter = (fields) => {
return function(a, b) {
return fields
.map(function(o) {
var dir = 1;
if (o[0] === '-') {
dir = -1;
o = o.substring(1);
}
if (a[o] > b[o]) return dir;
if (a[o] < b[o]) return -dir;
return 0;
})
.reduce(function firstNonZeroValue(p, n) {
return p ? p : n;
}, 0);
};
};
/////////////////////////////////////////////////////////////////////////// FORMAT item data
formatData(getItemsData) {
let tempItems = getItemsData.map((dataItem) => {
let id = dataItem.id;
let name = parse(dataItem.name).toString();
let slug = this.generateSlugFromName(name);// 2do - get from API (dataItem.slug)
let status = dataItem.status;
let category = dataItem.category;
let categoryArr = this.getCategoryArr(category, dataItem.status);
let subcategoryArr = dataItem.catalogue_subcat;
if(!dataItem.catalogue_subcat.slug) dataItem.catalogue_subcat.slug = this.generateSlugFromName(dataItem.catalogue_subcat.brand);//if subcat slug is not defined
let price = dataItem.price;
let price_details = dataItem.price_details;
let source = dataItem.source !== '0' ? dataItem.source : null;
let brand = dataItem.brand;
let year = dataItem.year;
let date = dataItem.createdAt;
let excerpt = dataItem.excerpt ? getExcerpt(dataItem.excerpt) : '...';
let imageFilename = dataItem.image;
let image = dataItem.id > 801 ? `${process.env.REACT_APP_IMG_DIR}${imageFilename}` : `${process.env.REACT_APP_IMG_DIR_LARGE}${imageFilename}`;//show larger images for old items
let imageDir = dataItem.imageDir;
let imageHi = parseInt(dataItem.imageHi) === 1 ? true : false;
if(imageDir){//show images from new imageDir
image = `${process.env.REACT_APP_IMG_DDIR}${imageDir}/lg/${imageFilename}`;
// year += '.';//2do - remove this when all migration is complete
ConsoleLog(`[Context.js] IMG > ${id} > ${imageDir} + > ${image}`);
}else{
ConsoleLog(`[Context.js] IMG > ${id} >--- F ---> ${image}`);
}
let item = {
id,
status,
category,
categoryArr,
subcategoryArr,
name,
slug,
brand,
price,
price_details,
source,
year,
image,
imageDir,
imageHi,
imageFilename,
excerpt,
date
};
return item;
});
return tempItems;
}
/////////////////////////////////////////////////////////////////////////// FORMAT price
formatPrice = (price, status) => {
if (status === 2) return 'Sold';
if (status === 0) return 'This vehicle is no longer available';
if (price === 0) return '£0';
return price ? '£' + price.toString().replace(/(\d)(?=(\d{3})+(?!\d))/g, '$1,') : null;
};
/////////////////////////////////////////////////////////////////////////// FORMAT description
formatDescription = (str) => {
if (!str) return '';
var strPrep = str.replace(/\\/g, '');
var strParsed = parse(strPrep);
return strParsed;
};
/////////////////////////////////////////////////////////////////////////// FORMAT category link
// get slug from CatData based on categoryName
formatCategoryLink = (getCategoryName, getItemStatus) => {
let itemCategoryName = getCategoryName ? getCategoryName : this.state.categoryNameDefault;
const tmpCategoryArr = this.getCategoryArr(getCategoryName, getItemStatus);
if(tmpCategoryArr.slug) return tmpCategoryArr.slug;
// ConsoleLog(
// "[Context] formatCategoryLink > getCategoryName..." +
// getCategoryName + ", getItemStatus: " + getItemStatus
// );
return CatData[itemCategoryName].slug;
};
/////////////////////////////////////////////////////////////////////////// GET category link tag
// get slug from CatData based on categoryName
getCategoryLinkTag = (getCategoryArr) => {
ConsoleLog('[Context] getCategoryLinkTag...' + getCategoryArr);
return getCategoryArr.slug ? (
<Link to={getCategoryArr.slug} className="link-category">
{getCategoryArr.title}
</Link>
) : null;
};
/////////////////////////////////////////////////////////////////////////// FORMAT brand link
// get slug from CatData based on categoryName
formatBrandLink = ( getArr ) => {
ConsoleLog('[Context] formatBrandLink() | categoryName: ' + getArr.categoryName + ' | status: ' + getArr.status + ' | slug: ' + getArr.slug);
let apprendUrl = '';
if(getArr.categoryName === "Live") apprendUrl = CatData.Live.slugAppendBrand;//'/for-sale';
if(getArr.categoryName === "Archive") apprendUrl = CatData.Archive.slugAppendBrand;//'/sold';
if(getArr.categoryName === "Staff") apprendUrl = CatData.Staff.slugAppendBrand;//'/staff';
let slug = '/' + getArr.slug + apprendUrl;
ConsoleLog('[Context] formatBrandLink() slug: ' + slug + ' | apprendUrl: ' + apprendUrl);
return slug;
};
/////////////////////////////////////////////////////////////////////////// LOAD data (items)
// get all category data from CatData
getCategoryArr = (getCategoryName, getItemStatus) => {
let itemCategoryName = getCategoryName ? getCategoryName : this.state.categoryNameDefault;
if (itemCategoryName === 1) return CatData.General;
if (itemCategoryName === 2 && getItemStatus === 1) return CatData[this.state.categoryNameDefault];
if ((itemCategoryName === 2 || itemCategoryName === 'Archive') && getItemStatus === 2) return CatData.Archive;
if (itemCategoryName === 2 && getItemStatus === 0) return CatData.Archive;
if (itemCategoryName === 3) return CatData.Testimonials;
if (itemCategoryName === 4) return CatData.Press;
if (itemCategoryName === 5) return CatData.News;
if (itemCategoryName === 7) return CatData.PageText;
if (itemCategoryName === 10) return CatData.History;
if (itemCategoryName === 11) return CatData.Restoration;
if (itemCategoryName === 12) return CatData.Staff;
if (itemCategoryName === 13) return CatData.Videos;
// if (itemCategoryName === 2 && getItemStatus === 0) return CatData[this.state.categoryNameDefault];
// ConsoleLog(
// "[Context] getCategoryArr > getCategoryName...",
// getCategoryName + ", getItemStatus: " + getItemStatus
// );
return CatData[itemCategoryName];
};
/////////////////////////////////////////////////////////////////////////// FORMAT item link
// [domain]/item-slug/category-slug/item-id
// Build the canonical URL for an item: /item-slug/category-slug/item-id.
// Items without a stored slug get one generated from their name.
formatItemLink = (getItem) => {
	const { id, name, slug, status, category } = getItem;
	const itemSlug = slug ? slug : slugify(name, { lower: true });
	const categoryPart = this.formatCategoryLink(category, status);
	return `/${itemSlug}${categoryPart}/${id}`;
};
/////////////////////////////////////////////////////////////////////////// GENERATE slug from name
// sanitize the name so it is URL friendly
// Porsche 911 GT3 3.8 PDK -> porsche-911-gt3-3.8-pdk
// 2do - have this done at the API or CMS (field) to save processing time
// Sanitize a name so it is URL friendly, e.g.
// "Porsche 911 GT3 3.8 PDK" -> "porsche-911-gt3-3.8-pdk".
// Falsy names fall back to the literal slug 'stock'.
// 2do - have this done at the API or CMS (field) to save processing time
generateSlugFromName = (getName) => {
	if (!getName) return 'stock';
	return slugify(getName.toString(), { lower: true });
};
/////////////////////////////////////////////////////////////////////////// STYLE append class
// accept default class (getDefault) ... append with any additional classes (getAppend)
// EXAMPLE: let classControl = styleAppendClass("form-control", "form-control-sm");
styleAppendClass = (getDefault, getAppend) => {
let classArr = [];
classArr.push(getDefault);
if (getAppend) {
classArr.push(getAppend);
}
let allClasses = classArr.join(' ');
return allClasses;
};
/////////////////////////////////////////////////////////////////////////// FILTER items
// Store a changed filter control in state (checkboxes report `checked`,
// everything else reports `value`), then re-run filterItems once the
// state update has been applied.
handleFilterChange = (event) => {
	const { target } = event;
	const name = target.name;
	const value = target.type === 'checkbox' ? target.checked : target.value;
	ConsoleLog('[Context] handleFilterChange > ' + name + ' = ' + value);
	this.setState({ [name]: value }, this.filterItems);
};
/////////////////////////////////////////////////////////////////////////// FILTER
// show/hide filter row
// Show/hide the filter row.
// Uses the functional setState form because the next value is derived from
// the previous state — React may batch updates, so reading this.state
// directly here can act on a stale value.
setFilterToggle = () => {
	this.setState((prevState) => ({ filterIsActive: !prevState.filterIsActive }));
};
// filter items
// Apply the brand / price / year filters and the selected sort to
// this.state.items, writing the result to this.state.sortedItems.
filterItems = () => {
	ConsoleLog('[Context] filterItems...');
	let { items, categoryName, brand, minPrice, maxPrice, minYear, maxYear, sortBy, sortRangeArr } = this.state;
	// work on a copy so sort() never mutates this.state.items in place
	let sortedItems = [ ...items ];
	// filter by brand (select values arrive as strings, item.brand is numeric)
	if (brand && brand !== 'all') {
		sortedItems = sortedItems.filter((item) => item.brand === parseInt(brand));
	}
	// price and year ranges only apply to the 'Live' category
	sortedItems =
		categoryName === 'Live'
			? sortedItems
					.filter((item) => item.price >= minPrice && item.price <= maxPrice)
					.filter((item) => item.year >= minYear && item.year <= maxYear)
			: sortedItems;
	// resolve the sort descriptor; guard against an unknown sortBy value so a
	// bad selection logs instead of throwing on sortByArr.field (Array.find
	// returns undefined when nothing matches)
	const sortByArr = sortRangeArr.find((item) => item.name === sortBy);
	if (sortByArr) {
		sortedItems.sort(this.fieldSorter([ sortByArr.field, sortByArr.field2 ]));
	} else {
		ConsoleLog('[Context] filterItems > unknown sortBy: ' + sortBy);
	}
	ConsoleLog('[Context] filterItems > sortedItems...' + sortedItems);
	this.setState({
		sortedItems
	});
};
render() {
return (
<>
<ItemContext.Provider
value={{
...this.state,
getItem: this.getItem,
getData: this.getData,
getDataItems: this.getDataItems,
getCategoryArr: this.getCategoryArr,
formatPrice: this.formatPrice,
formatDescription: this.formatDescription,
formatItemLink: this.formatItemLink,
formatCategoryLink: this.formatCategoryLink,
getCategoryLinkTag: this.getCategoryLinkTag,
setBrandArr: this.setBrandArr,
setFilterToggle: this.setFilterToggle,
fieldSorter: this.fieldSorter,
handleFilterChange: this.handleFilterChange,
styleAppendClass: this.styleAppendClass,
formatBrandLink: this.formatBrandLink,
formatData: this.formatData
}}
>
{this.props.children}
</ItemContext.Provider>
</>
);
}
}
const ItemConsumer = ItemContext.Consumer;
export function withItemConsumer(Component) {
return function ConsumerWrapper(props) {
return <ItemConsumer>{(value) => <Component {...props} context={value} />}</ItemConsumer>;
};
}
export { ItemProvider, ItemConsumer, ItemContext }; | const dataArchive = await fetch(CatData.Archive.apiFeatured).then((dataArchive) => dataArchive.json());
const dataOther = await fetch(CatData.General.apiFeatured).then((dataOther) => dataOther.json());
let items = this.formatData(data);
| random_line_split |
WikitudePlugin.js | /**
* Release date: 29.07.14
*/
/**
 * Wikitude cordova plugin wrapper (release 29.07.14).
 * Holds the SDK licence key, the augmented-reality mode, and the callbacks
 * used by the device-compatibility check and the ARchitect-world launch.
 */
var WikitudePlugin = function() {
	// SDK key from the Wikitude store; leave empty for a trial build.
	this._sdkKey = "ENTER-YOUR-KEY-HERE";
	// Set by the isDeviceSupported check; gates loadARchitectWorld.
	this._isDeviceSupported = false;
	// "Geo" (lat/long placement), "IR" (image recognition only — supports a
	// wider range of devices, but IR needs a dual-core CPU to run well), or
	// "IrAndGeo" for worlds that need both.
	this._augmentedRealityMode = "IrAndGeo";
	// Device-compatibility callbacks supplied by the developer.
	this._onDeviceSupportedCallback = null;
	this._onDeviceNotSupportedCallback = null;
	// ARchitect-world launch callbacks supplied by the developer.
	this._onARchitectWorldLaunchedCallback = null;
	this._onARchitectWorldFailedLaunchingCallback = null;
};
/*
* =============================================================================================================================
*
* PUBLIC API
*
* =============================================================================================================================
*/
/* Managing ARchitect world loading */
/**
* Use this function to check if the current device is capable of running ARchitect Worlds.
*
* @param {function} successCallback A callback which is called if the device is capable of running ARchitect Worlds.
* @param {function} errorCallback A callback which is called if the device is not capable of running ARchitect Worlds.
*/
WikitudePlugin.prototype.isDeviceSupported = function(successCallback, errorCallback) {
// Store a reference to the success and error callback function because we intercept the callbacks ourself but need to call the developer ones afterwards
this._onDeviceSupportedCallback = successCallback;
this._onDeviceNotSupportedCallback = errorCallback;
// Ask the native side whether this device can run ARchitect Worlds in the
// configured mode. The intercepting callbacks record the result in
// _isDeviceSupported before forwarding to the callbacks stored above.
cordova.exec(this.deviceIsARchitectReady, this.deviceIsNotARchitectReady, "WikitudePlugin", "isDeviceSupported", [this._augmentedRealityMode]);
};
/**
* Use this function to load an ARchitect World.
*
* @param {String} worldPath The path to an ARchitect world, ether on the device or on e.g. your Dropbox.
*/
WikitudePlugin.prototype.loadARchitectWorld = function(worldPath) {
// before we actually call load, we check again if the device is able to open the world
if (this._isDeviceSupported) {
// the 'open' function of the Wikitude Plugin requires some parameters
// @param {String} SDKKey (required) The Wikitude SDK license key that you received with your purchase
// @param {String} ARchitectWorldPath (required) The path to a local ARchitect world or to a ARchitect world on a server or your dropbox
// @param {String} AugmentedRealityMode (optional) describes in more detail how the Wikitude SDK should be instantiated
cordova.exec(this.worldLaunched, this.worldFailedLaunching, "WikitudePlugin", "open", [{
"SDKKey": this._sdkKey,
"ARchitectWorldPath": worldPath,
"AugmentedRealityMode": this._augmentedRealityMode
}]);
// We add an event listener on the resume and pause event of the application lifecycle
// (removed again in close(); the prototype methods are stable references,
// so the matching removeEventListener calls succeed)
document.addEventListener("resume", this.onResume, false);
document.addEventListener("pause", this.onPause, false);
} else {
// If the device is not supported, we call the device not supported callback again.
// NOTE(review): this assumes isDeviceSupported() ran first so the callback
// is populated — confirm callers always do so.
if (this._onDeviceNotSupportedCallback) {
this._onDeviceNotSupportedCallback();
}
}
};
/* Managing the Wikitude SDK Lifecycle */
/**
* Use this function to stop the Wikitude SDK and to remove it from the screen.
*/
WikitudePlugin.prototype.close = function() {
// Unhook the lifecycle listeners registered in loadARchitectWorld before
// tearing down the SDK view.
document.removeEventListener("pause", this.onPause, false);
document.removeEventListener("resume", this.onResume, false);
cordova.exec(this.onWikitudeOK, this.onWikitudeError, "WikitudePlugin", "close", [""]);
};
/**
* Use this function to only hide the Wikitude SDK. All location and rendering updates are still active.
*/
WikitudePlugin.prototype.hide = function() {
// Hide only — unlike close(), location and rendering updates keep running.
cordova.exec(this.onWikitudeOK, this.onWikitudeError, "WikitudePlugin", "hide", [""]);
};
/**
* Use this function to show the Wikitude SDK again if it was hidden before.
*/
WikitudePlugin.prototype.show = function() {
// Re-show the SDK view after a previous hide().
cordova.exec(this.onWikitudeOK, this.onWikitudeError, "WikitudePlugin", "show", [""]);
};
/* Interacting with the Wikitude SDK */
/**
* Use this function to call javascript which will be executed in the context of the currently loaded ARchitect World.
*
* @param js The JavaScript that should be evaluated in the ARchitect View.
*/
WikitudePlugin.prototype.callJavaScript = function(js) {
// NOTE: the native action name is "callJavascript" (lowercase 's') — it must
// match the plugin's native-side mapping, not this wrapper's method name.
cordova.exec(this.onWikitudeOK, this.onWikitudeError, "WikitudePlugin", "callJavascript", [js]);
};
/**
* Use this function to set a callback which will be invoked when the ARchitect World opens an architectsdk =// url.
* document.location = "architectsdk =//opendetailpage?id=9";
*
* @param onUrlInvokeCallback A function which will be called when the ARchitect World invokes a call to "document.location = architectsdk =//"
*/
WikitudePlugin.prototype.setOnUrlInvokeCallback = function(onUrlInvokeCallback) {
// The callback is passed directly as the cordova success handler, so it is
// invoked every time the ARchitect World navigates to an architectsdk:// URL.
cordova.exec(onUrlInvokeCallback, this.onWikitudeError, "WikitudePlugin", "onUrlInvoke", [""]);
};
/**
* Use this function to inject a location into the Wikituce SDK.
*
* @param latitude The latitude which should be simulated
* @param longitude The longitude which should be simulated
* @param altitude The altitude which should be simulated
* @param accuracy The simulated location accuracy
*/
WikitudePlugin.prototype.setLocation = function(latitude, longitude, altitude, accuracy) {
// Inject a simulated location into the SDK (argument order matters on the
// native side: latitude, longitude, altitude, accuracy).
cordova.exec(this.onWikitudeOK, this.onWikitudeError, "WikitudePlugin", "setLocation", [latitude, longitude, altitude, accuracy]);
};
/**
* Use this function to generate a screenshot from the current Wikitude SDK view.
*
* @param includeWebView Indicates if the ARchitect web view should be included in the generated screenshot or not.
* @param imagePathInBundleorNullForPhotoLibrary If a file path or file name is given, the generated screenshot will be saved in the application bundle. Passing null will save the photo in the device photo library.
*/
WikitudePlugin.prototype.captureScreen = function(includeWebView, imagePathInBundleOrNullForPhotoLibrary, successCallback, errorCallback) {
// Unlike the other wrappers, the caller supplies its own success/error
// callbacks here so it can receive the screenshot result.
cordova.exec(successCallback, errorCallback, "WikitudePlugin", "captureScreen", [includeWebView, imagePathInBundleOrNullForPhotoLibrary]);
};
/*
* =============================================================================================================================
*
* Callbacks of public functions
*
* =============================================================================================================================
*/
/**
* This function gets called if the Wikitude Plugin reports that the device is able to start the Wikitude SDK
*/
WikitudePlugin.prototype.deviceIsARchitectReady = function() {
// Keep track of the device status.
// Invoked by cordova as a bare callback, so `this` is not the plugin here;
// state and the developer callbacks are reached through module.exports.
module.exports._isDeviceSupported = true;
// if the developer passed in a device supported callback, call it
if (module.exports._onDeviceSupportedCallback) {
module.exports._onDeviceSupportedCallback();
}
};
/**
* This function gets called if the Wikitude Plugin reports that the device is not able of starting the Wikitude SDK.
*/
WikitudePlugin.prototype.deviceIsNotARchitectReady = function() {
// Keep track of the device status.
// Invoked by cordova as a bare callback, so `this` is not the plugin here;
// state and the developer callbacks are reached through module.exports.
module.exports._isDeviceSupported = false;
// if the developer passed in a device not supported callback, call it
if (module.exports._onDeviceNotSupportedCallback) {
module.exports._onDeviceNotSupportedCallback();
}
};
/**
* Use this callback to get notified when the ARchitect World was loaded successfully.
*/
WikitudePlugin.prototype.worldLaunched = function(url) {
// cordova success callback for "open" — `this` is not the plugin, so the
// developer callback is reached through module.exports.
if (module.exports._onARchitectWorldLaunchedCallback) {
module.exports._onARchitectWorldLaunchedCallback(url);
}
};
/**
* Use this callback to get notified when the ARchitect World could not be loaded.
*/
WikitudePlugin.prototype.worldFailedLaunching = function(err) {
// cordova error callback for "open" — `this` is not the plugin, so the
// developer callback is reached through module.exports.
if (module.exports._onARchitectWorldFailedLaunchingCallback) {
module.exports._onARchitectWorldFailedLaunchingCallback(err);
}
};
/* Lifecycle updates */
/**
* This function gets called every time the application did become active.
*/
/**
 * Application-lifecycle handler: tells the Wikitude SDK to resume.
 * Registered via document.addEventListener, so `this` is the event target
 * (not the plugin) when it fires — the generic callbacks are therefore
 * addressed through module.exports, matching deviceIsARchitectReady.
 */
WikitudePlugin.prototype.onResume = function() {
	// Call the Wikitude SDK that it should resume.
	cordova.exec(module.exports.onWikitudeOK, module.exports.onWikitudeError, "WikitudePlugin", "onResume", [""]);
};
/**
* This function gets called every time the application is about to become inactive.
*/
/**
 * Application-lifecycle handler: tells the Wikitude SDK the app is pausing.
 * Registered via document.addEventListener, so `this` is the event target
 * (not the plugin) when it fires — the generic callbacks are therefore
 * addressed through module.exports, matching deviceIsARchitectReady.
 */
WikitudePlugin.prototype.onPause = function() {
	// Call the Wikitude SDK that the application did become inactive.
	cordova.exec(module.exports.onWikitudeOK, module.exports.onWikitudeError, "WikitudePlugin", "onPause", [""]);
};
/**
 * Generic no-op success callback used for calls whose result is ignored.
 */
WikitudePlugin.prototype.onWikitudeOK = function() {};
/**
 * Generic no-op error callback used for calls whose failure is ignored.
 */
WikitudePlugin.prototype.onWikitudeError = function() {};
/* Export a new WikitudePlugin instance */
var wikitudePlugin = new WikitudePlugin();
| module.exports = wikitudePlugin; | random_line_split | |
WikitudePlugin.js |
/**
* Release date: 29.07.14
*/
/**
 * Wikitude cordova plugin wrapper (release 29.07.14).
 * Holds the SDK licence key, the augmented-reality mode, and the callbacks
 * used by the device-compatibility check and the ARchitect-world launch.
 */
var WikitudePlugin = function() {
	// SDK key from the Wikitude store; leave empty for a trial build.
	this._sdkKey = "ENTER-YOUR-KEY-HERE";
	// Set by the isDeviceSupported check; gates loadARchitectWorld.
	this._isDeviceSupported = false;
	// "Geo" (lat/long placement), "IR" (image recognition only — supports a
	// wider range of devices, but IR needs a dual-core CPU to run well), or
	// "IrAndGeo" for worlds that need both.
	this._augmentedRealityMode = "IrAndGeo";
	// Device-compatibility callbacks supplied by the developer.
	this._onDeviceSupportedCallback = null;
	this._onDeviceNotSupportedCallback = null;
	// ARchitect-world launch callbacks supplied by the developer.
	this._onARchitectWorldLaunchedCallback = null;
	this._onARchitectWorldFailedLaunchingCallback = null;
};
/*
* =============================================================================================================================
*
* PUBLIC API
*
* =============================================================================================================================
*/
/* Managing ARchitect world loading */
/**
* Use this function to check if the current device is capable of running ARchitect Worlds.
*
* @param {function} successCallback A callback which is called if the device is capable of running ARchitect Worlds.
* @param {function} errorCallback A callback which is called if the device is not capable of running ARchitect Worlds.
*/
WikitudePlugin.prototype.isDeviceSupported = function(successCallback, errorCallback) {
// Store a reference to the success and error callback function because we intercept the callbacks ourself but need to call the developer ones afterwards
this._onDeviceSupportedCallback = successCallback;
this._onDeviceNotSupportedCallback = errorCallback;
// Check if the current device is capable of running Architect Worlds
cordova.exec(this.deviceIsARchitectReady, this.deviceIsNotARchitectReady, "WikitudePlugin", "isDeviceSupported", [this._augmentedRealityMode]);
};
/**
* Use this function to load an ARchitect World.
*
* @param {String} worldPath The path to an ARchitect world, ether on the device or on e.g. your Dropbox.
*/
WikitudePlugin.prototype.loadARchitectWorld = function(worldPath) {
// before we actually call load, we check again if the device is able to open the world
if (this._isDeviceSupported) {
// the 'open' function of the Wikitude Plugin requires some parameters
// @param {String} SDKKey (required) The Wikitude SDK license key that you received with your purchase
// @param {String} ARchitectWorldPath (required) The path to a local ARchitect world or to a ARchitect world on a server or your dropbox
// @param {String} AugmentedRealityMode (optional) describes in more detail how the Wikitude SDK should be instantiated
cordova.exec(this.worldLaunched, this.worldFailedLaunching, "WikitudePlugin", "open", [{
"SDKKey": this._sdkKey,
"ARchitectWorldPath": worldPath,
"AugmentedRealityMode": this._augmentedRealityMode
}]);
// We add an event listener on the resume and pause event of the application lifecycle
document.addEventListener("resume", this.onResume, false);
document.addEventListener("pause", this.onPause, false);
} else {
// If the device is not supported, we call the device not supported callback again.
if (this._onDeviceNotSupportedCallback) {
this._onDeviceNotSupportedCallback();
}
}
};
/* Managing the Wikitude SDK Lifecycle */
/**
* Use this function to stop the Wikitude SDK and to remove it from the screen.
*/
WikitudePlugin.prototype.close = function() {
document.removeEventListener("pause", this.onPause, false);
document.removeEventListener("resume", this.onResume, false);
cordova.exec(this.onWikitudeOK, this.onWikitudeError, "WikitudePlugin", "close", [""]);
};
/**
* Use this function to only hide the Wikitude SDK. All location and rendering updates are still active.
*/
WikitudePlugin.prototype.hide = function() {
cordova.exec(this.onWikitudeOK, this.onWikitudeError, "WikitudePlugin", "hide", [""]);
};
/**
* Use this function to show the Wikitude SDK again if it was hidden before.
*/
WikitudePlugin.prototype.show = function() {
cordova.exec(this.onWikitudeOK, this.onWikitudeError, "WikitudePlugin", "show", [""]);
};
/* Interacting with the Wikitude SDK */
/**
* Use this function to call javascript which will be executed in the context of the currently loaded ARchitect World.
*
* @param js The JavaScript that should be evaluated in the ARchitect View.
*/
WikitudePlugin.prototype.callJavaScript = function(js) {
cordova.exec(this.onWikitudeOK, this.onWikitudeError, "WikitudePlugin", "callJavascript", [js]);
};
/**
* Use this function to set a callback which will be invoked when the ARchitect World opens an architectsdk =// url.
* document.location = "architectsdk =//opendetailpage?id=9";
*
* @param onUrlInvokeCallback A function which will be called when the ARchitect World invokes a call to "document.location = architectsdk =//"
*/
WikitudePlugin.prototype.setOnUrlInvokeCallback = function(onUrlInvokeCallback) {
cordova.exec(onUrlInvokeCallback, this.onWikitudeError, "WikitudePlugin", "onUrlInvoke", [""]);
};
/**
* Use this function to inject a location into the Wikituce SDK.
*
* @param latitude The latitude which should be simulated
* @param longitude The longitude which should be simulated
* @param altitude The altitude which should be simulated
* @param accuracy The simulated location accuracy
*/
WikitudePlugin.prototype.setLocation = function(latitude, longitude, altitude, accuracy) {
cordova.exec(this.onWikitudeOK, this.onWikitudeError, "WikitudePlugin", "setLocation", [latitude, longitude, altitude, accuracy]);
};
/**
* Use this function to generate a screenshot from the current Wikitude SDK view.
*
* @param includeWebView Indicates if the ARchitect web view should be included in the generated screenshot or not.
* @param imagePathInBundleorNullForPhotoLibrary If a file path or file name is given, the generated screenshot will be saved in the application bundle. Passing null will save the photo in the device photo library.
*/
WikitudePlugin.prototype.captureScreen = function(includeWebView, imagePathInBundleOrNullForPhotoLibrary, successCallback, errorCallback) {
cordova.exec(successCallback, errorCallback, "WikitudePlugin", "captureScreen", [includeWebView, imagePathInBundleOrNullForPhotoLibrary]);
};
/*
* =============================================================================================================================
*
* Callbacks of public functions
*
* =============================================================================================================================
*/
/**
* This function gets called if the Wikitude Plugin reports that the device is able to start the Wikitude SDK
*/
WikitudePlugin.prototype.deviceIsARchitectReady = function() {
// Keep track of the device status
module.exports._isDeviceSupported = true;
// if the developer passed in a device supported callback, call it
if (module.exports._onDeviceSupportedCallback) |
};
/**
* This function gets called if the Wikitude Plugin reports that the device is not able of starting the Wikitude SDK.
*/
WikitudePlugin.prototype.deviceIsNotARchitectReady = function() {
// Keep track of the device status
module.exports._isDeviceSupported = false;
// if the developer passed in a device not supported callback, call it
if (module.exports._onDeviceNotSupportedCallback) {
module.exports._onDeviceNotSupportedCallback();
}
};
/**
* Use this callback to get notified when the ARchitect World was loaded successfully.
*/
WikitudePlugin.prototype.worldLaunched = function(url) {
if (module.exports._onARchitectWorldLaunchedCallback) {
module.exports._onARchitectWorldLaunchedCallback(url);
}
};
/**
* Use this callback to get notified when the ARchitect World could not be loaded.
*/
WikitudePlugin.prototype.worldFailedLaunching = function(err) {
if (module.exports._onARchitectWorldFailedLaunchingCallback) {
module.exports._onARchitectWorldFailedLaunchingCallback(err);
}
};
/* Lifecycle updates */
/**
* This function gets called every time the application did become active.
*/
/**
 * Application-lifecycle handler: tells the Wikitude SDK to resume.
 * Registered via document.addEventListener, so `this` is the event target
 * (not the plugin) when it fires — the generic callbacks are therefore
 * addressed through module.exports, matching deviceIsARchitectReady.
 */
WikitudePlugin.prototype.onResume = function() {
	// Call the Wikitude SDK that it should resume.
	cordova.exec(module.exports.onWikitudeOK, module.exports.onWikitudeError, "WikitudePlugin", "onResume", [""]);
};
/**
* This function gets called every time the application is about to become inactive.
*/
/**
 * Application-lifecycle handler: tells the Wikitude SDK the app is pausing.
 * Registered via document.addEventListener, so `this` is the event target
 * (not the plugin) when it fires — the generic callbacks are therefore
 * addressed through module.exports, matching deviceIsARchitectReady.
 */
WikitudePlugin.prototype.onPause = function() {
	// Call the Wikitude SDK that the application did become inactive.
	cordova.exec(module.exports.onWikitudeOK, module.exports.onWikitudeError, "WikitudePlugin", "onPause", [""]);
};
/**
* A generic success callback used inside this wrapper.
*/
WikitudePlugin.prototype.onWikitudeOK = function() {};
/**
* A generic error callback used inside this wrapper.
*/
WikitudePlugin.prototype.onWikitudeError = function() {};
/* Export a new WikitudePlugin instance */
var wikitudePlugin = new WikitudePlugin();
module.exports = wikitudePlugin;
| {
module.exports._onDeviceSupportedCallback();
} | conditional_block |
actions_linux.go | // Copyright (c) 2019, Sylabs Inc. All rights reserved.
// This software is licensed under a 3-clause BSD license. Please consult the
// LICENSE.md file distributed with the sources of this project regarding your
// rights to use or distribute this software.
package cli
import (
"encoding/json"
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"strconv"
"strings"
"syscall"
"time"
"github.com/opencontainers/runtime-tools/generate"
"github.com/sylabs/singularity/internal/pkg/plugin"
"github.com/sylabs/singularity/pkg/image"
"github.com/sylabs/singularity/pkg/image/unpacker"
"github.com/sylabs/singularity/pkg/util/nvidia"
"github.com/spf13/cobra"
"github.com/sylabs/singularity/internal/pkg/buildcfg"
"github.com/sylabs/singularity/internal/pkg/instance"
"github.com/sylabs/singularity/internal/pkg/runtime/engines/config"
"github.com/sylabs/singularity/internal/pkg/runtime/engines/config/oci"
"github.com/sylabs/singularity/internal/pkg/security"
"github.com/sylabs/singularity/internal/pkg/sylog"
"github.com/sylabs/singularity/internal/pkg/util/env"
"github.com/sylabs/singularity/internal/pkg/util/exec"
"github.com/sylabs/singularity/internal/pkg/util/fs"
"github.com/sylabs/singularity/internal/pkg/util/user"
singularityConfig "github.com/sylabs/singularity/pkg/runtime/engines/singularity/config"
)
// EnsureRootPriv ensures that a command is executed with root privileges.
// To customize the output, arguments can be used to specify the context (e.g., "oci", "plugin"),
// where the first argument (string) will be displayed before the command itself.
func EnsureRootPriv(cmd *cobra.Command, args []string) |
// convertImage extracts the squashfs root filesystem of the image at
// filename into a temporary sandbox directory and returns the directory
// path. unsquashfsPath, when non-empty, is used as the unsquashfs binary
// if none is found on PATH. The caller is responsible for removing the
// returned directory.
func convertImage(filename string, unsquashfsPath string) (string, error) {
	img, err := image.Init(filename, false)
	if err != nil {
		return "", fmt.Errorf("could not open image %s: %s", filename, err)
	}
	defer img.File.Close()

	if !img.HasRootFs() {
		return "", fmt.Errorf("no root filesystem found in %s", filename)
	}

	// only squashfs root partitions can be unpacked here
	if img.Partitions[0].Type != image.SQUASHFS {
		return "", fmt.Errorf("not a squashfs root filesystem")
	}

	// create a reader over the rootfs partition
	reader, err := image.NewPartitionReader(img, "", 0)
	if err != nil {
		return "", fmt.Errorf("could not extract root filesystem: %s", err)
	}

	s := unpacker.NewSquashfs()
	if !s.HasUnsquashfs() && unsquashfsPath != "" {
		s.UnsquashfsPath = unsquashfsPath
	}

	// keep compatibility with v2: SINGULARITY_LOCALCACHEDIR takes
	// precedence over SINGULARITY_CACHEDIR
	tmpdir := os.Getenv("SINGULARITY_LOCALCACHEDIR")
	if tmpdir == "" {
		tmpdir = os.Getenv("SINGULARITY_CACHEDIR")
	}

	// create temporary sandbox
	dir, err := ioutil.TempDir(tmpdir, "rootfs-")
	if err != nil {
		return "", fmt.Errorf("could not create temporary sandbox: %s", err)
	}

	// extract root filesystem; remove the sandbox on failure so no
	// half-populated directory is left behind
	if err := s.ExtractAll(reader, dir); err != nil {
		os.RemoveAll(dir)
		return "", fmt.Errorf("root filesystem extraction failed: %s", err)
	}

	// err is necessarily nil here; return it explicitly rather than the
	// stale variable from the TempDir call
	return dir, nil
}
// TODO: Let's stick this in another file so that that CLI is just CLI
func execStarter(cobraCmd *cobra.Command, image string, args []string, name string) {
targetUID := 0
targetGID := make([]int, 0)
procname := ""
uid := uint32(os.Getuid())
gid := uint32(os.Getgid())
// Are we running from a privileged account?
isPrivileged := uid == 0
checkPrivileges := func(cond bool, desc string, fn func()) {
if !cond {
return
}
if !isPrivileged {
sylog.Fatalf("%s requires root privileges", desc)
}
fn()
}
syscall.Umask(0022)
starter := buildcfg.LIBEXECDIR + "/singularity/bin/starter-suid"
engineConfig := singularityConfig.NewConfig()
configurationFile := buildcfg.SYSCONFDIR + "/singularity/singularity.conf"
if err := config.Parser(configurationFile, engineConfig.File); err != nil {
sylog.Fatalf("Unable to parse singularity.conf file: %s", err)
}
ociConfig := &oci.Config{}
generator := generate.Generator{Config: &ociConfig.Spec}
engineConfig.OciConfig = ociConfig
generator.SetProcessArgs(args)
uidParam := security.GetParam(Security, "uid")
gidParam := security.GetParam(Security, "gid")
// handle target UID/GID for root user
checkPrivileges(uidParam != "", "uid security feature", func() {
u, err := strconv.ParseUint(uidParam, 10, 32)
if err != nil {
sylog.Fatalf("failed to parse provided UID")
}
targetUID = int(u)
uid = uint32(targetUID)
engineConfig.SetTargetUID(targetUID)
})
checkPrivileges(gidParam != "", "gid security feature", func() {
gids := strings.Split(gidParam, ":")
for _, id := range gids {
g, err := strconv.ParseUint(id, 10, 32)
if err != nil {
sylog.Fatalf("failed to parse provided GID")
}
targetGID = append(targetGID, int(g))
}
if len(gids) > 0 {
gid = uint32(targetGID[0])
}
engineConfig.SetTargetGID(targetGID)
})
if strings.HasPrefix(image, "instance://") {
if name != "" {
sylog.Fatalf("Starting an instance from another is not allowed")
}
instanceName := instance.ExtractName(image)
file, err := instance.Get(instanceName, instance.SingSubDir)
if err != nil {
sylog.Fatalf("%s", err)
}
if !file.Privileged {
UserNamespace = true
}
generator.AddProcessEnv("SINGULARITY_CONTAINER", file.Image)
generator.AddProcessEnv("SINGULARITY_NAME", filepath.Base(file.Image))
engineConfig.SetImage(image)
engineConfig.SetInstanceJoin(true)
} else {
abspath, err := filepath.Abs(image)
generator.AddProcessEnv("SINGULARITY_CONTAINER", abspath)
generator.AddProcessEnv("SINGULARITY_NAME", filepath.Base(abspath))
if err != nil {
sylog.Fatalf("Failed to determine image absolute path for %s: %s", image, err)
}
engineConfig.SetImage(abspath)
}
if !NoNvidia && (Nvidia || engineConfig.File.AlwaysUseNv) {
userPath := os.Getenv("USER_PATH")
if engineConfig.File.AlwaysUseNv {
sylog.Verbosef("'always use nv = yes' found in singularity.conf")
sylog.Verbosef("binding nvidia files into container")
}
libs, bins, err := nvidia.Paths(buildcfg.SINGULARITY_CONFDIR, userPath)
if err != nil {
sylog.Warningf("Unable to capture NVIDIA bind points: %v", err)
} else {
if len(bins) == 0 {
sylog.Infof("Could not find any NVIDIA binaries on this host!")
} else {
if IsWritable {
sylog.Warningf("NVIDIA binaries may not be bound with --writable")
}
for _, binary := range bins {
usrBinBinary := filepath.Join("/usr/bin", filepath.Base(binary))
bind := strings.Join([]string{binary, usrBinBinary}, ":")
BindPaths = append(BindPaths, bind)
}
}
if len(libs) == 0 {
sylog.Warningf("Could not find any NVIDIA libraries on this host!")
sylog.Warningf("You may need to edit %v/nvliblist.conf", buildcfg.SINGULARITY_CONFDIR)
} else {
ContainLibsPath = append(ContainLibsPath, libs...)
}
}
}
engineConfig.SetBindPath(BindPaths)
engineConfig.SetNetwork(Network)
engineConfig.SetDNS(DNS)
engineConfig.SetNetworkArgs(NetworkArgs)
engineConfig.SetOverlayImage(OverlayPath)
engineConfig.SetWritableImage(IsWritable)
engineConfig.SetNoHome(NoHome)
engineConfig.SetNv(Nvidia)
engineConfig.SetAddCaps(AddCaps)
engineConfig.SetDropCaps(DropCaps)
checkPrivileges(AllowSUID, "--allow-setuid", func() {
engineConfig.SetAllowSUID(AllowSUID)
})
checkPrivileges(KeepPrivs, "--keep-privs", func() {
engineConfig.SetKeepPrivs(KeepPrivs)
})
engineConfig.SetNoPrivs(NoPrivs)
engineConfig.SetSecurity(Security)
engineConfig.SetShell(ShellPath)
engineConfig.SetLibrariesPath(ContainLibsPath)
if ShellPath != "" {
generator.AddProcessEnv("SINGULARITY_SHELL", ShellPath)
}
checkPrivileges(CgroupsPath != "", "--apply-cgroups", func() {
engineConfig.SetCgroupsPath(CgroupsPath)
})
if IsWritable && IsWritableTmpfs {
sylog.Warningf("Disabling --writable-tmpfs flag, mutually exclusive with --writable")
engineConfig.SetWritableTmpfs(false)
} else {
engineConfig.SetWritableTmpfs(IsWritableTmpfs)
}
homeFlag := cobraCmd.Flag("home")
engineConfig.SetCustomHome(homeFlag.Changed)
// set home directory for the targeted UID if it exists on host system
if !homeFlag.Changed && targetUID != 0 {
if targetUID > 500 {
if pwd, err := user.GetPwUID(uint32(targetUID)); err == nil {
sylog.Debugf("Target UID requested, set home directory to %s", pwd.Dir)
HomePath = pwd.Dir
engineConfig.SetCustomHome(true)
} else {
sylog.Verbosef("Home directory for UID %d not found, home won't be mounted", targetUID)
engineConfig.SetNoHome(true)
HomePath = "/"
}
} else {
sylog.Verbosef("System UID %d requested, home won't be mounted", targetUID)
engineConfig.SetNoHome(true)
HomePath = "/"
}
}
if Hostname != "" {
UtsNamespace = true
engineConfig.SetHostname(Hostname)
}
checkPrivileges(IsBoot, "--boot", func() {})
if IsContained || IsContainAll || IsBoot {
engineConfig.SetContain(true)
if IsContainAll {
PidNamespace = true
IpcNamespace = true
IsCleanEnv = true
}
}
engineConfig.SetScratchDir(ScratchPath)
engineConfig.SetWorkdir(WorkdirPath)
homeSlice := strings.Split(HomePath, ":")
if len(homeSlice) > 2 || len(homeSlice) == 0 {
sylog.Fatalf("home argument has incorrect number of elements: %v", len(homeSlice))
}
engineConfig.SetHomeSource(homeSlice[0])
if len(homeSlice) == 1 {
engineConfig.SetHomeDest(homeSlice[0])
} else {
engineConfig.SetHomeDest(homeSlice[1])
}
if !engineConfig.File.AllowSetuid || IsFakeroot {
UserNamespace = true
}
/* if name submitted, run as instance */
if name != "" {
PidNamespace = true
IpcNamespace = true
engineConfig.SetInstance(true)
engineConfig.SetBootInstance(IsBoot)
_, err := instance.Get(name, instance.SingSubDir)
if err == nil {
sylog.Fatalf("instance %s already exists", name)
}
if IsBoot {
UtsNamespace = true
NetNamespace = true
if Hostname == "" {
engineConfig.SetHostname(name)
}
if !KeepPrivs {
engineConfig.SetDropCaps("CAP_SYS_BOOT,CAP_SYS_RAWIO")
}
generator.SetProcessArgs([]string{"/sbin/init"})
}
pwd, err := user.GetPwUID(uint32(os.Getuid()))
if err != nil {
sylog.Fatalf("failed to retrieve user information for UID %d: %s", os.Getuid(), err)
}
procname, err = instance.ProcName(name, pwd.Name)
if err != nil {
sylog.Fatalf("%s", err)
}
} else {
generator.SetProcessArgs(args)
procname = "Singularity runtime parent"
}
if NetNamespace {
generator.AddOrReplaceLinuxNamespace("network", "")
}
if UtsNamespace {
generator.AddOrReplaceLinuxNamespace("uts", "")
}
if PidNamespace {
generator.AddOrReplaceLinuxNamespace("pid", "")
engineConfig.SetNoInit(NoInit)
}
if IpcNamespace {
generator.AddOrReplaceLinuxNamespace("ipc", "")
}
if !UserNamespace {
if _, err := os.Stat(starter); os.IsNotExist(err) {
sylog.Verbosef("starter-suid not found, using user namespace")
UserNamespace = true
}
}
if UserNamespace {
generator.AddOrReplaceLinuxNamespace("user", "")
starter = buildcfg.LIBEXECDIR + "/singularity/bin/starter"
if IsFakeroot {
generator.AddLinuxUIDMapping(uid, 0, 1)
generator.AddLinuxGIDMapping(gid, 0, 1)
} else {
generator.AddLinuxUIDMapping(uid, uid, 1)
generator.AddLinuxGIDMapping(gid, gid, 1)
}
}
// Copy and cache environment
environment := os.Environ()
// Clean environment
env.SetContainerEnv(&generator, environment, IsCleanEnv, engineConfig.GetHomeDest())
// force to use getwd syscall
os.Unsetenv("PWD")
if pwd, err := os.Getwd(); err == nil {
if PwdPath != "" {
generator.SetProcessCwd(PwdPath)
} else {
if engineConfig.GetContain() {
generator.SetProcessCwd(engineConfig.GetHomeDest())
} else {
generator.SetProcessCwd(pwd)
}
}
} else {
sylog.Warningf("can't determine current working directory: %s", err)
}
Env := []string{sylog.GetEnvVar()}
generator.AddProcessEnv("SINGULARITY_APPNAME", AppName)
// convert image file to sandbox if image contains
// a squashfs filesystem
if UserNamespace && fs.IsFile(image) {
unsquashfsPath := ""
if engineConfig.File.MksquashfsPath != "" {
d := filepath.Dir(engineConfig.File.MksquashfsPath)
unsquashfsPath = filepath.Join(d, "unsquashfs")
}
sylog.Verbosef("User namespace requested, convert image %s to sandbox", image)
sylog.Infof("Convert SIF file to sandbox...")
dir, err := convertImage(image, unsquashfsPath)
if err != nil {
sylog.Fatalf("while extracting %s: %s", image, err)
}
engineConfig.SetImage(dir)
engineConfig.SetDeleteImage(true)
generator.AddProcessEnv("SINGULARITY_CONTAINER", dir)
}
plugin.FlagHookCallbacks(engineConfig)
cfg := &config.Common{
EngineName: singularityConfig.Name,
ContainerID: name,
EngineConfig: engineConfig,
}
configData, err := json.Marshal(cfg)
if err != nil {
sylog.Fatalf("CLI Failed to marshal CommonEngineConfig: %s\n", err)
}
if engineConfig.GetInstance() {
stdout, stderr, err := instance.SetLogFile(name, int(uid), instance.SingSubDir)
if err != nil {
sylog.Fatalf("failed to create instance log files: %s", err)
}
start, err := stderr.Seek(0, io.SeekEnd)
if err != nil {
sylog.Warningf("failed to get standard error stream offset: %s", err)
}
cmd, err := exec.PipeCommand(starter, []string{procname}, Env, configData)
if err != nil {
sylog.Warningf("failed to prepare command: %s", err)
}
cmd.Stdout = stdout
cmd.Stderr = stderr
cmdErr := cmd.Run()
if sylog.GetLevel() != 0 {
// starter can exit a bit before all errors has been reported
// by instance process, wait a bit to catch all errors
time.Sleep(100 * time.Millisecond)
end, err := stderr.Seek(0, io.SeekEnd)
if err != nil {
sylog.Warningf("failed to get standard error stream offset: %s", err)
}
if end-start > 0 {
output := make([]byte, end-start)
stderr.ReadAt(output, start)
fmt.Println(string(output))
}
}
if cmdErr != nil {
sylog.Fatalf("failed to start instance: %s", cmdErr)
} else {
sylog.Verbosef("you will find instance output here: %s", stdout.Name())
sylog.Verbosef("you will find instance error here: %s", stderr.Name())
sylog.Infof("instance started successfully")
}
} else {
if err := exec.Pipe(starter, []string{procname}, Env, configData); err != nil {
sylog.Fatalf("%s", err)
}
}
}
| {
if os.Geteuid() != 0 {
if len(args) >= 1 && len(args[0]) > 0 {
// The first argument is the context
sylog.Fatalf("command '%s %s' requires root privileges", args[0], cmd.Name())
} else {
sylog.Fatalf("command %s requires root privileges", cmd.Name())
}
}
} | identifier_body |
actions_linux.go | // Copyright (c) 2019, Sylabs Inc. All rights reserved.
// This software is licensed under a 3-clause BSD license. Please consult the
// LICENSE.md file distributed with the sources of this project regarding your
// rights to use or distribute this software.
package cli
import (
"encoding/json"
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"strconv"
"strings"
"syscall"
"time"
"github.com/opencontainers/runtime-tools/generate"
"github.com/sylabs/singularity/internal/pkg/plugin"
"github.com/sylabs/singularity/pkg/image"
"github.com/sylabs/singularity/pkg/image/unpacker"
"github.com/sylabs/singularity/pkg/util/nvidia"
"github.com/spf13/cobra"
"github.com/sylabs/singularity/internal/pkg/buildcfg"
"github.com/sylabs/singularity/internal/pkg/instance"
"github.com/sylabs/singularity/internal/pkg/runtime/engines/config"
"github.com/sylabs/singularity/internal/pkg/runtime/engines/config/oci"
"github.com/sylabs/singularity/internal/pkg/security"
"github.com/sylabs/singularity/internal/pkg/sylog"
"github.com/sylabs/singularity/internal/pkg/util/env"
"github.com/sylabs/singularity/internal/pkg/util/exec"
"github.com/sylabs/singularity/internal/pkg/util/fs"
"github.com/sylabs/singularity/internal/pkg/util/user"
singularityConfig "github.com/sylabs/singularity/pkg/runtime/engines/singularity/config"
)
// EnsureRootPriv ensures that a command is executed with root privileges.
// To customize the output, arguments can be used to specify the context (e.g., "oci", "plugin"),
// where the first argument (string) will be displayed before the command itself.
func EnsureRootPriv(cmd *cobra.Command, args []string) {
	if os.Geteuid() == 0 {
		// Already running with root privileges, nothing to enforce.
		return
	}
	name := cmd.Name()
	if len(args) >= 1 && args[0] != "" {
		// args[0] carries the command context (e.g. "oci", "plugin")
		// and is printed in front of the command name.
		sylog.Fatalf("command '%s %s' requires root privileges", args[0], name)
	} else {
		sylog.Fatalf("command %s requires root privileges", name)
	}
}
// convertImage extracts the squashfs root filesystem of the image at
// filename into a freshly created temporary sandbox directory and returns
// the directory path. unsquashfsPath is an optional fallback path to the
// unsquashfs binary, used only when no system unsquashfs is found.
// On failure the partially extracted sandbox is removed.
func convertImage(filename string, unsquashfsPath string) (string, error) {
	img, err := image.Init(filename, false)
	if err != nil {
		return "", fmt.Errorf("could not open image %s: %s", filename, err)
	}
	defer img.File.Close()
	if !img.HasRootFs() {
		return "", fmt.Errorf("no root filesystem found in %s", filename)
	}
	// squashfs only: other partition types cannot be unpacked here
	if img.Partitions[0].Type != image.SQUASHFS {
		return "", fmt.Errorf("not a squashfs root filesystem")
	}
	// create a reader for the rootfs partition
	reader, err := image.NewPartitionReader(img, "", 0)
	if err != nil {
		return "", fmt.Errorf("could not extract root filesystem: %s", err)
	}
	s := unpacker.NewSquashfs()
	if !s.HasUnsquashfs() && unsquashfsPath != "" {
		s.UnsquashfsPath = unsquashfsPath
	}
	// keep compatibility with v2 cache environment variables
	tmpdir := os.Getenv("SINGULARITY_LOCALCACHEDIR")
	if tmpdir == "" {
		tmpdir = os.Getenv("SINGULARITY_CACHEDIR")
	}
	// create temporary sandbox
	dir, err := ioutil.TempDir(tmpdir, "rootfs-")
	if err != nil {
		return "", fmt.Errorf("could not create temporary sandbox: %s", err)
	}
	// extract root filesystem; clean up the sandbox on failure
	if err := s.ExtractAll(reader, dir); err != nil {
		os.RemoveAll(dir)
		return "", fmt.Errorf("root filesystem extraction failed: %s", err)
	}
	// err is necessarily nil at this point; return an explicit nil
	// instead of the stale err variable (fixes a confusing return).
	return dir, nil
}
// TODO: Let's stick this in another file so that that CLI is just CLI
func execStarter(cobraCmd *cobra.Command, image string, args []string, name string) {
targetUID := 0
targetGID := make([]int, 0)
procname := ""
uid := uint32(os.Getuid())
gid := uint32(os.Getgid())
// Are we running from a privileged account?
isPrivileged := uid == 0
checkPrivileges := func(cond bool, desc string, fn func()) {
if !cond {
return
}
if !isPrivileged {
sylog.Fatalf("%s requires root privileges", desc)
}
fn()
}
syscall.Umask(0022)
starter := buildcfg.LIBEXECDIR + "/singularity/bin/starter-suid"
engineConfig := singularityConfig.NewConfig()
configurationFile := buildcfg.SYSCONFDIR + "/singularity/singularity.conf"
if err := config.Parser(configurationFile, engineConfig.File); err != nil {
sylog.Fatalf("Unable to parse singularity.conf file: %s", err)
}
ociConfig := &oci.Config{}
generator := generate.Generator{Config: &ociConfig.Spec}
engineConfig.OciConfig = ociConfig
generator.SetProcessArgs(args)
uidParam := security.GetParam(Security, "uid")
gidParam := security.GetParam(Security, "gid")
// handle target UID/GID for root user
checkPrivileges(uidParam != "", "uid security feature", func() {
u, err := strconv.ParseUint(uidParam, 10, 32)
if err != nil {
sylog.Fatalf("failed to parse provided UID")
}
targetUID = int(u)
uid = uint32(targetUID)
engineConfig.SetTargetUID(targetUID)
})
checkPrivileges(gidParam != "", "gid security feature", func() {
gids := strings.Split(gidParam, ":")
for _, id := range gids {
g, err := strconv.ParseUint(id, 10, 32)
if err != nil {
sylog.Fatalf("failed to parse provided GID")
}
targetGID = append(targetGID, int(g))
}
if len(gids) > 0 {
gid = uint32(targetGID[0])
}
engineConfig.SetTargetGID(targetGID)
})
if strings.HasPrefix(image, "instance://") {
if name != "" {
sylog.Fatalf("Starting an instance from another is not allowed")
}
instanceName := instance.ExtractName(image)
file, err := instance.Get(instanceName, instance.SingSubDir)
if err != nil {
sylog.Fatalf("%s", err)
}
if !file.Privileged {
UserNamespace = true
}
generator.AddProcessEnv("SINGULARITY_CONTAINER", file.Image)
generator.AddProcessEnv("SINGULARITY_NAME", filepath.Base(file.Image))
engineConfig.SetImage(image)
engineConfig.SetInstanceJoin(true)
} else {
abspath, err := filepath.Abs(image)
generator.AddProcessEnv("SINGULARITY_CONTAINER", abspath)
generator.AddProcessEnv("SINGULARITY_NAME", filepath.Base(abspath))
if err != nil {
sylog.Fatalf("Failed to determine image absolute path for %s: %s", image, err)
}
engineConfig.SetImage(abspath)
}
if !NoNvidia && (Nvidia || engineConfig.File.AlwaysUseNv) {
userPath := os.Getenv("USER_PATH")
if engineConfig.File.AlwaysUseNv {
sylog.Verbosef("'always use nv = yes' found in singularity.conf")
sylog.Verbosef("binding nvidia files into container")
}
libs, bins, err := nvidia.Paths(buildcfg.SINGULARITY_CONFDIR, userPath)
if err != nil {
sylog.Warningf("Unable to capture NVIDIA bind points: %v", err)
} else {
if len(bins) == 0 {
sylog.Infof("Could not find any NVIDIA binaries on this host!")
} else {
if IsWritable {
sylog.Warningf("NVIDIA binaries may not be bound with --writable")
}
for _, binary := range bins {
usrBinBinary := filepath.Join("/usr/bin", filepath.Base(binary))
bind := strings.Join([]string{binary, usrBinBinary}, ":")
BindPaths = append(BindPaths, bind)
}
}
if len(libs) == 0 {
sylog.Warningf("Could not find any NVIDIA libraries on this host!")
sylog.Warningf("You may need to edit %v/nvliblist.conf", buildcfg.SINGULARITY_CONFDIR)
} else {
ContainLibsPath = append(ContainLibsPath, libs...)
}
}
}
engineConfig.SetBindPath(BindPaths)
engineConfig.SetNetwork(Network)
engineConfig.SetDNS(DNS)
engineConfig.SetNetworkArgs(NetworkArgs)
engineConfig.SetOverlayImage(OverlayPath)
engineConfig.SetWritableImage(IsWritable)
engineConfig.SetNoHome(NoHome)
engineConfig.SetNv(Nvidia)
engineConfig.SetAddCaps(AddCaps)
engineConfig.SetDropCaps(DropCaps)
checkPrivileges(AllowSUID, "--allow-setuid", func() {
engineConfig.SetAllowSUID(AllowSUID)
})
checkPrivileges(KeepPrivs, "--keep-privs", func() {
engineConfig.SetKeepPrivs(KeepPrivs)
})
engineConfig.SetNoPrivs(NoPrivs)
engineConfig.SetSecurity(Security)
engineConfig.SetShell(ShellPath)
engineConfig.SetLibrariesPath(ContainLibsPath)
if ShellPath != "" {
generator.AddProcessEnv("SINGULARITY_SHELL", ShellPath)
}
checkPrivileges(CgroupsPath != "", "--apply-cgroups", func() {
engineConfig.SetCgroupsPath(CgroupsPath)
})
if IsWritable && IsWritableTmpfs {
sylog.Warningf("Disabling --writable-tmpfs flag, mutually exclusive with --writable")
engineConfig.SetWritableTmpfs(false)
} else {
engineConfig.SetWritableTmpfs(IsWritableTmpfs)
}
homeFlag := cobraCmd.Flag("home")
engineConfig.SetCustomHome(homeFlag.Changed)
// set home directory for the targeted UID if it exists on host system
if !homeFlag.Changed && targetUID != 0 {
if targetUID > 500 {
if pwd, err := user.GetPwUID(uint32(targetUID)); err == nil {
sylog.Debugf("Target UID requested, set home directory to %s", pwd.Dir)
HomePath = pwd.Dir
engineConfig.SetCustomHome(true)
} else {
sylog.Verbosef("Home directory for UID %d not found, home won't be mounted", targetUID)
engineConfig.SetNoHome(true)
HomePath = "/"
}
} else {
sylog.Verbosef("System UID %d requested, home won't be mounted", targetUID)
engineConfig.SetNoHome(true)
HomePath = "/"
}
}
if Hostname != "" {
UtsNamespace = true
engineConfig.SetHostname(Hostname)
}
checkPrivileges(IsBoot, "--boot", func() {})
if IsContained || IsContainAll || IsBoot {
engineConfig.SetContain(true)
if IsContainAll {
PidNamespace = true
IpcNamespace = true
IsCleanEnv = true
}
}
engineConfig.SetScratchDir(ScratchPath)
engineConfig.SetWorkdir(WorkdirPath)
homeSlice := strings.Split(HomePath, ":")
if len(homeSlice) > 2 || len(homeSlice) == 0 {
sylog.Fatalf("home argument has incorrect number of elements: %v", len(homeSlice))
}
engineConfig.SetHomeSource(homeSlice[0])
if len(homeSlice) == 1 {
engineConfig.SetHomeDest(homeSlice[0])
} else {
engineConfig.SetHomeDest(homeSlice[1])
}
if !engineConfig.File.AllowSetuid || IsFakeroot {
UserNamespace = true
}
/* if name submitted, run as instance */
if name != "" {
PidNamespace = true
IpcNamespace = true
engineConfig.SetInstance(true)
engineConfig.SetBootInstance(IsBoot)
_, err := instance.Get(name, instance.SingSubDir)
if err == nil {
sylog.Fatalf("instance %s already exists", name)
}
if IsBoot {
UtsNamespace = true
NetNamespace = true
if Hostname == "" {
engineConfig.SetHostname(name)
}
if !KeepPrivs {
engineConfig.SetDropCaps("CAP_SYS_BOOT,CAP_SYS_RAWIO")
}
generator.SetProcessArgs([]string{"/sbin/init"})
}
pwd, err := user.GetPwUID(uint32(os.Getuid()))
if err != nil {
sylog.Fatalf("failed to retrieve user information for UID %d: %s", os.Getuid(), err)
}
procname, err = instance.ProcName(name, pwd.Name)
if err != nil {
sylog.Fatalf("%s", err)
}
} else {
generator.SetProcessArgs(args)
procname = "Singularity runtime parent"
}
if NetNamespace {
generator.AddOrReplaceLinuxNamespace("network", "")
}
if UtsNamespace {
generator.AddOrReplaceLinuxNamespace("uts", "")
}
if PidNamespace {
generator.AddOrReplaceLinuxNamespace("pid", "")
engineConfig.SetNoInit(NoInit)
}
if IpcNamespace {
generator.AddOrReplaceLinuxNamespace("ipc", "")
}
if !UserNamespace {
if _, err := os.Stat(starter); os.IsNotExist(err) {
sylog.Verbosef("starter-suid not found, using user namespace")
UserNamespace = true
}
}
if UserNamespace {
generator.AddOrReplaceLinuxNamespace("user", "")
starter = buildcfg.LIBEXECDIR + "/singularity/bin/starter"
if IsFakeroot {
generator.AddLinuxUIDMapping(uid, 0, 1)
generator.AddLinuxGIDMapping(gid, 0, 1)
} else {
generator.AddLinuxUIDMapping(uid, uid, 1)
generator.AddLinuxGIDMapping(gid, gid, 1)
}
}
// Copy and cache environment
environment := os.Environ()
// Clean environment
env.SetContainerEnv(&generator, environment, IsCleanEnv, engineConfig.GetHomeDest())
// force to use getwd syscall
os.Unsetenv("PWD")
if pwd, err := os.Getwd(); err == nil {
if PwdPath != "" {
generator.SetProcessCwd(PwdPath)
} else {
if engineConfig.GetContain() {
generator.SetProcessCwd(engineConfig.GetHomeDest())
} else {
generator.SetProcessCwd(pwd)
}
}
} else {
sylog.Warningf("can't determine current working directory: %s", err)
}
Env := []string{sylog.GetEnvVar()}
generator.AddProcessEnv("SINGULARITY_APPNAME", AppName)
// convert image file to sandbox if image contains
// a squashfs filesystem
if UserNamespace && fs.IsFile(image) |
plugin.FlagHookCallbacks(engineConfig)
cfg := &config.Common{
EngineName: singularityConfig.Name,
ContainerID: name,
EngineConfig: engineConfig,
}
configData, err := json.Marshal(cfg)
if err != nil {
sylog.Fatalf("CLI Failed to marshal CommonEngineConfig: %s\n", err)
}
if engineConfig.GetInstance() {
stdout, stderr, err := instance.SetLogFile(name, int(uid), instance.SingSubDir)
if err != nil {
sylog.Fatalf("failed to create instance log files: %s", err)
}
start, err := stderr.Seek(0, io.SeekEnd)
if err != nil {
sylog.Warningf("failed to get standard error stream offset: %s", err)
}
cmd, err := exec.PipeCommand(starter, []string{procname}, Env, configData)
if err != nil {
sylog.Warningf("failed to prepare command: %s", err)
}
cmd.Stdout = stdout
cmd.Stderr = stderr
cmdErr := cmd.Run()
if sylog.GetLevel() != 0 {
// starter can exit a bit before all errors has been reported
// by instance process, wait a bit to catch all errors
time.Sleep(100 * time.Millisecond)
end, err := stderr.Seek(0, io.SeekEnd)
if err != nil {
sylog.Warningf("failed to get standard error stream offset: %s", err)
}
if end-start > 0 {
output := make([]byte, end-start)
stderr.ReadAt(output, start)
fmt.Println(string(output))
}
}
if cmdErr != nil {
sylog.Fatalf("failed to start instance: %s", cmdErr)
} else {
sylog.Verbosef("you will find instance output here: %s", stdout.Name())
sylog.Verbosef("you will find instance error here: %s", stderr.Name())
sylog.Infof("instance started successfully")
}
} else {
if err := exec.Pipe(starter, []string{procname}, Env, configData); err != nil {
sylog.Fatalf("%s", err)
}
}
}
| {
unsquashfsPath := ""
if engineConfig.File.MksquashfsPath != "" {
d := filepath.Dir(engineConfig.File.MksquashfsPath)
unsquashfsPath = filepath.Join(d, "unsquashfs")
}
sylog.Verbosef("User namespace requested, convert image %s to sandbox", image)
sylog.Infof("Convert SIF file to sandbox...")
dir, err := convertImage(image, unsquashfsPath)
if err != nil {
sylog.Fatalf("while extracting %s: %s", image, err)
}
engineConfig.SetImage(dir)
engineConfig.SetDeleteImage(true)
generator.AddProcessEnv("SINGULARITY_CONTAINER", dir)
} | conditional_block |
actions_linux.go | // Copyright (c) 2019, Sylabs Inc. All rights reserved.
// This software is licensed under a 3-clause BSD license. Please consult the
// LICENSE.md file distributed with the sources of this project regarding your
// rights to use or distribute this software.
package cli
import (
"encoding/json"
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"strconv"
"strings"
"syscall"
"time"
"github.com/opencontainers/runtime-tools/generate"
"github.com/sylabs/singularity/internal/pkg/plugin"
"github.com/sylabs/singularity/pkg/image"
"github.com/sylabs/singularity/pkg/image/unpacker"
"github.com/sylabs/singularity/pkg/util/nvidia"
"github.com/spf13/cobra"
"github.com/sylabs/singularity/internal/pkg/buildcfg"
"github.com/sylabs/singularity/internal/pkg/instance"
"github.com/sylabs/singularity/internal/pkg/runtime/engines/config"
"github.com/sylabs/singularity/internal/pkg/runtime/engines/config/oci"
"github.com/sylabs/singularity/internal/pkg/security"
"github.com/sylabs/singularity/internal/pkg/sylog"
"github.com/sylabs/singularity/internal/pkg/util/env"
"github.com/sylabs/singularity/internal/pkg/util/exec"
"github.com/sylabs/singularity/internal/pkg/util/fs"
"github.com/sylabs/singularity/internal/pkg/util/user"
singularityConfig "github.com/sylabs/singularity/pkg/runtime/engines/singularity/config"
)
// EnsureRootPriv ensures that a command is executed with root privileges.
// To customize the output, arguments can be used to specify the context (e.g., "oci", "plugin"),
// where the first argument (string) will be displayed before the command itself.
func EnsureRootPriv(cmd *cobra.Command, args []string) {
// Effective UID 0 means we already run as root; otherwise abort.
if os.Geteuid() != 0 {
if len(args) >= 1 && len(args[0]) > 0 {
// The first argument is the context
// (e.g. "oci" or "plugin") shown before the command name.
sylog.Fatalf("command '%s %s' requires root privileges", args[0], cmd.Name())
} else {
// No context supplied: report the bare command name only.
sylog.Fatalf("command %s requires root privileges", cmd.Name())
}
}
}
// convertImage unpacks the squashfs root filesystem contained in the
// image at filename into a new temporary sandbox directory and returns
// that directory's path. unsquashfsPath, when non-empty, is used as a
// fallback unsquashfs binary if none is found on the system.
func convertImage(filename string, unsquashfsPath string) (string, error) {
	img, err := image.Init(filename, false)
	if err != nil {
		return "", fmt.Errorf("could not open image %s: %s", filename, err)
	}
	defer img.File.Close()

	switch {
	case !img.HasRootFs():
		return "", fmt.Errorf("no root filesystem found in %s", filename)
	case img.Partitions[0].Type != image.SQUASHFS:
		// only squashfs root partitions are supported
		return "", fmt.Errorf("not a squashfs root filesystem")
	}

	// reader over the rootfs partition
	reader, err := image.NewPartitionReader(img, "", 0)
	if err != nil {
		return "", fmt.Errorf("could not extract root filesystem: %s", err)
	}

	squash := unpacker.NewSquashfs()
	if unsquashfsPath != "" && !squash.HasUnsquashfs() {
		squash.UnsquashfsPath = unsquashfsPath
	}

	// keep compatibility with v2 cache environment variables
	base := os.Getenv("SINGULARITY_LOCALCACHEDIR")
	if base == "" {
		base = os.Getenv("SINGULARITY_CACHEDIR")
	}

	// create the temporary sandbox and extract into it
	sandbox, err := ioutil.TempDir(base, "rootfs-")
	if err != nil {
		return "", fmt.Errorf("could not create temporary sandbox: %s", err)
	}
	if err := squash.ExtractAll(reader, sandbox); err != nil {
		os.RemoveAll(sandbox)
		return "", fmt.Errorf("root filesystem extraction failed: %s", err)
	}
	return sandbox, nil
}
// TODO: Let's stick this in another file so that that CLI is just CLI
func execStarter(cobraCmd *cobra.Command, image string, args []string, name string) {
targetUID := 0
targetGID := make([]int, 0)
procname := ""
uid := uint32(os.Getuid())
gid := uint32(os.Getgid())
// Are we running from a privileged account?
isPrivileged := uid == 0
checkPrivileges := func(cond bool, desc string, fn func()) {
if !cond {
return
}
if !isPrivileged {
sylog.Fatalf("%s requires root privileges", desc)
}
fn()
}
syscall.Umask(0022)
starter := buildcfg.LIBEXECDIR + "/singularity/bin/starter-suid"
engineConfig := singularityConfig.NewConfig()
configurationFile := buildcfg.SYSCONFDIR + "/singularity/singularity.conf"
if err := config.Parser(configurationFile, engineConfig.File); err != nil {
sylog.Fatalf("Unable to parse singularity.conf file: %s", err)
}
ociConfig := &oci.Config{}
generator := generate.Generator{Config: &ociConfig.Spec}
engineConfig.OciConfig = ociConfig
generator.SetProcessArgs(args)
uidParam := security.GetParam(Security, "uid")
gidParam := security.GetParam(Security, "gid")
// handle target UID/GID for root user
checkPrivileges(uidParam != "", "uid security feature", func() {
u, err := strconv.ParseUint(uidParam, 10, 32)
if err != nil {
sylog.Fatalf("failed to parse provided UID")
}
targetUID = int(u)
uid = uint32(targetUID)
engineConfig.SetTargetUID(targetUID)
})
checkPrivileges(gidParam != "", "gid security feature", func() {
gids := strings.Split(gidParam, ":")
for _, id := range gids {
g, err := strconv.ParseUint(id, 10, 32)
if err != nil {
sylog.Fatalf("failed to parse provided GID")
}
targetGID = append(targetGID, int(g))
}
if len(gids) > 0 {
gid = uint32(targetGID[0])
}
engineConfig.SetTargetGID(targetGID)
})
if strings.HasPrefix(image, "instance://") {
if name != "" {
sylog.Fatalf("Starting an instance from another is not allowed")
}
instanceName := instance.ExtractName(image)
file, err := instance.Get(instanceName, instance.SingSubDir)
if err != nil {
sylog.Fatalf("%s", err)
}
if !file.Privileged {
UserNamespace = true
}
generator.AddProcessEnv("SINGULARITY_CONTAINER", file.Image)
generator.AddProcessEnv("SINGULARITY_NAME", filepath.Base(file.Image))
engineConfig.SetImage(image)
engineConfig.SetInstanceJoin(true)
} else {
abspath, err := filepath.Abs(image)
generator.AddProcessEnv("SINGULARITY_CONTAINER", abspath)
generator.AddProcessEnv("SINGULARITY_NAME", filepath.Base(abspath))
if err != nil {
sylog.Fatalf("Failed to determine image absolute path for %s: %s", image, err)
}
engineConfig.SetImage(abspath)
}
if !NoNvidia && (Nvidia || engineConfig.File.AlwaysUseNv) {
userPath := os.Getenv("USER_PATH")
if engineConfig.File.AlwaysUseNv {
sylog.Verbosef("'always use nv = yes' found in singularity.conf")
sylog.Verbosef("binding nvidia files into container")
}
libs, bins, err := nvidia.Paths(buildcfg.SINGULARITY_CONFDIR, userPath)
if err != nil {
sylog.Warningf("Unable to capture NVIDIA bind points: %v", err)
} else {
if len(bins) == 0 {
sylog.Infof("Could not find any NVIDIA binaries on this host!")
} else {
if IsWritable {
sylog.Warningf("NVIDIA binaries may not be bound with --writable")
}
for _, binary := range bins {
usrBinBinary := filepath.Join("/usr/bin", filepath.Base(binary))
bind := strings.Join([]string{binary, usrBinBinary}, ":")
BindPaths = append(BindPaths, bind)
}
}
if len(libs) == 0 {
sylog.Warningf("Could not find any NVIDIA libraries on this host!")
sylog.Warningf("You may need to edit %v/nvliblist.conf", buildcfg.SINGULARITY_CONFDIR)
} else {
ContainLibsPath = append(ContainLibsPath, libs...)
}
}
}
engineConfig.SetBindPath(BindPaths)
engineConfig.SetNetwork(Network)
engineConfig.SetDNS(DNS)
engineConfig.SetNetworkArgs(NetworkArgs)
engineConfig.SetOverlayImage(OverlayPath)
engineConfig.SetWritableImage(IsWritable)
engineConfig.SetNoHome(NoHome)
engineConfig.SetNv(Nvidia)
engineConfig.SetAddCaps(AddCaps)
engineConfig.SetDropCaps(DropCaps)
checkPrivileges(AllowSUID, "--allow-setuid", func() {
engineConfig.SetAllowSUID(AllowSUID)
})
checkPrivileges(KeepPrivs, "--keep-privs", func() {
engineConfig.SetKeepPrivs(KeepPrivs)
})
engineConfig.SetNoPrivs(NoPrivs)
engineConfig.SetSecurity(Security)
engineConfig.SetShell(ShellPath)
engineConfig.SetLibrariesPath(ContainLibsPath)
if ShellPath != "" {
generator.AddProcessEnv("SINGULARITY_SHELL", ShellPath)
}
checkPrivileges(CgroupsPath != "", "--apply-cgroups", func() {
engineConfig.SetCgroupsPath(CgroupsPath)
})
if IsWritable && IsWritableTmpfs {
sylog.Warningf("Disabling --writable-tmpfs flag, mutually exclusive with --writable")
engineConfig.SetWritableTmpfs(false)
} else {
engineConfig.SetWritableTmpfs(IsWritableTmpfs)
}
homeFlag := cobraCmd.Flag("home")
engineConfig.SetCustomHome(homeFlag.Changed)
// set home directory for the targeted UID if it exists on host system
if !homeFlag.Changed && targetUID != 0 {
if targetUID > 500 {
if pwd, err := user.GetPwUID(uint32(targetUID)); err == nil {
sylog.Debugf("Target UID requested, set home directory to %s", pwd.Dir)
HomePath = pwd.Dir
engineConfig.SetCustomHome(true)
} else {
sylog.Verbosef("Home directory for UID %d not found, home won't be mounted", targetUID)
engineConfig.SetNoHome(true)
HomePath = "/"
}
} else {
sylog.Verbosef("System UID %d requested, home won't be mounted", targetUID)
engineConfig.SetNoHome(true)
HomePath = "/"
}
}
if Hostname != "" {
UtsNamespace = true
engineConfig.SetHostname(Hostname)
}
checkPrivileges(IsBoot, "--boot", func() {})
if IsContained || IsContainAll || IsBoot {
engineConfig.SetContain(true)
if IsContainAll {
PidNamespace = true
IpcNamespace = true
IsCleanEnv = true
}
}
engineConfig.SetScratchDir(ScratchPath)
engineConfig.SetWorkdir(WorkdirPath)
homeSlice := strings.Split(HomePath, ":")
if len(homeSlice) > 2 || len(homeSlice) == 0 {
sylog.Fatalf("home argument has incorrect number of elements: %v", len(homeSlice))
} | engineConfig.SetHomeDest(homeSlice[0])
} else {
engineConfig.SetHomeDest(homeSlice[1])
}
if !engineConfig.File.AllowSetuid || IsFakeroot {
UserNamespace = true
}
/* if name submitted, run as instance */
if name != "" {
PidNamespace = true
IpcNamespace = true
engineConfig.SetInstance(true)
engineConfig.SetBootInstance(IsBoot)
_, err := instance.Get(name, instance.SingSubDir)
if err == nil {
sylog.Fatalf("instance %s already exists", name)
}
if IsBoot {
UtsNamespace = true
NetNamespace = true
if Hostname == "" {
engineConfig.SetHostname(name)
}
if !KeepPrivs {
engineConfig.SetDropCaps("CAP_SYS_BOOT,CAP_SYS_RAWIO")
}
generator.SetProcessArgs([]string{"/sbin/init"})
}
pwd, err := user.GetPwUID(uint32(os.Getuid()))
if err != nil {
sylog.Fatalf("failed to retrieve user information for UID %d: %s", os.Getuid(), err)
}
procname, err = instance.ProcName(name, pwd.Name)
if err != nil {
sylog.Fatalf("%s", err)
}
} else {
generator.SetProcessArgs(args)
procname = "Singularity runtime parent"
}
if NetNamespace {
generator.AddOrReplaceLinuxNamespace("network", "")
}
if UtsNamespace {
generator.AddOrReplaceLinuxNamespace("uts", "")
}
if PidNamespace {
generator.AddOrReplaceLinuxNamespace("pid", "")
engineConfig.SetNoInit(NoInit)
}
if IpcNamespace {
generator.AddOrReplaceLinuxNamespace("ipc", "")
}
if !UserNamespace {
if _, err := os.Stat(starter); os.IsNotExist(err) {
sylog.Verbosef("starter-suid not found, using user namespace")
UserNamespace = true
}
}
if UserNamespace {
generator.AddOrReplaceLinuxNamespace("user", "")
starter = buildcfg.LIBEXECDIR + "/singularity/bin/starter"
if IsFakeroot {
generator.AddLinuxUIDMapping(uid, 0, 1)
generator.AddLinuxGIDMapping(gid, 0, 1)
} else {
generator.AddLinuxUIDMapping(uid, uid, 1)
generator.AddLinuxGIDMapping(gid, gid, 1)
}
}
// Copy and cache environment
environment := os.Environ()
// Clean environment
env.SetContainerEnv(&generator, environment, IsCleanEnv, engineConfig.GetHomeDest())
// force to use getwd syscall
os.Unsetenv("PWD")
if pwd, err := os.Getwd(); err == nil {
if PwdPath != "" {
generator.SetProcessCwd(PwdPath)
} else {
if engineConfig.GetContain() {
generator.SetProcessCwd(engineConfig.GetHomeDest())
} else {
generator.SetProcessCwd(pwd)
}
}
} else {
sylog.Warningf("can't determine current working directory: %s", err)
}
Env := []string{sylog.GetEnvVar()}
generator.AddProcessEnv("SINGULARITY_APPNAME", AppName)
// convert image file to sandbox if image contains
// a squashfs filesystem
if UserNamespace && fs.IsFile(image) {
unsquashfsPath := ""
if engineConfig.File.MksquashfsPath != "" {
d := filepath.Dir(engineConfig.File.MksquashfsPath)
unsquashfsPath = filepath.Join(d, "unsquashfs")
}
sylog.Verbosef("User namespace requested, convert image %s to sandbox", image)
sylog.Infof("Convert SIF file to sandbox...")
dir, err := convertImage(image, unsquashfsPath)
if err != nil {
sylog.Fatalf("while extracting %s: %s", image, err)
}
engineConfig.SetImage(dir)
engineConfig.SetDeleteImage(true)
generator.AddProcessEnv("SINGULARITY_CONTAINER", dir)
}
plugin.FlagHookCallbacks(engineConfig)
cfg := &config.Common{
EngineName: singularityConfig.Name,
ContainerID: name,
EngineConfig: engineConfig,
}
configData, err := json.Marshal(cfg)
if err != nil {
sylog.Fatalf("CLI Failed to marshal CommonEngineConfig: %s\n", err)
}
if engineConfig.GetInstance() {
stdout, stderr, err := instance.SetLogFile(name, int(uid), instance.SingSubDir)
if err != nil {
sylog.Fatalf("failed to create instance log files: %s", err)
}
start, err := stderr.Seek(0, io.SeekEnd)
if err != nil {
sylog.Warningf("failed to get standard error stream offset: %s", err)
}
cmd, err := exec.PipeCommand(starter, []string{procname}, Env, configData)
if err != nil {
sylog.Warningf("failed to prepare command: %s", err)
}
cmd.Stdout = stdout
cmd.Stderr = stderr
cmdErr := cmd.Run()
if sylog.GetLevel() != 0 {
// starter can exit a bit before all errors has been reported
// by instance process, wait a bit to catch all errors
time.Sleep(100 * time.Millisecond)
end, err := stderr.Seek(0, io.SeekEnd)
if err != nil {
sylog.Warningf("failed to get standard error stream offset: %s", err)
}
if end-start > 0 {
output := make([]byte, end-start)
stderr.ReadAt(output, start)
fmt.Println(string(output))
}
}
if cmdErr != nil {
sylog.Fatalf("failed to start instance: %s", cmdErr)
} else {
sylog.Verbosef("you will find instance output here: %s", stdout.Name())
sylog.Verbosef("you will find instance error here: %s", stderr.Name())
sylog.Infof("instance started successfully")
}
} else {
if err := exec.Pipe(starter, []string{procname}, Env, configData); err != nil {
sylog.Fatalf("%s", err)
}
}
} |
engineConfig.SetHomeSource(homeSlice[0])
if len(homeSlice) == 1 { | random_line_split |
actions_linux.go | // Copyright (c) 2019, Sylabs Inc. All rights reserved.
// This software is licensed under a 3-clause BSD license. Please consult the
// LICENSE.md file distributed with the sources of this project regarding your
// rights to use or distribute this software.
package cli
import (
"encoding/json"
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"strconv"
"strings"
"syscall"
"time"
"github.com/opencontainers/runtime-tools/generate"
"github.com/sylabs/singularity/internal/pkg/plugin"
"github.com/sylabs/singularity/pkg/image"
"github.com/sylabs/singularity/pkg/image/unpacker"
"github.com/sylabs/singularity/pkg/util/nvidia"
"github.com/spf13/cobra"
"github.com/sylabs/singularity/internal/pkg/buildcfg"
"github.com/sylabs/singularity/internal/pkg/instance"
"github.com/sylabs/singularity/internal/pkg/runtime/engines/config"
"github.com/sylabs/singularity/internal/pkg/runtime/engines/config/oci"
"github.com/sylabs/singularity/internal/pkg/security"
"github.com/sylabs/singularity/internal/pkg/sylog"
"github.com/sylabs/singularity/internal/pkg/util/env"
"github.com/sylabs/singularity/internal/pkg/util/exec"
"github.com/sylabs/singularity/internal/pkg/util/fs"
"github.com/sylabs/singularity/internal/pkg/util/user"
singularityConfig "github.com/sylabs/singularity/pkg/runtime/engines/singularity/config"
)
// EnsureRootPriv ensures that a command is executed with root privileges.
// To customize the output, arguments can be used to specify the context (e.g., "oci", "plugin"),
// where the first argument (string) will be displayed before the command itself.
func | (cmd *cobra.Command, args []string) {
if os.Geteuid() != 0 {
if len(args) >= 1 && len(args[0]) > 0 {
// The first argument is the context
sylog.Fatalf("command '%s %s' requires root privileges", args[0], cmd.Name())
} else {
sylog.Fatalf("command %s requires root privileges", cmd.Name())
}
}
}
func convertImage(filename string, unsquashfsPath string) (string, error) {
img, err := image.Init(filename, false)
if err != nil {
return "", fmt.Errorf("could not open image %s: %s", filename, err)
}
defer img.File.Close()
if !img.HasRootFs() {
return "", fmt.Errorf("no root filesystem found in %s", filename)
}
// squashfs only
if img.Partitions[0].Type != image.SQUASHFS {
return "", fmt.Errorf("not a squashfs root filesystem")
}
// create a reader for rootfs partition
reader, err := image.NewPartitionReader(img, "", 0)
if err != nil {
return "", fmt.Errorf("could not extract root filesystem: %s", err)
}
s := unpacker.NewSquashfs()
if !s.HasUnsquashfs() && unsquashfsPath != "" {
s.UnsquashfsPath = unsquashfsPath
}
// keep compatibility with v2
tmpdir := os.Getenv("SINGULARITY_LOCALCACHEDIR")
if tmpdir == "" {
tmpdir = os.Getenv("SINGULARITY_CACHEDIR")
}
// create temporary sandbox
dir, err := ioutil.TempDir(tmpdir, "rootfs-")
if err != nil {
return "", fmt.Errorf("could not create temporary sandbox: %s", err)
}
// extract root filesystem
if err := s.ExtractAll(reader, dir); err != nil {
os.RemoveAll(dir)
return "", fmt.Errorf("root filesystem extraction failed: %s", err)
}
return dir, err
}
// TODO: Let's stick this in another file so that that CLI is just CLI
func execStarter(cobraCmd *cobra.Command, image string, args []string, name string) {
targetUID := 0
targetGID := make([]int, 0)
procname := ""
uid := uint32(os.Getuid())
gid := uint32(os.Getgid())
// Are we running from a privileged account?
isPrivileged := uid == 0
checkPrivileges := func(cond bool, desc string, fn func()) {
if !cond {
return
}
if !isPrivileged {
sylog.Fatalf("%s requires root privileges", desc)
}
fn()
}
syscall.Umask(0022)
starter := buildcfg.LIBEXECDIR + "/singularity/bin/starter-suid"
engineConfig := singularityConfig.NewConfig()
configurationFile := buildcfg.SYSCONFDIR + "/singularity/singularity.conf"
if err := config.Parser(configurationFile, engineConfig.File); err != nil {
sylog.Fatalf("Unable to parse singularity.conf file: %s", err)
}
ociConfig := &oci.Config{}
generator := generate.Generator{Config: &ociConfig.Spec}
engineConfig.OciConfig = ociConfig
generator.SetProcessArgs(args)
uidParam := security.GetParam(Security, "uid")
gidParam := security.GetParam(Security, "gid")
// handle target UID/GID for root user
checkPrivileges(uidParam != "", "uid security feature", func() {
u, err := strconv.ParseUint(uidParam, 10, 32)
if err != nil {
sylog.Fatalf("failed to parse provided UID")
}
targetUID = int(u)
uid = uint32(targetUID)
engineConfig.SetTargetUID(targetUID)
})
checkPrivileges(gidParam != "", "gid security feature", func() {
gids := strings.Split(gidParam, ":")
for _, id := range gids {
g, err := strconv.ParseUint(id, 10, 32)
if err != nil {
sylog.Fatalf("failed to parse provided GID")
}
targetGID = append(targetGID, int(g))
}
if len(gids) > 0 {
gid = uint32(targetGID[0])
}
engineConfig.SetTargetGID(targetGID)
})
if strings.HasPrefix(image, "instance://") {
if name != "" {
sylog.Fatalf("Starting an instance from another is not allowed")
}
instanceName := instance.ExtractName(image)
file, err := instance.Get(instanceName, instance.SingSubDir)
if err != nil {
sylog.Fatalf("%s", err)
}
if !file.Privileged {
UserNamespace = true
}
generator.AddProcessEnv("SINGULARITY_CONTAINER", file.Image)
generator.AddProcessEnv("SINGULARITY_NAME", filepath.Base(file.Image))
engineConfig.SetImage(image)
engineConfig.SetInstanceJoin(true)
} else {
abspath, err := filepath.Abs(image)
generator.AddProcessEnv("SINGULARITY_CONTAINER", abspath)
generator.AddProcessEnv("SINGULARITY_NAME", filepath.Base(abspath))
if err != nil {
sylog.Fatalf("Failed to determine image absolute path for %s: %s", image, err)
}
engineConfig.SetImage(abspath)
}
if !NoNvidia && (Nvidia || engineConfig.File.AlwaysUseNv) {
userPath := os.Getenv("USER_PATH")
if engineConfig.File.AlwaysUseNv {
sylog.Verbosef("'always use nv = yes' found in singularity.conf")
sylog.Verbosef("binding nvidia files into container")
}
libs, bins, err := nvidia.Paths(buildcfg.SINGULARITY_CONFDIR, userPath)
if err != nil {
sylog.Warningf("Unable to capture NVIDIA bind points: %v", err)
} else {
if len(bins) == 0 {
sylog.Infof("Could not find any NVIDIA binaries on this host!")
} else {
if IsWritable {
sylog.Warningf("NVIDIA binaries may not be bound with --writable")
}
for _, binary := range bins {
usrBinBinary := filepath.Join("/usr/bin", filepath.Base(binary))
bind := strings.Join([]string{binary, usrBinBinary}, ":")
BindPaths = append(BindPaths, bind)
}
}
if len(libs) == 0 {
sylog.Warningf("Could not find any NVIDIA libraries on this host!")
sylog.Warningf("You may need to edit %v/nvliblist.conf", buildcfg.SINGULARITY_CONFDIR)
} else {
ContainLibsPath = append(ContainLibsPath, libs...)
}
}
}
engineConfig.SetBindPath(BindPaths)
engineConfig.SetNetwork(Network)
engineConfig.SetDNS(DNS)
engineConfig.SetNetworkArgs(NetworkArgs)
engineConfig.SetOverlayImage(OverlayPath)
engineConfig.SetWritableImage(IsWritable)
engineConfig.SetNoHome(NoHome)
engineConfig.SetNv(Nvidia)
engineConfig.SetAddCaps(AddCaps)
engineConfig.SetDropCaps(DropCaps)
checkPrivileges(AllowSUID, "--allow-setuid", func() {
engineConfig.SetAllowSUID(AllowSUID)
})
checkPrivileges(KeepPrivs, "--keep-privs", func() {
engineConfig.SetKeepPrivs(KeepPrivs)
})
engineConfig.SetNoPrivs(NoPrivs)
engineConfig.SetSecurity(Security)
engineConfig.SetShell(ShellPath)
engineConfig.SetLibrariesPath(ContainLibsPath)
if ShellPath != "" {
generator.AddProcessEnv("SINGULARITY_SHELL", ShellPath)
}
checkPrivileges(CgroupsPath != "", "--apply-cgroups", func() {
engineConfig.SetCgroupsPath(CgroupsPath)
})
if IsWritable && IsWritableTmpfs {
sylog.Warningf("Disabling --writable-tmpfs flag, mutually exclusive with --writable")
engineConfig.SetWritableTmpfs(false)
} else {
engineConfig.SetWritableTmpfs(IsWritableTmpfs)
}
homeFlag := cobraCmd.Flag("home")
engineConfig.SetCustomHome(homeFlag.Changed)
// set home directory for the targeted UID if it exists on host system
if !homeFlag.Changed && targetUID != 0 {
if targetUID > 500 {
if pwd, err := user.GetPwUID(uint32(targetUID)); err == nil {
sylog.Debugf("Target UID requested, set home directory to %s", pwd.Dir)
HomePath = pwd.Dir
engineConfig.SetCustomHome(true)
} else {
sylog.Verbosef("Home directory for UID %d not found, home won't be mounted", targetUID)
engineConfig.SetNoHome(true)
HomePath = "/"
}
} else {
sylog.Verbosef("System UID %d requested, home won't be mounted", targetUID)
engineConfig.SetNoHome(true)
HomePath = "/"
}
}
if Hostname != "" {
UtsNamespace = true
engineConfig.SetHostname(Hostname)
}
checkPrivileges(IsBoot, "--boot", func() {})
if IsContained || IsContainAll || IsBoot {
engineConfig.SetContain(true)
if IsContainAll {
PidNamespace = true
IpcNamespace = true
IsCleanEnv = true
}
}
engineConfig.SetScratchDir(ScratchPath)
engineConfig.SetWorkdir(WorkdirPath)
homeSlice := strings.Split(HomePath, ":")
if len(homeSlice) > 2 || len(homeSlice) == 0 {
sylog.Fatalf("home argument has incorrect number of elements: %v", len(homeSlice))
}
engineConfig.SetHomeSource(homeSlice[0])
if len(homeSlice) == 1 {
engineConfig.SetHomeDest(homeSlice[0])
} else {
engineConfig.SetHomeDest(homeSlice[1])
}
if !engineConfig.File.AllowSetuid || IsFakeroot {
UserNamespace = true
}
/* if name submitted, run as instance */
if name != "" {
PidNamespace = true
IpcNamespace = true
engineConfig.SetInstance(true)
engineConfig.SetBootInstance(IsBoot)
_, err := instance.Get(name, instance.SingSubDir)
if err == nil {
sylog.Fatalf("instance %s already exists", name)
}
if IsBoot {
UtsNamespace = true
NetNamespace = true
if Hostname == "" {
engineConfig.SetHostname(name)
}
if !KeepPrivs {
engineConfig.SetDropCaps("CAP_SYS_BOOT,CAP_SYS_RAWIO")
}
generator.SetProcessArgs([]string{"/sbin/init"})
}
pwd, err := user.GetPwUID(uint32(os.Getuid()))
if err != nil {
sylog.Fatalf("failed to retrieve user information for UID %d: %s", os.Getuid(), err)
}
procname, err = instance.ProcName(name, pwd.Name)
if err != nil {
sylog.Fatalf("%s", err)
}
} else {
generator.SetProcessArgs(args)
procname = "Singularity runtime parent"
}
if NetNamespace {
generator.AddOrReplaceLinuxNamespace("network", "")
}
if UtsNamespace {
generator.AddOrReplaceLinuxNamespace("uts", "")
}
if PidNamespace {
generator.AddOrReplaceLinuxNamespace("pid", "")
engineConfig.SetNoInit(NoInit)
}
if IpcNamespace {
generator.AddOrReplaceLinuxNamespace("ipc", "")
}
if !UserNamespace {
if _, err := os.Stat(starter); os.IsNotExist(err) {
sylog.Verbosef("starter-suid not found, using user namespace")
UserNamespace = true
}
}
if UserNamespace {
generator.AddOrReplaceLinuxNamespace("user", "")
starter = buildcfg.LIBEXECDIR + "/singularity/bin/starter"
if IsFakeroot {
generator.AddLinuxUIDMapping(uid, 0, 1)
generator.AddLinuxGIDMapping(gid, 0, 1)
} else {
generator.AddLinuxUIDMapping(uid, uid, 1)
generator.AddLinuxGIDMapping(gid, gid, 1)
}
}
// Copy and cache environment
environment := os.Environ()
// Clean environment
env.SetContainerEnv(&generator, environment, IsCleanEnv, engineConfig.GetHomeDest())
// force to use getwd syscall
os.Unsetenv("PWD")
if pwd, err := os.Getwd(); err == nil {
if PwdPath != "" {
generator.SetProcessCwd(PwdPath)
} else {
if engineConfig.GetContain() {
generator.SetProcessCwd(engineConfig.GetHomeDest())
} else {
generator.SetProcessCwd(pwd)
}
}
} else {
sylog.Warningf("can't determine current working directory: %s", err)
}
Env := []string{sylog.GetEnvVar()}
generator.AddProcessEnv("SINGULARITY_APPNAME", AppName)
// convert image file to sandbox if image contains
// a squashfs filesystem
if UserNamespace && fs.IsFile(image) {
unsquashfsPath := ""
if engineConfig.File.MksquashfsPath != "" {
d := filepath.Dir(engineConfig.File.MksquashfsPath)
unsquashfsPath = filepath.Join(d, "unsquashfs")
}
sylog.Verbosef("User namespace requested, convert image %s to sandbox", image)
sylog.Infof("Convert SIF file to sandbox...")
dir, err := convertImage(image, unsquashfsPath)
if err != nil {
sylog.Fatalf("while extracting %s: %s", image, err)
}
engineConfig.SetImage(dir)
engineConfig.SetDeleteImage(true)
generator.AddProcessEnv("SINGULARITY_CONTAINER", dir)
}
plugin.FlagHookCallbacks(engineConfig)
cfg := &config.Common{
EngineName: singularityConfig.Name,
ContainerID: name,
EngineConfig: engineConfig,
}
configData, err := json.Marshal(cfg)
if err != nil {
sylog.Fatalf("CLI Failed to marshal CommonEngineConfig: %s\n", err)
}
if engineConfig.GetInstance() {
stdout, stderr, err := instance.SetLogFile(name, int(uid), instance.SingSubDir)
if err != nil {
sylog.Fatalf("failed to create instance log files: %s", err)
}
start, err := stderr.Seek(0, io.SeekEnd)
if err != nil {
sylog.Warningf("failed to get standard error stream offset: %s", err)
}
cmd, err := exec.PipeCommand(starter, []string{procname}, Env, configData)
if err != nil {
sylog.Warningf("failed to prepare command: %s", err)
}
cmd.Stdout = stdout
cmd.Stderr = stderr
cmdErr := cmd.Run()
if sylog.GetLevel() != 0 {
// starter can exit a bit before all errors has been reported
// by instance process, wait a bit to catch all errors
time.Sleep(100 * time.Millisecond)
end, err := stderr.Seek(0, io.SeekEnd)
if err != nil {
sylog.Warningf("failed to get standard error stream offset: %s", err)
}
if end-start > 0 {
output := make([]byte, end-start)
stderr.ReadAt(output, start)
fmt.Println(string(output))
}
}
if cmdErr != nil {
sylog.Fatalf("failed to start instance: %s", cmdErr)
} else {
sylog.Verbosef("you will find instance output here: %s", stdout.Name())
sylog.Verbosef("you will find instance error here: %s", stderr.Name())
sylog.Infof("instance started successfully")
}
} else {
if err := exec.Pipe(starter, []string{procname}, Env, configData); err != nil {
sylog.Fatalf("%s", err)
}
}
}
| EnsureRootPriv | identifier_name |
plugin.py | import os
import sys
import re
import py
import pytest
import logging
import time
import datetime
import argparse
from builtins import object, int
from past.builtins import basestring
def pytest_addhooks(pluginmanager):
pluginmanager.add_hookspecs(LoggerHookspec)
def _late_addoptions(parser, logcfg):
"""Add options to control logger"""
parser.addini(
name='logger_logsdir',
help='base directory with log files for file loggers [basetemp]',
default=None,
)
group = parser.getgroup('logger')
group.addoption('--logger-logsdir',
help='pick you own logs directory instead of default '
'directory under session tmpdir')
if logcfg._enabled:
parser = _log_option_parser(logcfg._loggers)
group.addoption('--loggers',
default=parser(logcfg._log_option_default),
type=parser,
metavar='LOGGER,LOGGER.LEVEL,...',
help='comma delimited list of loggers optionally suffixed with level '
'preceded by a dot. Levels can be lower or uppercase, or numeric. '
'For example: "logger1,logger2.info,logger3.FATAL,logger4.25"')
@pytest.hookimpl(trylast=True)
def pytest_load_initial_conftests(early_config, parser, args):
logcfg = LoggerConfig()
early_config.hook.pytest_logger_config(logger_config=logcfg)
early_config.pluginmanager.register(EarlyLoggerPlugin(logcfg), '_early_logger')
_late_addoptions(parser, logcfg)
def pytest_configure(config):
early_logger = config.pluginmanager.getplugin('_early_logger')
config.pluginmanager.register(LoggerPlugin(config, early_logger._logcfg), '_logger')
class EarlyLoggerPlugin(object):
def __init__(self, logcfg):
self._logcfg = logcfg
class LoggerPlugin(object):
def __init__(self, config, logcfg):
self._config = config
self._logdirlinks = config.hook.pytest_logger_logdirlink(config=config)
self._loggers = _loggers_from_logcfg(logcfg, config.getoption('loggers')) if logcfg._enabled else None
self._formatter_class = logcfg._formatter_class or DefaultFormatter
self._logsdir = None
self._split_by_outcome_subdir = logcfg._split_by_outcome_subdir
self._split_by_outcome_outcomes = logcfg._split_by_outcome_outcomes
def logsdir(self):
ldir = self._logsdir
if ldir:
return ldir
logger_logsdir = self._config.getoption('logger_logsdir')
if not logger_logsdir:
logger_logsdir = self._config.getini('logger_logsdir')
if not logger_logsdir:
logger_logsdir = self._config.hook.pytest_logger_logsdir(config=self._config)
if logger_logsdir:
ldir = _make_logsdir_dir(logger_logsdir)
else:
ldir = _make_logsdir_tmpdir(self._config._tmpdirhandler)
self._logsdir = ldir
for link in self._logdirlinks:
_refresh_link(str(ldir), link)
return ldir
def pytest_runtest_setup(self, item):
loggers = _choose_loggers(self._loggers, _loggers_from_hooks(item))
formatter = self._formatter_class()
item._logger = state = LoggerState(item=item,
stdoutloggers=loggers.stdout,
fileloggers=loggers.file,
formatter=formatter)
state.on_setup()
def pytest_runtest_teardown(self, item, nextitem):
logger = getattr(item, '_logger', None)
if logger:
logger.on_teardown()
@pytest.mark.hookwrapper
def pytest_runtest_makereport(self, item, call):
outcome = yield
tr = outcome.get_result()
logger = getattr(item, '_logger', None)
if logger:
if self._logsdir and self._split_by_outcome_subdir and tr.outcome in self._split_by_outcome_outcomes:
split_by_outcome_logdir = self._logsdir.join(self._split_by_outcome_subdir, tr.outcome)
nodeid = _sanitize_nodeid(item.nodeid)
nodepath = os.path.dirname(nodeid)
split_by_outcome_logdir.join(nodepath).ensure(dir=1)
destdir_relpath = os.path.relpath(str(self._logsdir.join(nodeid)),
str(split_by_outcome_logdir.join(nodepath)))
_refresh_link(destdir_relpath, str(split_by_outcome_logdir.join(nodeid)))
if call.when == 'teardown':
logger.on_makereport()
class LoggerState(object):
def __init__(self, item, stdoutloggers, fileloggers, formatter):
self._put_newlines = bool(item.config.option.capture == 'no' and stdoutloggers)
self.handlers = _make_handlers(stdoutloggers, fileloggers, item, formatter)
self.root_enabler = RootEnabler(bool(stdoutloggers and fileloggers))
def put_newline(self):
if self._put_newlines:
sys.stdout.write('\n')
def on_setup(self):
self.put_newline()
_enable(self.handlers)
self.root_enabler.enable()
def on_teardown(self):
self.put_newline()
def on_makereport(self):
self.root_enabler.disable()
_disable(self.handlers)
class RootEnabler(object):
def __init__(self, enabled):
self._enabled = enabled
self._root_level = logging.root.level
def enable(self):
if self._enabled:
self._root_level = logging.root.level
logging.root.setLevel(logging.NOTSET) # stops root logger from blocking logs
def disable(self):
if self._enabled:
logging.root.setLevel(self._root_level)
class Loggers(object):
def __init__(self, stdout, file_):
self.stdout = stdout
self.file = file_
def __bool__(self):
return bool(self.stdout) or bool(self.file)
class LoggerConfig(object):
"""Configuration of logging to stdout and filesystem."""
def __init__(self):
self._enabled = False
self._loggers = []
self._formatter_class = None
self._log_option_default = ''
self._split_by_outcome_subdir = None
self._split_by_outcome_outcomes = []
def add_loggers(self, loggers, stdout_level=logging.NOTSET, file_level=logging.NOTSET):
"""Adds loggers for stdout/filesystem handling.
Stdout: loggers will log to stdout only when mentioned in `loggers` option. If they're
mentioned without explicit level, `stdout_level` will be used.
Filesystem: loggers will log to files at `file_level`.
:arg loggers: List of logger names.
:arg stdout_level: Default level at which stdout handlers will pass logs.
By default: `logging.NOTSET`, which means: pass everything.
:arg file_level: Level at which filesystem handlers will pass logs.
By default: `logging.NOTSET`, which means: pass everything.
"""
self._enabled = True
self._loggers.append((loggers, _sanitize_level(stdout_level), _sanitize_level(file_level)))
def set_formatter_class(self, formatter_class):
"""Sets the `logging.Formatter` class to be used by all loggers.
:arg formatter_class: The `logging.Formatter` class
"""
if isinstance(formatter_class, logging.Formatter):
raise ValueError("Got a formatter instance instead of its class !")
if not issubclass(formatter_class, logging.Formatter):
raise ValueError("Formatter should be a class inheriting from logging.Formatter")
self._formatter_class = formatter_class
def set_log_option_default(self, value):
""" Sets default value of `log` option."""
self._log_option_default = value
def split_by_outcome(self, outcomes=None, subdir='by_outcome'):
"""Makes a directory inside main logdir where logs are further split by test outcome
:param outcomes: list of test outcomes to be handled (failed/passed/skipped)
:param subdir: name for the subdirectory in main log directory
"""
if outcomes is not None:
allowed_outcomes = ['passed', 'failed', 'skipped']
unexpected_outcomes = set(outcomes) - set(allowed_outcomes)
if unexpected_outcomes:
raise ValueError('got unexpected_outcomes: <' + str(list(unexpected_outcomes)) + '>')
self._split_by_outcome_outcomes = outcomes
else:
self._split_by_outcome_outcomes = ['failed']
self._split_by_outcome_subdir = subdir
class LoggerHookspec(object):
def pytest_logger_config(self, logger_config):
""" called before cmdline options parsing. Accepts terse configuration
of both stdout and file logging, adds cmdline options to manipulate
stdout logging. Cannot be used together with \\*loggers hooks.
:arg logger_config: instance of :py:class:`LoggerConfig`, allows
setting loggers for stdout and file handling and their levels.
"""
def pytest_logger_stdoutloggers(self, item):
""" called before testcase setup. If implemented, given loggers
will emit their output to terminal output. Cannot be used together with
logger_config hook.
:arg item: test item for which handlers are to be setup.
:return list: List should contain logger name strings
or tuples with logger name string and logging level.
"""
def pytest_logger_fileloggers(self, item):
""" called before testcase setup. If implemented, given loggers
will emit their output to files within logs temporary directory.
Cannot be used together with logger_config hook.
:arg item: test item for which handlers are to be setup.
:return list: List should contain logger name strings
or tuples with logger name string and logging level.
"""
def | (self, config):
""" called after cmdline options parsing.
If implemented, symlink to logs directory will be created.
:arg config: pytest config object, holds e.g. options
:return string: Absolute path of requested link to logs directory.
"""
@pytest.hookspec(firstresult=True)
def pytest_logger_logsdir(self, config):
""" called after cmdline options parsing.
If implemented, place logs into the location returned. This is similar
to using --logger-logsdir or the logger_logsdir ini option, but will
only be used if those are not.
Additionally, if multiple implementations of this hook are found, only
the first non-None value will be used.
:arg config: pytest config object, holds e.g. options
:return string: Absolute path of logs directory.
"""
class DefaultFormatter(logging.Formatter):
short_level_names = {
logging.FATAL: 'ftl',
logging.ERROR: 'err',
logging.WARN: 'wrn',
logging.INFO: 'inf',
logging.DEBUG: 'dbg',
}
format_string = '%(asctime)s %(levelshortname)s %(name)s: %(message)s'
def __init__(self):
logging.Formatter.__init__(self, DefaultFormatter.format_string)
self._start = time.time()
def formatTime(self, record, datefmt=None):
ct = record.created - self._start
dt = datetime.datetime.utcfromtimestamp(ct)
return dt.strftime("%M:%S.%f")[:-3] # omit useconds, leave mseconds
def format(self, record):
record.levelshortname = DefaultFormatter.short_level_names.get(record.levelno,
'l%s' % record.levelno)
return logging.Formatter.format(self, record)
@pytest.fixture
def logdir(request):
return _make_logdir(request._pyfuncitem)
def _sanitize_nodeid(filename):
filename = filename.replace('::()::', '/')
filename = filename.replace('::', '/')
filename = re.sub(r'\[(.+)\]', r'-\1', filename)
return filename
def _sanitize_level(level, raises=True):
if isinstance(level, basestring):
try:
return int(level)
except ValueError:
int_level = getattr(logging, level.upper(), None)
if int_level is not None:
return int_level
elif isinstance(level, int):
return level
if raises:
raise TypeError('bad logging level, expected int or string, got "%s"' % level)
def _refresh_link(source, link_name):
try:
os.unlink(link_name)
except OSError:
pass
try:
os.symlink(source, link_name)
except (OSError, AttributeError, NotImplementedError):
pass
def _make_logsdir_tmpdir(tmpdirhandler):
logsdir = tmpdirhandler.getbasetemp()
if logsdir.basename.startswith('popen-gw'):
logsdir = logsdir.join('..')
logsdir = logsdir.join('logs').ensure(dir=1)
return logsdir
def _make_logsdir_dir(dstname, cleandir=True):
logsdir = py.path.local(dstname)
if cleandir:
if logsdir.check():
logsdir.remove()
logsdir.mkdir()
return logsdir
def _make_logdir(item):
plugin = item.config.pluginmanager.getplugin('_logger')
return plugin.logsdir().join(_sanitize_nodeid(item.nodeid)).ensure(dir=1)
def _enable(handlers):
for hdlr in handlers:
hdlr.logger.addHandler(hdlr)
def _disable(handlers):
for hdlr in handlers:
hdlr.logger.removeHandler(hdlr)
hdlr.close()
def _log_option_parser(loggers):
def parser(arg):
def to_out(elem):
def find_row(name):
return next((row for row in loggers if name in row[0]), None)
def bad_logger(name):
names = [x for row in loggers for x in row[0]]
pretty_names = '(' + ', '.join(names) + ')'
raise argparse.ArgumentTypeError(
'wrong logger, expected %s, got "%s"' % (pretty_names, name))
def bad_level(level):
raise argparse.ArgumentTypeError(
'wrong level, expected (INFO, warn, 15, ...), got "%s"' % level)
row = find_row(elem)
if row:
return elem, row[1]
if '.' in elem:
elem_name, elem_level = elem.rsplit('.', 1)
row = find_row(elem_name)
level = _sanitize_level(elem_level, raises=False)
if row and level is not None:
return elem_name, level
if row:
bad_level(elem_level)
if level is not None:
bad_logger(elem_name)
bad_logger(elem)
return [to_out(x) for x in arg.split(',') if x]
return parser
def _loggers_from_logcfg(logcfg, logopt):
def to_stdout(loggers, opt):
def one(loggers, one):
if isinstance(one, basestring):
return one, next(row for row in loggers if one in row[0])[1]
else:
return one
return [one(loggers, x) for x in opt]
def to_file(loggers):
return [(name, row[2]) for row in loggers for name in row[0]]
return Loggers(
stdout=to_stdout(logcfg._loggers, logopt),
file_=to_file(logcfg._loggers)
)
def _loggers_from_hooks(item):
def to_loggers(configs_lists):
def to_logger_and_level(cfg):
if isinstance(cfg, basestring):
name, level = cfg, logging.NOTSET
else:
name, level = cfg
return name, level
return [to_logger_and_level(cfg) for configs in configs_lists for cfg in configs]
return Loggers(
stdout=to_loggers(item.config.hook.pytest_logger_stdoutloggers(item=item)),
file_=to_loggers(item.config.hook.pytest_logger_fileloggers(item=item))
)
def _choose_loggers(config_loggers, hook_loggers):
assert (not config_loggers) or (not hook_loggers),\
'pytest_logger_config and pytest_logger_*loggers hooks used at the same time'
return config_loggers or hook_loggers
def _make_handlers(stdoutloggers, fileloggers, item, formatter):
handlers = []
if stdoutloggers:
handlers += _make_stdout_handlers(stdoutloggers, formatter)
if fileloggers:
logdir = _make_logdir(item)
handlers += _make_file_handlers(fileloggers, formatter, logdir)
return handlers
def _make_stdout_handlers(loggers, fmt):
def make_handler(logger_and_level, fmt):
name, level = logger_and_level
logger = logging.getLogger(name)
handler = logging.StreamHandler(sys.stdout)
handler.setFormatter(fmt)
handler.setLevel(level)
handler.logger = logger
return handler
return [make_handler(lgr, fmt) for lgr in loggers]
def _make_file_handlers(loggers, fmt, logdir):
def make_handler(logdir, logger_and_level, fmt):
name, level = logger_and_level
logger = logging.getLogger(name)
name = name or 'logs'
logfile = str(logdir.join(name))
handler = logging.FileHandler(filename=logfile, mode='w', delay=True)
handler.setFormatter(fmt)
handler.setLevel(level)
handler.logger = logger
return handler
return [make_handler(logdir, lgr, fmt) for lgr in loggers]
| pytest_logger_logdirlink | identifier_name |
plugin.py | import os
import sys
import re
import py
import pytest
import logging
import time
import datetime
import argparse
from builtins import object, int
from past.builtins import basestring
def pytest_addhooks(pluginmanager):
pluginmanager.add_hookspecs(LoggerHookspec)
def _late_addoptions(parser, logcfg):
"""Add options to control logger"""
parser.addini(
name='logger_logsdir',
help='base directory with log files for file loggers [basetemp]',
default=None,
)
group = parser.getgroup('logger')
group.addoption('--logger-logsdir',
help='pick you own logs directory instead of default '
'directory under session tmpdir')
if logcfg._enabled:
parser = _log_option_parser(logcfg._loggers)
group.addoption('--loggers',
default=parser(logcfg._log_option_default),
type=parser,
metavar='LOGGER,LOGGER.LEVEL,...',
help='comma delimited list of loggers optionally suffixed with level '
'preceded by a dot. Levels can be lower or uppercase, or numeric. '
'For example: "logger1,logger2.info,logger3.FATAL,logger4.25"')
@pytest.hookimpl(trylast=True)
def pytest_load_initial_conftests(early_config, parser, args):
logcfg = LoggerConfig()
early_config.hook.pytest_logger_config(logger_config=logcfg)
early_config.pluginmanager.register(EarlyLoggerPlugin(logcfg), '_early_logger')
_late_addoptions(parser, logcfg)
def pytest_configure(config):
early_logger = config.pluginmanager.getplugin('_early_logger')
config.pluginmanager.register(LoggerPlugin(config, early_logger._logcfg), '_logger')
class EarlyLoggerPlugin(object):
def __init__(self, logcfg):
self._logcfg = logcfg
class LoggerPlugin(object):
def __init__(self, config, logcfg):
self._config = config
self._logdirlinks = config.hook.pytest_logger_logdirlink(config=config)
self._loggers = _loggers_from_logcfg(logcfg, config.getoption('loggers')) if logcfg._enabled else None
self._formatter_class = logcfg._formatter_class or DefaultFormatter
self._logsdir = None
self._split_by_outcome_subdir = logcfg._split_by_outcome_subdir
self._split_by_outcome_outcomes = logcfg._split_by_outcome_outcomes
def logsdir(self):
ldir = self._logsdir
if ldir:
return ldir
logger_logsdir = self._config.getoption('logger_logsdir')
if not logger_logsdir:
logger_logsdir = self._config.getini('logger_logsdir')
if not logger_logsdir:
logger_logsdir = self._config.hook.pytest_logger_logsdir(config=self._config)
if logger_logsdir:
ldir = _make_logsdir_dir(logger_logsdir)
else:
ldir = _make_logsdir_tmpdir(self._config._tmpdirhandler)
self._logsdir = ldir
for link in self._logdirlinks:
_refresh_link(str(ldir), link)
return ldir
def pytest_runtest_setup(self, item):
loggers = _choose_loggers(self._loggers, _loggers_from_hooks(item))
formatter = self._formatter_class()
item._logger = state = LoggerState(item=item,
stdoutloggers=loggers.stdout,
fileloggers=loggers.file,
formatter=formatter)
state.on_setup()
def pytest_runtest_teardown(self, item, nextitem):
logger = getattr(item, '_logger', None)
if logger:
logger.on_teardown()
@pytest.mark.hookwrapper
def pytest_runtest_makereport(self, item, call):
outcome = yield
tr = outcome.get_result()
logger = getattr(item, '_logger', None)
if logger:
if self._logsdir and self._split_by_outcome_subdir and tr.outcome in self._split_by_outcome_outcomes:
split_by_outcome_logdir = self._logsdir.join(self._split_by_outcome_subdir, tr.outcome)
nodeid = _sanitize_nodeid(item.nodeid)
nodepath = os.path.dirname(nodeid)
split_by_outcome_logdir.join(nodepath).ensure(dir=1)
destdir_relpath = os.path.relpath(str(self._logsdir.join(nodeid)),
str(split_by_outcome_logdir.join(nodepath)))
_refresh_link(destdir_relpath, str(split_by_outcome_logdir.join(nodeid)))
if call.when == 'teardown':
logger.on_makereport()
class LoggerState(object):
def __init__(self, item, stdoutloggers, fileloggers, formatter):
self._put_newlines = bool(item.config.option.capture == 'no' and stdoutloggers)
self.handlers = _make_handlers(stdoutloggers, fileloggers, item, formatter)
self.root_enabler = RootEnabler(bool(stdoutloggers and fileloggers))
def put_newline(self):
if self._put_newlines:
sys.stdout.write('\n')
def on_setup(self):
self.put_newline()
_enable(self.handlers)
self.root_enabler.enable()
def on_teardown(self):
self.put_newline()
def on_makereport(self):
self.root_enabler.disable()
_disable(self.handlers)
class RootEnabler(object):
def __init__(self, enabled):
self._enabled = enabled
self._root_level = logging.root.level
def enable(self):
if self._enabled:
self._root_level = logging.root.level
logging.root.setLevel(logging.NOTSET) # stops root logger from blocking logs
def disable(self):
if self._enabled:
logging.root.setLevel(self._root_level)
class Loggers(object):
def __init__(self, stdout, file_):
self.stdout = stdout
self.file = file_
def __bool__(self):
return bool(self.stdout) or bool(self.file)
class LoggerConfig(object):
"""Configuration of logging to stdout and filesystem."""
def __init__(self):
self._enabled = False
self._loggers = []
self._formatter_class = None
self._log_option_default = ''
self._split_by_outcome_subdir = None
self._split_by_outcome_outcomes = []
def add_loggers(self, loggers, stdout_level=logging.NOTSET, file_level=logging.NOTSET):
"""Adds loggers for stdout/filesystem handling.
Stdout: loggers will log to stdout only when mentioned in `loggers` option. If they're
mentioned without explicit level, `stdout_level` will be used.
Filesystem: loggers will log to files at `file_level`.
:arg loggers: List of logger names.
:arg stdout_level: Default level at which stdout handlers will pass logs.
By default: `logging.NOTSET`, which means: pass everything.
:arg file_level: Level at which filesystem handlers will pass logs.
By default: `logging.NOTSET`, which means: pass everything.
"""
self._enabled = True
self._loggers.append((loggers, _sanitize_level(stdout_level), _sanitize_level(file_level)))
def set_formatter_class(self, formatter_class):
"""Sets the `logging.Formatter` class to be used by all loggers.
:arg formatter_class: The `logging.Formatter` class
"""
if isinstance(formatter_class, logging.Formatter):
raise ValueError("Got a formatter instance instead of its class !")
if not issubclass(formatter_class, logging.Formatter):
raise ValueError("Formatter should be a class inheriting from logging.Formatter") | """ Sets default value of `log` option."""
self._log_option_default = value
def split_by_outcome(self, outcomes=None, subdir='by_outcome'):
"""Makes a directory inside main logdir where logs are further split by test outcome
:param outcomes: list of test outcomes to be handled (failed/passed/skipped)
:param subdir: name for the subdirectory in main log directory
"""
if outcomes is not None:
allowed_outcomes = ['passed', 'failed', 'skipped']
unexpected_outcomes = set(outcomes) - set(allowed_outcomes)
if unexpected_outcomes:
raise ValueError('got unexpected_outcomes: <' + str(list(unexpected_outcomes)) + '>')
self._split_by_outcome_outcomes = outcomes
else:
self._split_by_outcome_outcomes = ['failed']
self._split_by_outcome_subdir = subdir
class LoggerHookspec(object):
def pytest_logger_config(self, logger_config):
""" called before cmdline options parsing. Accepts terse configuration
of both stdout and file logging, adds cmdline options to manipulate
stdout logging. Cannot be used together with \\*loggers hooks.
:arg logger_config: instance of :py:class:`LoggerConfig`, allows
setting loggers for stdout and file handling and their levels.
"""
def pytest_logger_stdoutloggers(self, item):
""" called before testcase setup. If implemented, given loggers
will emit their output to terminal output. Cannot be used together with
logger_config hook.
:arg item: test item for which handlers are to be setup.
:return list: List should contain logger name strings
or tuples with logger name string and logging level.
"""
def pytest_logger_fileloggers(self, item):
""" called before testcase setup. If implemented, given loggers
will emit their output to files within logs temporary directory.
Cannot be used together with logger_config hook.
:arg item: test item for which handlers are to be setup.
:return list: List should contain logger name strings
or tuples with logger name string and logging level.
"""
def pytest_logger_logdirlink(self, config):
""" called after cmdline options parsing.
If implemented, symlink to logs directory will be created.
:arg config: pytest config object, holds e.g. options
:return string: Absolute path of requested link to logs directory.
"""
@pytest.hookspec(firstresult=True)
def pytest_logger_logsdir(self, config):
""" called after cmdline options parsing.
If implemented, place logs into the location returned. This is similar
to using --logger-logsdir or the logger_logsdir ini option, but will
only be used if those are not.
Additionally, if multiple implementations of this hook are found, only
the first non-None value will be used.
:arg config: pytest config object, holds e.g. options
:return string: Absolute path of logs directory.
"""
class DefaultFormatter(logging.Formatter):
short_level_names = {
logging.FATAL: 'ftl',
logging.ERROR: 'err',
logging.WARN: 'wrn',
logging.INFO: 'inf',
logging.DEBUG: 'dbg',
}
format_string = '%(asctime)s %(levelshortname)s %(name)s: %(message)s'
def __init__(self):
logging.Formatter.__init__(self, DefaultFormatter.format_string)
self._start = time.time()
def formatTime(self, record, datefmt=None):
ct = record.created - self._start
dt = datetime.datetime.utcfromtimestamp(ct)
return dt.strftime("%M:%S.%f")[:-3] # omit useconds, leave mseconds
def format(self, record):
record.levelshortname = DefaultFormatter.short_level_names.get(record.levelno,
'l%s' % record.levelno)
return logging.Formatter.format(self, record)
@pytest.fixture
def logdir(request):
return _make_logdir(request._pyfuncitem)
def _sanitize_nodeid(filename):
filename = filename.replace('::()::', '/')
filename = filename.replace('::', '/')
filename = re.sub(r'\[(.+)\]', r'-\1', filename)
return filename
def _sanitize_level(level, raises=True):
if isinstance(level, basestring):
try:
return int(level)
except ValueError:
int_level = getattr(logging, level.upper(), None)
if int_level is not None:
return int_level
elif isinstance(level, int):
return level
if raises:
raise TypeError('bad logging level, expected int or string, got "%s"' % level)
def _refresh_link(source, link_name):
try:
os.unlink(link_name)
except OSError:
pass
try:
os.symlink(source, link_name)
except (OSError, AttributeError, NotImplementedError):
pass
def _make_logsdir_tmpdir(tmpdirhandler):
logsdir = tmpdirhandler.getbasetemp()
if logsdir.basename.startswith('popen-gw'):
logsdir = logsdir.join('..')
logsdir = logsdir.join('logs').ensure(dir=1)
return logsdir
def _make_logsdir_dir(dstname, cleandir=True):
logsdir = py.path.local(dstname)
if cleandir:
if logsdir.check():
logsdir.remove()
logsdir.mkdir()
return logsdir
def _make_logdir(item):
plugin = item.config.pluginmanager.getplugin('_logger')
return plugin.logsdir().join(_sanitize_nodeid(item.nodeid)).ensure(dir=1)
def _enable(handlers):
for hdlr in handlers:
hdlr.logger.addHandler(hdlr)
def _disable(handlers):
for hdlr in handlers:
hdlr.logger.removeHandler(hdlr)
hdlr.close()
def _log_option_parser(loggers):
def parser(arg):
def to_out(elem):
def find_row(name):
return next((row for row in loggers if name in row[0]), None)
def bad_logger(name):
names = [x for row in loggers for x in row[0]]
pretty_names = '(' + ', '.join(names) + ')'
raise argparse.ArgumentTypeError(
'wrong logger, expected %s, got "%s"' % (pretty_names, name))
def bad_level(level):
raise argparse.ArgumentTypeError(
'wrong level, expected (INFO, warn, 15, ...), got "%s"' % level)
row = find_row(elem)
if row:
return elem, row[1]
if '.' in elem:
elem_name, elem_level = elem.rsplit('.', 1)
row = find_row(elem_name)
level = _sanitize_level(elem_level, raises=False)
if row and level is not None:
return elem_name, level
if row:
bad_level(elem_level)
if level is not None:
bad_logger(elem_name)
bad_logger(elem)
return [to_out(x) for x in arg.split(',') if x]
return parser
def _loggers_from_logcfg(logcfg, logopt):
def to_stdout(loggers, opt):
def one(loggers, one):
if isinstance(one, basestring):
return one, next(row for row in loggers if one in row[0])[1]
else:
return one
return [one(loggers, x) for x in opt]
def to_file(loggers):
return [(name, row[2]) for row in loggers for name in row[0]]
return Loggers(
stdout=to_stdout(logcfg._loggers, logopt),
file_=to_file(logcfg._loggers)
)
def _loggers_from_hooks(item):
def to_loggers(configs_lists):
def to_logger_and_level(cfg):
if isinstance(cfg, basestring):
name, level = cfg, logging.NOTSET
else:
name, level = cfg
return name, level
return [to_logger_and_level(cfg) for configs in configs_lists for cfg in configs]
return Loggers(
stdout=to_loggers(item.config.hook.pytest_logger_stdoutloggers(item=item)),
file_=to_loggers(item.config.hook.pytest_logger_fileloggers(item=item))
)
def _choose_loggers(config_loggers, hook_loggers):
assert (not config_loggers) or (not hook_loggers),\
'pytest_logger_config and pytest_logger_*loggers hooks used at the same time'
return config_loggers or hook_loggers
def _make_handlers(stdoutloggers, fileloggers, item, formatter):
handlers = []
if stdoutloggers:
handlers += _make_stdout_handlers(stdoutloggers, formatter)
if fileloggers:
logdir = _make_logdir(item)
handlers += _make_file_handlers(fileloggers, formatter, logdir)
return handlers
def _make_stdout_handlers(loggers, fmt):
def make_handler(logger_and_level, fmt):
name, level = logger_and_level
logger = logging.getLogger(name)
handler = logging.StreamHandler(sys.stdout)
handler.setFormatter(fmt)
handler.setLevel(level)
handler.logger = logger
return handler
return [make_handler(lgr, fmt) for lgr in loggers]
def _make_file_handlers(loggers, fmt, logdir):
def make_handler(logdir, logger_and_level, fmt):
name, level = logger_and_level
logger = logging.getLogger(name)
name = name or 'logs'
logfile = str(logdir.join(name))
handler = logging.FileHandler(filename=logfile, mode='w', delay=True)
handler.setFormatter(fmt)
handler.setLevel(level)
handler.logger = logger
return handler
return [make_handler(logdir, lgr, fmt) for lgr in loggers] | self._formatter_class = formatter_class
def set_log_option_default(self, value): | random_line_split |
plugin.py | import os
import sys
import re
import py
import pytest
import logging
import time
import datetime
import argparse
from builtins import object, int
from past.builtins import basestring
def pytest_addhooks(pluginmanager):
pluginmanager.add_hookspecs(LoggerHookspec)
def _late_addoptions(parser, logcfg):
"""Add options to control logger"""
parser.addini(
name='logger_logsdir',
help='base directory with log files for file loggers [basetemp]',
default=None,
)
group = parser.getgroup('logger')
group.addoption('--logger-logsdir',
help='pick you own logs directory instead of default '
'directory under session tmpdir')
if logcfg._enabled:
parser = _log_option_parser(logcfg._loggers)
group.addoption('--loggers',
default=parser(logcfg._log_option_default),
type=parser,
metavar='LOGGER,LOGGER.LEVEL,...',
help='comma delimited list of loggers optionally suffixed with level '
'preceded by a dot. Levels can be lower or uppercase, or numeric. '
'For example: "logger1,logger2.info,logger3.FATAL,logger4.25"')
@pytest.hookimpl(trylast=True)
def pytest_load_initial_conftests(early_config, parser, args):
logcfg = LoggerConfig()
early_config.hook.pytest_logger_config(logger_config=logcfg)
early_config.pluginmanager.register(EarlyLoggerPlugin(logcfg), '_early_logger')
_late_addoptions(parser, logcfg)
def pytest_configure(config):
early_logger = config.pluginmanager.getplugin('_early_logger')
config.pluginmanager.register(LoggerPlugin(config, early_logger._logcfg), '_logger')
class EarlyLoggerPlugin(object):
def __init__(self, logcfg):
self._logcfg = logcfg
class LoggerPlugin(object):
def __init__(self, config, logcfg):
self._config = config
self._logdirlinks = config.hook.pytest_logger_logdirlink(config=config)
self._loggers = _loggers_from_logcfg(logcfg, config.getoption('loggers')) if logcfg._enabled else None
self._formatter_class = logcfg._formatter_class or DefaultFormatter
self._logsdir = None
self._split_by_outcome_subdir = logcfg._split_by_outcome_subdir
self._split_by_outcome_outcomes = logcfg._split_by_outcome_outcomes
def logsdir(self):
ldir = self._logsdir
if ldir:
return ldir
logger_logsdir = self._config.getoption('logger_logsdir')
if not logger_logsdir:
logger_logsdir = self._config.getini('logger_logsdir')
if not logger_logsdir:
logger_logsdir = self._config.hook.pytest_logger_logsdir(config=self._config)
if logger_logsdir:
ldir = _make_logsdir_dir(logger_logsdir)
else:
ldir = _make_logsdir_tmpdir(self._config._tmpdirhandler)
self._logsdir = ldir
for link in self._logdirlinks:
_refresh_link(str(ldir), link)
return ldir
def pytest_runtest_setup(self, item):
loggers = _choose_loggers(self._loggers, _loggers_from_hooks(item))
formatter = self._formatter_class()
item._logger = state = LoggerState(item=item,
stdoutloggers=loggers.stdout,
fileloggers=loggers.file,
formatter=formatter)
state.on_setup()
def pytest_runtest_teardown(self, item, nextitem):
logger = getattr(item, '_logger', None)
if logger:
logger.on_teardown()
@pytest.mark.hookwrapper
def pytest_runtest_makereport(self, item, call):
outcome = yield
tr = outcome.get_result()
logger = getattr(item, '_logger', None)
if logger:
if self._logsdir and self._split_by_outcome_subdir and tr.outcome in self._split_by_outcome_outcomes:
split_by_outcome_logdir = self._logsdir.join(self._split_by_outcome_subdir, tr.outcome)
nodeid = _sanitize_nodeid(item.nodeid)
nodepath = os.path.dirname(nodeid)
split_by_outcome_logdir.join(nodepath).ensure(dir=1)
destdir_relpath = os.path.relpath(str(self._logsdir.join(nodeid)),
str(split_by_outcome_logdir.join(nodepath)))
_refresh_link(destdir_relpath, str(split_by_outcome_logdir.join(nodeid)))
if call.when == 'teardown':
logger.on_makereport()
class LoggerState(object):
def __init__(self, item, stdoutloggers, fileloggers, formatter):
self._put_newlines = bool(item.config.option.capture == 'no' and stdoutloggers)
self.handlers = _make_handlers(stdoutloggers, fileloggers, item, formatter)
self.root_enabler = RootEnabler(bool(stdoutloggers and fileloggers))
def put_newline(self):
if self._put_newlines:
sys.stdout.write('\n')
def on_setup(self):
self.put_newline()
_enable(self.handlers)
self.root_enabler.enable()
def on_teardown(self):
self.put_newline()
def on_makereport(self):
self.root_enabler.disable()
_disable(self.handlers)
class RootEnabler(object):
def __init__(self, enabled):
self._enabled = enabled
self._root_level = logging.root.level
def enable(self):
if self._enabled:
self._root_level = logging.root.level
logging.root.setLevel(logging.NOTSET) # stops root logger from blocking logs
def disable(self):
if self._enabled:
logging.root.setLevel(self._root_level)
class Loggers(object):
def __init__(self, stdout, file_):
self.stdout = stdout
self.file = file_
def __bool__(self):
return bool(self.stdout) or bool(self.file)
class LoggerConfig(object):
"""Configuration of logging to stdout and filesystem."""
def __init__(self):
self._enabled = False
self._loggers = []
self._formatter_class = None
self._log_option_default = ''
self._split_by_outcome_subdir = None
self._split_by_outcome_outcomes = []
def add_loggers(self, loggers, stdout_level=logging.NOTSET, file_level=logging.NOTSET):
"""Adds loggers for stdout/filesystem handling.
Stdout: loggers will log to stdout only when mentioned in `loggers` option. If they're
mentioned without explicit level, `stdout_level` will be used.
Filesystem: loggers will log to files at `file_level`.
:arg loggers: List of logger names.
:arg stdout_level: Default level at which stdout handlers will pass logs.
By default: `logging.NOTSET`, which means: pass everything.
:arg file_level: Level at which filesystem handlers will pass logs.
By default: `logging.NOTSET`, which means: pass everything.
"""
self._enabled = True
self._loggers.append((loggers, _sanitize_level(stdout_level), _sanitize_level(file_level)))
def set_formatter_class(self, formatter_class):
"""Sets the `logging.Formatter` class to be used by all loggers.
:arg formatter_class: The `logging.Formatter` class
"""
if isinstance(formatter_class, logging.Formatter):
raise ValueError("Got a formatter instance instead of its class !")
if not issubclass(formatter_class, logging.Formatter):
raise ValueError("Formatter should be a class inheriting from logging.Formatter")
self._formatter_class = formatter_class
def set_log_option_default(self, value):
""" Sets default value of `log` option."""
self._log_option_default = value
def split_by_outcome(self, outcomes=None, subdir='by_outcome'):
"""Makes a directory inside main logdir where logs are further split by test outcome
:param outcomes: list of test outcomes to be handled (failed/passed/skipped)
:param subdir: name for the subdirectory in main log directory
"""
if outcomes is not None:
allowed_outcomes = ['passed', 'failed', 'skipped']
unexpected_outcomes = set(outcomes) - set(allowed_outcomes)
if unexpected_outcomes:
raise ValueError('got unexpected_outcomes: <' + str(list(unexpected_outcomes)) + '>')
self._split_by_outcome_outcomes = outcomes
else:
self._split_by_outcome_outcomes = ['failed']
self._split_by_outcome_subdir = subdir
class LoggerHookspec(object):
def pytest_logger_config(self, logger_config):
""" called before cmdline options parsing. Accepts terse configuration
of both stdout and file logging, adds cmdline options to manipulate
stdout logging. Cannot be used together with \\*loggers hooks.
:arg logger_config: instance of :py:class:`LoggerConfig`, allows
setting loggers for stdout and file handling and their levels.
"""
def pytest_logger_stdoutloggers(self, item):
""" called before testcase setup. If implemented, given loggers
will emit their output to terminal output. Cannot be used together with
logger_config hook.
:arg item: test item for which handlers are to be setup.
:return list: List should contain logger name strings
or tuples with logger name string and logging level.
"""
def pytest_logger_fileloggers(self, item):
""" called before testcase setup. If implemented, given loggers
will emit their output to files within logs temporary directory.
Cannot be used together with logger_config hook.
:arg item: test item for which handlers are to be setup.
:return list: List should contain logger name strings
or tuples with logger name string and logging level.
"""
def pytest_logger_logdirlink(self, config):
""" called after cmdline options parsing.
If implemented, symlink to logs directory will be created.
:arg config: pytest config object, holds e.g. options
:return string: Absolute path of requested link to logs directory.
"""
@pytest.hookspec(firstresult=True)
def pytest_logger_logsdir(self, config):
""" called after cmdline options parsing.
If implemented, place logs into the location returned. This is similar
to using --logger-logsdir or the logger_logsdir ini option, but will
only be used if those are not.
Additionally, if multiple implementations of this hook are found, only
the first non-None value will be used.
:arg config: pytest config object, holds e.g. options
:return string: Absolute path of logs directory.
"""
class DefaultFormatter(logging.Formatter):
short_level_names = {
logging.FATAL: 'ftl',
logging.ERROR: 'err',
logging.WARN: 'wrn',
logging.INFO: 'inf',
logging.DEBUG: 'dbg',
}
format_string = '%(asctime)s %(levelshortname)s %(name)s: %(message)s'
def __init__(self):
logging.Formatter.__init__(self, DefaultFormatter.format_string)
self._start = time.time()
def formatTime(self, record, datefmt=None):
ct = record.created - self._start
dt = datetime.datetime.utcfromtimestamp(ct)
return dt.strftime("%M:%S.%f")[:-3] # omit useconds, leave mseconds
def format(self, record):
record.levelshortname = DefaultFormatter.short_level_names.get(record.levelno,
'l%s' % record.levelno)
return logging.Formatter.format(self, record)
@pytest.fixture
def logdir(request):
return _make_logdir(request._pyfuncitem)
def _sanitize_nodeid(filename):
filename = filename.replace('::()::', '/')
filename = filename.replace('::', '/')
filename = re.sub(r'\[(.+)\]', r'-\1', filename)
return filename
def _sanitize_level(level, raises=True):
if isinstance(level, basestring):
try:
return int(level)
except ValueError:
int_level = getattr(logging, level.upper(), None)
if int_level is not None:
return int_level
elif isinstance(level, int):
|
if raises:
raise TypeError('bad logging level, expected int or string, got "%s"' % level)
def _refresh_link(source, link_name):
try:
os.unlink(link_name)
except OSError:
pass
try:
os.symlink(source, link_name)
except (OSError, AttributeError, NotImplementedError):
pass
def _make_logsdir_tmpdir(tmpdirhandler):
logsdir = tmpdirhandler.getbasetemp()
if logsdir.basename.startswith('popen-gw'):
logsdir = logsdir.join('..')
logsdir = logsdir.join('logs').ensure(dir=1)
return logsdir
def _make_logsdir_dir(dstname, cleandir=True):
logsdir = py.path.local(dstname)
if cleandir:
if logsdir.check():
logsdir.remove()
logsdir.mkdir()
return logsdir
def _make_logdir(item):
plugin = item.config.pluginmanager.getplugin('_logger')
return plugin.logsdir().join(_sanitize_nodeid(item.nodeid)).ensure(dir=1)
def _enable(handlers):
for hdlr in handlers:
hdlr.logger.addHandler(hdlr)
def _disable(handlers):
for hdlr in handlers:
hdlr.logger.removeHandler(hdlr)
hdlr.close()
def _log_option_parser(loggers):
def parser(arg):
def to_out(elem):
def find_row(name):
return next((row for row in loggers if name in row[0]), None)
def bad_logger(name):
names = [x for row in loggers for x in row[0]]
pretty_names = '(' + ', '.join(names) + ')'
raise argparse.ArgumentTypeError(
'wrong logger, expected %s, got "%s"' % (pretty_names, name))
def bad_level(level):
raise argparse.ArgumentTypeError(
'wrong level, expected (INFO, warn, 15, ...), got "%s"' % level)
row = find_row(elem)
if row:
return elem, row[1]
if '.' in elem:
elem_name, elem_level = elem.rsplit('.', 1)
row = find_row(elem_name)
level = _sanitize_level(elem_level, raises=False)
if row and level is not None:
return elem_name, level
if row:
bad_level(elem_level)
if level is not None:
bad_logger(elem_name)
bad_logger(elem)
return [to_out(x) for x in arg.split(',') if x]
return parser
def _loggers_from_logcfg(logcfg, logopt):
def to_stdout(loggers, opt):
def one(loggers, one):
if isinstance(one, basestring):
return one, next(row for row in loggers if one in row[0])[1]
else:
return one
return [one(loggers, x) for x in opt]
def to_file(loggers):
return [(name, row[2]) for row in loggers for name in row[0]]
return Loggers(
stdout=to_stdout(logcfg._loggers, logopt),
file_=to_file(logcfg._loggers)
)
def _loggers_from_hooks(item):
def to_loggers(configs_lists):
def to_logger_and_level(cfg):
if isinstance(cfg, basestring):
name, level = cfg, logging.NOTSET
else:
name, level = cfg
return name, level
return [to_logger_and_level(cfg) for configs in configs_lists for cfg in configs]
return Loggers(
stdout=to_loggers(item.config.hook.pytest_logger_stdoutloggers(item=item)),
file_=to_loggers(item.config.hook.pytest_logger_fileloggers(item=item))
)
def _choose_loggers(config_loggers, hook_loggers):
assert (not config_loggers) or (not hook_loggers),\
'pytest_logger_config and pytest_logger_*loggers hooks used at the same time'
return config_loggers or hook_loggers
def _make_handlers(stdoutloggers, fileloggers, item, formatter):
handlers = []
if stdoutloggers:
handlers += _make_stdout_handlers(stdoutloggers, formatter)
if fileloggers:
logdir = _make_logdir(item)
handlers += _make_file_handlers(fileloggers, formatter, logdir)
return handlers
def _make_stdout_handlers(loggers, fmt):
def make_handler(logger_and_level, fmt):
name, level = logger_and_level
logger = logging.getLogger(name)
handler = logging.StreamHandler(sys.stdout)
handler.setFormatter(fmt)
handler.setLevel(level)
handler.logger = logger
return handler
return [make_handler(lgr, fmt) for lgr in loggers]
def _make_file_handlers(loggers, fmt, logdir):
def make_handler(logdir, logger_and_level, fmt):
name, level = logger_and_level
logger = logging.getLogger(name)
name = name or 'logs'
logfile = str(logdir.join(name))
handler = logging.FileHandler(filename=logfile, mode='w', delay=True)
handler.setFormatter(fmt)
handler.setLevel(level)
handler.logger = logger
return handler
return [make_handler(logdir, lgr, fmt) for lgr in loggers]
| return level | conditional_block |
plugin.py | import os
import sys
import re
import py
import pytest
import logging
import time
import datetime
import argparse
from builtins import object, int
from past.builtins import basestring
def pytest_addhooks(pluginmanager):
pluginmanager.add_hookspecs(LoggerHookspec)
def _late_addoptions(parser, logcfg):
"""Add options to control logger"""
parser.addini(
name='logger_logsdir',
help='base directory with log files for file loggers [basetemp]',
default=None,
)
group = parser.getgroup('logger')
group.addoption('--logger-logsdir',
help='pick you own logs directory instead of default '
'directory under session tmpdir')
if logcfg._enabled:
parser = _log_option_parser(logcfg._loggers)
group.addoption('--loggers',
default=parser(logcfg._log_option_default),
type=parser,
metavar='LOGGER,LOGGER.LEVEL,...',
help='comma delimited list of loggers optionally suffixed with level '
'preceded by a dot. Levels can be lower or uppercase, or numeric. '
'For example: "logger1,logger2.info,logger3.FATAL,logger4.25"')
@pytest.hookimpl(trylast=True)
def pytest_load_initial_conftests(early_config, parser, args):
logcfg = LoggerConfig()
early_config.hook.pytest_logger_config(logger_config=logcfg)
early_config.pluginmanager.register(EarlyLoggerPlugin(logcfg), '_early_logger')
_late_addoptions(parser, logcfg)
def pytest_configure(config):
early_logger = config.pluginmanager.getplugin('_early_logger')
config.pluginmanager.register(LoggerPlugin(config, early_logger._logcfg), '_logger')
class EarlyLoggerPlugin(object):
def __init__(self, logcfg):
self._logcfg = logcfg
class LoggerPlugin(object):
def __init__(self, config, logcfg):
self._config = config
self._logdirlinks = config.hook.pytest_logger_logdirlink(config=config)
self._loggers = _loggers_from_logcfg(logcfg, config.getoption('loggers')) if logcfg._enabled else None
self._formatter_class = logcfg._formatter_class or DefaultFormatter
self._logsdir = None
self._split_by_outcome_subdir = logcfg._split_by_outcome_subdir
self._split_by_outcome_outcomes = logcfg._split_by_outcome_outcomes
def logsdir(self):
ldir = self._logsdir
if ldir:
return ldir
logger_logsdir = self._config.getoption('logger_logsdir')
if not logger_logsdir:
logger_logsdir = self._config.getini('logger_logsdir')
if not logger_logsdir:
logger_logsdir = self._config.hook.pytest_logger_logsdir(config=self._config)
if logger_logsdir:
ldir = _make_logsdir_dir(logger_logsdir)
else:
ldir = _make_logsdir_tmpdir(self._config._tmpdirhandler)
self._logsdir = ldir
for link in self._logdirlinks:
_refresh_link(str(ldir), link)
return ldir
def pytest_runtest_setup(self, item):
loggers = _choose_loggers(self._loggers, _loggers_from_hooks(item))
formatter = self._formatter_class()
item._logger = state = LoggerState(item=item,
stdoutloggers=loggers.stdout,
fileloggers=loggers.file,
formatter=formatter)
state.on_setup()
def pytest_runtest_teardown(self, item, nextitem):
logger = getattr(item, '_logger', None)
if logger:
logger.on_teardown()
@pytest.mark.hookwrapper
def pytest_runtest_makereport(self, item, call):
outcome = yield
tr = outcome.get_result()
logger = getattr(item, '_logger', None)
if logger:
if self._logsdir and self._split_by_outcome_subdir and tr.outcome in self._split_by_outcome_outcomes:
split_by_outcome_logdir = self._logsdir.join(self._split_by_outcome_subdir, tr.outcome)
nodeid = _sanitize_nodeid(item.nodeid)
nodepath = os.path.dirname(nodeid)
split_by_outcome_logdir.join(nodepath).ensure(dir=1)
destdir_relpath = os.path.relpath(str(self._logsdir.join(nodeid)),
str(split_by_outcome_logdir.join(nodepath)))
_refresh_link(destdir_relpath, str(split_by_outcome_logdir.join(nodeid)))
if call.when == 'teardown':
logger.on_makereport()
class LoggerState(object):
def __init__(self, item, stdoutloggers, fileloggers, formatter):
self._put_newlines = bool(item.config.option.capture == 'no' and stdoutloggers)
self.handlers = _make_handlers(stdoutloggers, fileloggers, item, formatter)
self.root_enabler = RootEnabler(bool(stdoutloggers and fileloggers))
def put_newline(self):
if self._put_newlines:
sys.stdout.write('\n')
def on_setup(self):
self.put_newline()
_enable(self.handlers)
self.root_enabler.enable()
def on_teardown(self):
self.put_newline()
def on_makereport(self):
self.root_enabler.disable()
_disable(self.handlers)
class RootEnabler(object):
|
class Loggers(object):
def __init__(self, stdout, file_):
self.stdout = stdout
self.file = file_
def __bool__(self):
return bool(self.stdout) or bool(self.file)
class LoggerConfig(object):
"""Configuration of logging to stdout and filesystem."""
def __init__(self):
self._enabled = False
self._loggers = []
self._formatter_class = None
self._log_option_default = ''
self._split_by_outcome_subdir = None
self._split_by_outcome_outcomes = []
def add_loggers(self, loggers, stdout_level=logging.NOTSET, file_level=logging.NOTSET):
"""Adds loggers for stdout/filesystem handling.
Stdout: loggers will log to stdout only when mentioned in `loggers` option. If they're
mentioned without explicit level, `stdout_level` will be used.
Filesystem: loggers will log to files at `file_level`.
:arg loggers: List of logger names.
:arg stdout_level: Default level at which stdout handlers will pass logs.
By default: `logging.NOTSET`, which means: pass everything.
:arg file_level: Level at which filesystem handlers will pass logs.
By default: `logging.NOTSET`, which means: pass everything.
"""
self._enabled = True
self._loggers.append((loggers, _sanitize_level(stdout_level), _sanitize_level(file_level)))
def set_formatter_class(self, formatter_class):
"""Sets the `logging.Formatter` class to be used by all loggers.
:arg formatter_class: The `logging.Formatter` class
"""
if isinstance(formatter_class, logging.Formatter):
raise ValueError("Got a formatter instance instead of its class !")
if not issubclass(formatter_class, logging.Formatter):
raise ValueError("Formatter should be a class inheriting from logging.Formatter")
self._formatter_class = formatter_class
def set_log_option_default(self, value):
""" Sets default value of `log` option."""
self._log_option_default = value
def split_by_outcome(self, outcomes=None, subdir='by_outcome'):
"""Makes a directory inside main logdir where logs are further split by test outcome
:param outcomes: list of test outcomes to be handled (failed/passed/skipped)
:param subdir: name for the subdirectory in main log directory
"""
if outcomes is not None:
allowed_outcomes = ['passed', 'failed', 'skipped']
unexpected_outcomes = set(outcomes) - set(allowed_outcomes)
if unexpected_outcomes:
raise ValueError('got unexpected_outcomes: <' + str(list(unexpected_outcomes)) + '>')
self._split_by_outcome_outcomes = outcomes
else:
self._split_by_outcome_outcomes = ['failed']
self._split_by_outcome_subdir = subdir
class LoggerHookspec(object):
def pytest_logger_config(self, logger_config):
""" called before cmdline options parsing. Accepts terse configuration
of both stdout and file logging, adds cmdline options to manipulate
stdout logging. Cannot be used together with \\*loggers hooks.
:arg logger_config: instance of :py:class:`LoggerConfig`, allows
setting loggers for stdout and file handling and their levels.
"""
def pytest_logger_stdoutloggers(self, item):
""" called before testcase setup. If implemented, given loggers
will emit their output to terminal output. Cannot be used together with
logger_config hook.
:arg item: test item for which handlers are to be setup.
:return list: List should contain logger name strings
or tuples with logger name string and logging level.
"""
def pytest_logger_fileloggers(self, item):
""" called before testcase setup. If implemented, given loggers
will emit their output to files within logs temporary directory.
Cannot be used together with logger_config hook.
:arg item: test item for which handlers are to be setup.
:return list: List should contain logger name strings
or tuples with logger name string and logging level.
"""
def pytest_logger_logdirlink(self, config):
""" called after cmdline options parsing.
If implemented, symlink to logs directory will be created.
:arg config: pytest config object, holds e.g. options
:return string: Absolute path of requested link to logs directory.
"""
@pytest.hookspec(firstresult=True)
def pytest_logger_logsdir(self, config):
""" called after cmdline options parsing.
If implemented, place logs into the location returned. This is similar
to using --logger-logsdir or the logger_logsdir ini option, but will
only be used if those are not.
Additionally, if multiple implementations of this hook are found, only
the first non-None value will be used.
:arg config: pytest config object, holds e.g. options
:return string: Absolute path of logs directory.
"""
class DefaultFormatter(logging.Formatter):
short_level_names = {
logging.FATAL: 'ftl',
logging.ERROR: 'err',
logging.WARN: 'wrn',
logging.INFO: 'inf',
logging.DEBUG: 'dbg',
}
format_string = '%(asctime)s %(levelshortname)s %(name)s: %(message)s'
def __init__(self):
logging.Formatter.__init__(self, DefaultFormatter.format_string)
self._start = time.time()
def formatTime(self, record, datefmt=None):
ct = record.created - self._start
dt = datetime.datetime.utcfromtimestamp(ct)
return dt.strftime("%M:%S.%f")[:-3] # omit useconds, leave mseconds
def format(self, record):
record.levelshortname = DefaultFormatter.short_level_names.get(record.levelno,
'l%s' % record.levelno)
return logging.Formatter.format(self, record)
@pytest.fixture
def logdir(request):
return _make_logdir(request._pyfuncitem)
def _sanitize_nodeid(filename):
filename = filename.replace('::()::', '/')
filename = filename.replace('::', '/')
filename = re.sub(r'\[(.+)\]', r'-\1', filename)
return filename
def _sanitize_level(level, raises=True):
if isinstance(level, basestring):
try:
return int(level)
except ValueError:
int_level = getattr(logging, level.upper(), None)
if int_level is not None:
return int_level
elif isinstance(level, int):
return level
if raises:
raise TypeError('bad logging level, expected int or string, got "%s"' % level)
def _refresh_link(source, link_name):
try:
os.unlink(link_name)
except OSError:
pass
try:
os.symlink(source, link_name)
except (OSError, AttributeError, NotImplementedError):
pass
def _make_logsdir_tmpdir(tmpdirhandler):
logsdir = tmpdirhandler.getbasetemp()
if logsdir.basename.startswith('popen-gw'):
logsdir = logsdir.join('..')
logsdir = logsdir.join('logs').ensure(dir=1)
return logsdir
def _make_logsdir_dir(dstname, cleandir=True):
logsdir = py.path.local(dstname)
if cleandir:
if logsdir.check():
logsdir.remove()
logsdir.mkdir()
return logsdir
def _make_logdir(item):
plugin = item.config.pluginmanager.getplugin('_logger')
return plugin.logsdir().join(_sanitize_nodeid(item.nodeid)).ensure(dir=1)
def _enable(handlers):
for hdlr in handlers:
hdlr.logger.addHandler(hdlr)
def _disable(handlers):
for hdlr in handlers:
hdlr.logger.removeHandler(hdlr)
hdlr.close()
def _log_option_parser(loggers):
def parser(arg):
def to_out(elem):
def find_row(name):
return next((row for row in loggers if name in row[0]), None)
def bad_logger(name):
names = [x for row in loggers for x in row[0]]
pretty_names = '(' + ', '.join(names) + ')'
raise argparse.ArgumentTypeError(
'wrong logger, expected %s, got "%s"' % (pretty_names, name))
def bad_level(level):
raise argparse.ArgumentTypeError(
'wrong level, expected (INFO, warn, 15, ...), got "%s"' % level)
row = find_row(elem)
if row:
return elem, row[1]
if '.' in elem:
elem_name, elem_level = elem.rsplit('.', 1)
row = find_row(elem_name)
level = _sanitize_level(elem_level, raises=False)
if row and level is not None:
return elem_name, level
if row:
bad_level(elem_level)
if level is not None:
bad_logger(elem_name)
bad_logger(elem)
return [to_out(x) for x in arg.split(',') if x]
return parser
def _loggers_from_logcfg(logcfg, logopt):
def to_stdout(loggers, opt):
def one(loggers, one):
if isinstance(one, basestring):
return one, next(row for row in loggers if one in row[0])[1]
else:
return one
return [one(loggers, x) for x in opt]
def to_file(loggers):
return [(name, row[2]) for row in loggers for name in row[0]]
return Loggers(
stdout=to_stdout(logcfg._loggers, logopt),
file_=to_file(logcfg._loggers)
)
def _loggers_from_hooks(item):
def to_loggers(configs_lists):
def to_logger_and_level(cfg):
if isinstance(cfg, basestring):
name, level = cfg, logging.NOTSET
else:
name, level = cfg
return name, level
return [to_logger_and_level(cfg) for configs in configs_lists for cfg in configs]
return Loggers(
stdout=to_loggers(item.config.hook.pytest_logger_stdoutloggers(item=item)),
file_=to_loggers(item.config.hook.pytest_logger_fileloggers(item=item))
)
def _choose_loggers(config_loggers, hook_loggers):
assert (not config_loggers) or (not hook_loggers),\
'pytest_logger_config and pytest_logger_*loggers hooks used at the same time'
return config_loggers or hook_loggers
def _make_handlers(stdoutloggers, fileloggers, item, formatter):
handlers = []
if stdoutloggers:
handlers += _make_stdout_handlers(stdoutloggers, formatter)
if fileloggers:
logdir = _make_logdir(item)
handlers += _make_file_handlers(fileloggers, formatter, logdir)
return handlers
def _make_stdout_handlers(loggers, fmt):
def make_handler(logger_and_level, fmt):
name, level = logger_and_level
logger = logging.getLogger(name)
handler = logging.StreamHandler(sys.stdout)
handler.setFormatter(fmt)
handler.setLevel(level)
handler.logger = logger
return handler
return [make_handler(lgr, fmt) for lgr in loggers]
def _make_file_handlers(loggers, fmt, logdir):
def make_handler(logdir, logger_and_level, fmt):
name, level = logger_and_level
logger = logging.getLogger(name)
name = name or 'logs'
logfile = str(logdir.join(name))
handler = logging.FileHandler(filename=logfile, mode='w', delay=True)
handler.setFormatter(fmt)
handler.setLevel(level)
handler.logger = logger
return handler
return [make_handler(logdir, lgr, fmt) for lgr in loggers]
| def __init__(self, enabled):
self._enabled = enabled
self._root_level = logging.root.level
def enable(self):
if self._enabled:
self._root_level = logging.root.level
logging.root.setLevel(logging.NOTSET) # stops root logger from blocking logs
def disable(self):
if self._enabled:
logging.root.setLevel(self._root_level) | identifier_body |
main.go | /*
Copyright 2020 Kamal Nasser All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
import (
"bytes"
"context"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"strconv"
"strings"
"github.com/apex/log"
"github.com/apex/log/handlers/cli"
"github.com/digitalocean/godo"
"gopkg.in/alecthomas/kingpin.v2"
"gopkg.in/yaml.v2"
)
var (
doToken = kingpin.Flag("access-token", "DigitalOcean API Token - if unset, attempts to use doctl's stored token of its current default context. env var: DIGITALOCEAN_ACCESS_TOKEN").Short('t').Envar("DIGITALOCEAN_ACCESS_TOKEN").String()
sshUser = kingpin.Flag("ssh-user", "default ssh user").String()
sshPort = kingpin.Flag("ssh-port", "default ssh port").Int()
tag = kingpin.Flag("tag", "filter droplets by tag").String()
ignore = kingpin.Flag("ignore", "ignore a Droplet by name, can be specified multiple times").Strings()
groupByRegion = kingpin.Flag("group-by-region", "group hosts by region, defaults to true").Default("true").Bool()
groupByTag = kingpin.Flag("group-by-tag", "group hosts by their Droplet tags, defaults to true").Default("true").Bool()
groupByProject = kingpin.Flag("group-by-project", "group hosts by their Projects, defaults to true").Default("true").Bool()
privateIPs = kingpin.Flag("private-ips", "use private Droplet IPs instead of public IPs").Bool()
out = kingpin.Flag("out", "write the ansible inventory to this file - if unset, print to stdout").String()
timeout = kingpin.Flag("timeout", "timeout for total runtime of the command, defaults to 2m").Default("2m").Duration()
)
var doRegions = []string{"ams1", "ams2", "ams3", "blr1", "fra1", "lon1", "nyc1", "nyc2", "nyc3", "sfo1", "sfo2", "sfo3", "sgp1", "tor1"}
func main() {
kingpin.Parse()
log.SetHandler(cli.Default)
if *doToken == "" {
log.Info("no access token provided, attempting to look up doctl's access token")
token, context, err := doctlToken()
if err != nil {
log.WithError(err).Fatalf("couldn't look up token")
}
*doToken = token
log.WithField("context", context).Info("using doctl access token")
}
ctx, cancel := context.WithTimeout(context.Background(), *timeout)
defer cancel()
client := godo.NewFromToken(*doToken)
// get droplets
if *tag != "" {
log.WithField("tag", *tag).Info("only selecting tagged Droplets")
}
log.Info("listing Droplets")
droplets, err := listDroplets(ctx, client, *tag)
if err != nil {
log.WithError(err).Fatal("couldn't fetch Droplets")
}
// filter out ignored droplets
droplets = removeIgnored(droplets, *ignore)
// initialize some maps
var dropletsByRegion map[string][]string
if *groupByRegion {
dropletsByRegion = make(map[string][]string, len(doRegions))
for _, r := range doRegions {
dropletsByRegion[r] = []string{}
}
}
var dropletsByTag map[string][]string
if *groupByTag {
dropletsByTag = make(map[string][]string, 0)
}
var inventory bytes.Buffer
dropletsByID := make(map[int]string, len(droplets))
for _, d := range droplets {
ll := log.WithField("droplet", d.Name)
ll.Info("processing")
dropletsByID[d.ID] = d.Name
if *groupByRegion {
r := d.Region.Slug
dropletsByRegion[r] = append(dropletsByRegion[r], d.Name)
}
if *groupByTag {
for _, tag := range d.Tags {
dropletsByTag[tag] = append(dropletsByTag[tag], d.Name)
}
}
var (
ip string
err error
)
if *privateIPs {
ip, err = d.PrivateIPv4()
} else {
ip, err = d.PublicIPv4()
}
if err != nil {
ll.WithError(err).Error("couldn't look up the Droplet's IP address, skipped")
continue
}
inventory.WriteString(d.Name)
inventory.WriteRune('\t')
if *sshUser != "" {
inventory.WriteString(fmt.Sprintf("ansible_user=%s ", *sshUser))
}
if *sshPort != 0 {
inventory.WriteString(fmt.Sprintf("ansible_port=%d ", *sshPort))
}
if ip != "" {
inventory.WriteString(fmt.Sprintf("ansible_host=%s", ip))
} else {
ll.Warn("could not get the Droplet's IP address, using hostname")
}
inventory.WriteRune('\n')
}
inventory.WriteRune('\n')
// write the region groups
if *groupByRegion {
// loop over the doRegions slice to maintain alphabetic order
for _, region := range doRegions {
log.WithField("region", region).Info("building region group")
droplets := dropletsByRegion[region]
inventory.WriteString(fmt.Sprintf("[%s]", region))
inventory.WriteRune('\n')
for _, d := range droplets {
inventory.WriteString(d)
inventory.WriteRune('\n')
}
inventory.WriteRune('\n')
}
}
// write the tag groups
if *groupByTag {
for tag, droplets := range dropletsByTag {
tag = sanitizeAnsibleGroup(tag)
log.WithField("tag", tag).Info("building tag group")
inventory.WriteString(fmt.Sprintf("[%s]", tag))
inventory.WriteRune('\n')
for _, d := range droplets {
inventory.WriteString(d)
inventory.WriteRune('\n')
}
inventory.WriteRune('\n')
}
}
// write the project groups
if *groupByProject {
log.Info("listing projects")
projects, _, err := client.Projects.List(ctx, nil)
if err != nil {
log.WithError(err).Fatal("couldn't list projects")
}
dropletsByProject := make(map[string][]string)
for _, project := range projects {
ll := log.WithField("project", project.Name)
ll.Info("listing project resources")
resources, err := listProjectResources(ctx, client, project.ID)
if err != nil {
ll.WithError(err).Fatal("")
}
for _, r := range resources {
if !strings.HasPrefix(r.URN, "do:droplet:") {
continue
}
id := strings.TrimPrefix(r.URN, "do:droplet:")
idInt, err := strconv.Atoi(id)
if err != nil {
ll.WithError(err).WithField("urn", r.URN).Error("parsing droplet ID, skipping")
continue
}
// skip droplets that aren't included in the inventory
droplet, exists := dropletsByID[idInt]
if !exists {
continue
}
dropletsByProject[project.Name] = append(dropletsByProject[project.Name], droplet)
}
}
for project, droplets := range dropletsByProject {
project = sanitizeAnsibleGroup(project)
log.WithField("project", project).Info("building project group")
inventory.WriteString(fmt.Sprintf("[%s]", project))
inventory.WriteRune('\n')
for _, d := range droplets {
inventory.WriteString(d)
inventory.WriteRune('\n')
}
inventory.WriteRune('\n')
}
}
if *out != "" {
ll := log.WithField("out", *out)
ll.Info("writing inventory to file")
f, err := os.Create(*out)
if err != nil {
ll.WithError(err).Fatal("couldn't open file for writing")
}
defer f.Close()
_, err = inventory.WriteTo(f)
if err != nil {
ll.WithError(err).Fatal("couldn't write inventory to file")
}
} else {
inventory.WriteTo(os.Stdout)
}
log.Info("done!")
}
func doctlToken() (string, string, error) {
type doctlConfig struct {
Context string `yaml:"context"`
AccessToken string `yaml:"access-token"`
AuthContexts map[string]string `yaml:"auth-contexts"`
}
cfgDir, err := os.UserConfigDir()
if err != nil {
return "", "", fmt.Errorf("couldn't look up user config dir: %w", err)
}
cfgFile, err := ioutil.ReadFile(filepath.Join(cfgDir, "doctl", "config.yaml"))
if err != nil {
return "", "", fmt.Errorf("couldn't read doctl's config.yaml: %w", err)
}
cfg := doctlConfig{}
err = yaml.Unmarshal(cfgFile, &cfg)
if err != nil {
return "", "", fmt.Errorf("couldn't unmarshal doctl's config.yaml: %w", err)
}
switch cfg.Context {
case "default":
return cfg.AccessToken, cfg.Context, nil
default:
return cfg.AuthContexts[cfg.Context], cfg.Context, nil
}
}
func sanitizeAnsibleGroup(s string) string {
// replace invalid characters
s = strings.NewReplacer(
" ", "_",
"-", "_",
":", "_",
).Replace(s)
// group names cannot start with a digit
if '0' <= s[0] && s[0] <= '9' {
s = "_" + s
}
return s
}
func removeIgnored(droplets []godo.Droplet, ignored []string) []godo.Droplet {
if len(ignored) == 0 {
return droplets
}
// copy ignored droplets into a map
ignoreList := make(map[string]interface{}, len(ignored))
for _, i := range ignored {
ignoreList[i] = struct{}{}
}
// remove ignored droplets from the list
newDroplets := droplets[:0]
for _, d := range droplets {
if _, ignored := ignoreList[d.Name]; ignored {
log.WithField("droplet", d.Name).Info("ignoring")
continue
}
newDroplets = append(newDroplets, d)
}
return newDroplets
}
// get droplets w/ pagination
func | (ctx context.Context, client *godo.Client, tag string) ([]godo.Droplet, error) {
droplets := []godo.Droplet{}
call := func(opt *godo.ListOptions) (interface{}, *godo.Response, error) {
if tag != "" {
return client.Droplets.ListByTag(ctx, tag, opt)
}
return client.Droplets.List(ctx, opt)
}
handler := func(d interface{}) error {
dd, ok := d.([]godo.Droplet)
if !ok {
return fmt.Errorf("listing Droplets")
}
droplets = append(droplets, dd...)
return nil
}
err := paginateGodo(ctx, call, handler)
if err != nil {
return nil, err
}
return droplets, nil
}
// get project resources w/ pagination
func listProjectResources(ctx context.Context, client *godo.Client, projectID string) ([]godo.ProjectResource, error) {
prs := []godo.ProjectResource{}
call := func(opt *godo.ListOptions) (interface{}, *godo.Response, error) {
return client.Projects.ListResources(ctx, projectID, opt)
}
handler := func(r interface{}) error {
rr, ok := r.([]godo.ProjectResource)
if !ok {
return fmt.Errorf("listing project resources")
}
prs = append(prs, rr...)
return nil
}
err := paginateGodo(ctx, call, handler)
if err != nil {
return nil, err
}
return prs, nil
}
func paginateGodo(ctx context.Context, call func(*godo.ListOptions) (interface{}, *godo.Response, error), handler func(interface{}) error) error {
// create options. initially, these will be blank
opt := &godo.ListOptions{}
for {
results, resp, err := call(opt)
if err != nil {
return err
}
err = handler(results)
if err != nil {
return nil
}
// if we are at the last page, break out the for loop
if resp.Links == nil || resp.Links.IsLastPage() {
break
}
page, err := resp.Links.CurrentPage()
if err != nil {
return err
}
// set the page we want for the next request
opt.Page = page + 1
}
return nil
}
| listDroplets | identifier_name |
main.go | /*
Copyright 2020 Kamal Nasser All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
import (
"bytes"
"context"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"strconv"
"strings"
"github.com/apex/log"
"github.com/apex/log/handlers/cli"
"github.com/digitalocean/godo"
"gopkg.in/alecthomas/kingpin.v2"
"gopkg.in/yaml.v2"
)
var (
doToken = kingpin.Flag("access-token", "DigitalOcean API Token - if unset, attempts to use doctl's stored token of its current default context. env var: DIGITALOCEAN_ACCESS_TOKEN").Short('t').Envar("DIGITALOCEAN_ACCESS_TOKEN").String()
sshUser = kingpin.Flag("ssh-user", "default ssh user").String()
sshPort = kingpin.Flag("ssh-port", "default ssh port").Int()
tag = kingpin.Flag("tag", "filter droplets by tag").String()
ignore = kingpin.Flag("ignore", "ignore a Droplet by name, can be specified multiple times").Strings()
groupByRegion = kingpin.Flag("group-by-region", "group hosts by region, defaults to true").Default("true").Bool()
groupByTag = kingpin.Flag("group-by-tag", "group hosts by their Droplet tags, defaults to true").Default("true").Bool()
groupByProject = kingpin.Flag("group-by-project", "group hosts by their Projects, defaults to true").Default("true").Bool()
privateIPs = kingpin.Flag("private-ips", "use private Droplet IPs instead of public IPs").Bool()
out = kingpin.Flag("out", "write the ansible inventory to this file - if unset, print to stdout").String()
timeout = kingpin.Flag("timeout", "timeout for total runtime of the command, defaults to 2m").Default("2m").Duration()
)
var doRegions = []string{"ams1", "ams2", "ams3", "blr1", "fra1", "lon1", "nyc1", "nyc2", "nyc3", "sfo1", "sfo2", "sfo3", "sgp1", "tor1"}
func main() {
kingpin.Parse()
log.SetHandler(cli.Default)
if *doToken == "" {
log.Info("no access token provided, attempting to look up doctl's access token")
token, context, err := doctlToken()
if err != nil {
log.WithError(err).Fatalf("couldn't look up token")
}
*doToken = token
log.WithField("context", context).Info("using doctl access token")
}
ctx, cancel := context.WithTimeout(context.Background(), *timeout)
defer cancel()
client := godo.NewFromToken(*doToken)
// get droplets
if *tag != "" {
log.WithField("tag", *tag).Info("only selecting tagged Droplets")
}
log.Info("listing Droplets")
droplets, err := listDroplets(ctx, client, *tag)
if err != nil {
log.WithError(err).Fatal("couldn't fetch Droplets")
}
// filter out ignored droplets
droplets = removeIgnored(droplets, *ignore)
// initialize some maps
var dropletsByRegion map[string][]string
if *groupByRegion {
dropletsByRegion = make(map[string][]string, len(doRegions))
for _, r := range doRegions {
dropletsByRegion[r] = []string{}
}
}
var dropletsByTag map[string][]string
if *groupByTag {
dropletsByTag = make(map[string][]string, 0)
}
var inventory bytes.Buffer
dropletsByID := make(map[int]string, len(droplets))
for _, d := range droplets {
ll := log.WithField("droplet", d.Name)
ll.Info("processing")
dropletsByID[d.ID] = d.Name
if *groupByRegion {
r := d.Region.Slug
dropletsByRegion[r] = append(dropletsByRegion[r], d.Name)
}
if *groupByTag {
for _, tag := range d.Tags {
dropletsByTag[tag] = append(dropletsByTag[tag], d.Name)
}
}
var (
ip string
err error
)
if *privateIPs {
ip, err = d.PrivateIPv4()
} else {
ip, err = d.PublicIPv4()
}
if err != nil {
ll.WithError(err).Error("couldn't look up the Droplet's IP address, skipped")
continue
}
inventory.WriteString(d.Name)
inventory.WriteRune('\t')
if *sshUser != "" {
inventory.WriteString(fmt.Sprintf("ansible_user=%s ", *sshUser))
}
if *sshPort != 0 {
inventory.WriteString(fmt.Sprintf("ansible_port=%d ", *sshPort))
}
if ip != "" {
inventory.WriteString(fmt.Sprintf("ansible_host=%s", ip))
} else {
ll.Warn("could not get the Droplet's IP address, using hostname")
}
inventory.WriteRune('\n')
}
inventory.WriteRune('\n')
// write the region groups
if *groupByRegion {
// loop over the doRegions slice to maintain alphabetic order
for _, region := range doRegions {
log.WithField("region", region).Info("building region group")
droplets := dropletsByRegion[region]
inventory.WriteString(fmt.Sprintf("[%s]", region))
inventory.WriteRune('\n')
for _, d := range droplets {
inventory.WriteString(d)
inventory.WriteRune('\n')
}
inventory.WriteRune('\n')
}
}
// write the tag groups
if *groupByTag {
for tag, droplets := range dropletsByTag {
tag = sanitizeAnsibleGroup(tag)
log.WithField("tag", tag).Info("building tag group")
inventory.WriteString(fmt.Sprintf("[%s]", tag))
inventory.WriteRune('\n')
for _, d := range droplets {
inventory.WriteString(d)
inventory.WriteRune('\n')
}
inventory.WriteRune('\n')
}
}
// write the project groups
if *groupByProject {
log.Info("listing projects")
projects, _, err := client.Projects.List(ctx, nil)
if err != nil {
log.WithError(err).Fatal("couldn't list projects")
}
dropletsByProject := make(map[string][]string)
for _, project := range projects {
ll := log.WithField("project", project.Name)
ll.Info("listing project resources")
resources, err := listProjectResources(ctx, client, project.ID)
if err != nil {
ll.WithError(err).Fatal("")
}
for _, r := range resources {
if !strings.HasPrefix(r.URN, "do:droplet:") {
continue
}
id := strings.TrimPrefix(r.URN, "do:droplet:")
idInt, err := strconv.Atoi(id)
if err != nil |
// skip droplets that aren't included in the inventory
droplet, exists := dropletsByID[idInt]
if !exists {
continue
}
dropletsByProject[project.Name] = append(dropletsByProject[project.Name], droplet)
}
}
for project, droplets := range dropletsByProject {
project = sanitizeAnsibleGroup(project)
log.WithField("project", project).Info("building project group")
inventory.WriteString(fmt.Sprintf("[%s]", project))
inventory.WriteRune('\n')
for _, d := range droplets {
inventory.WriteString(d)
inventory.WriteRune('\n')
}
inventory.WriteRune('\n')
}
}
if *out != "" {
ll := log.WithField("out", *out)
ll.Info("writing inventory to file")
f, err := os.Create(*out)
if err != nil {
ll.WithError(err).Fatal("couldn't open file for writing")
}
defer f.Close()
_, err = inventory.WriteTo(f)
if err != nil {
ll.WithError(err).Fatal("couldn't write inventory to file")
}
} else {
inventory.WriteTo(os.Stdout)
}
log.Info("done!")
}
func doctlToken() (string, string, error) {
type doctlConfig struct {
Context string `yaml:"context"`
AccessToken string `yaml:"access-token"`
AuthContexts map[string]string `yaml:"auth-contexts"`
}
cfgDir, err := os.UserConfigDir()
if err != nil {
return "", "", fmt.Errorf("couldn't look up user config dir: %w", err)
}
cfgFile, err := ioutil.ReadFile(filepath.Join(cfgDir, "doctl", "config.yaml"))
if err != nil {
return "", "", fmt.Errorf("couldn't read doctl's config.yaml: %w", err)
}
cfg := doctlConfig{}
err = yaml.Unmarshal(cfgFile, &cfg)
if err != nil {
return "", "", fmt.Errorf("couldn't unmarshal doctl's config.yaml: %w", err)
}
switch cfg.Context {
case "default":
return cfg.AccessToken, cfg.Context, nil
default:
return cfg.AuthContexts[cfg.Context], cfg.Context, nil
}
}
func sanitizeAnsibleGroup(s string) string {
// replace invalid characters
s = strings.NewReplacer(
" ", "_",
"-", "_",
":", "_",
).Replace(s)
// group names cannot start with a digit
if '0' <= s[0] && s[0] <= '9' {
s = "_" + s
}
return s
}
func removeIgnored(droplets []godo.Droplet, ignored []string) []godo.Droplet {
if len(ignored) == 0 {
return droplets
}
// copy ignored droplets into a map
ignoreList := make(map[string]interface{}, len(ignored))
for _, i := range ignored {
ignoreList[i] = struct{}{}
}
// remove ignored droplets from the list
newDroplets := droplets[:0]
for _, d := range droplets {
if _, ignored := ignoreList[d.Name]; ignored {
log.WithField("droplet", d.Name).Info("ignoring")
continue
}
newDroplets = append(newDroplets, d)
}
return newDroplets
}
// get droplets w/ pagination
func listDroplets(ctx context.Context, client *godo.Client, tag string) ([]godo.Droplet, error) {
droplets := []godo.Droplet{}
call := func(opt *godo.ListOptions) (interface{}, *godo.Response, error) {
if tag != "" {
return client.Droplets.ListByTag(ctx, tag, opt)
}
return client.Droplets.List(ctx, opt)
}
handler := func(d interface{}) error {
dd, ok := d.([]godo.Droplet)
if !ok {
return fmt.Errorf("listing Droplets")
}
droplets = append(droplets, dd...)
return nil
}
err := paginateGodo(ctx, call, handler)
if err != nil {
return nil, err
}
return droplets, nil
}
// get project resources w/ pagination
func listProjectResources(ctx context.Context, client *godo.Client, projectID string) ([]godo.ProjectResource, error) {
prs := []godo.ProjectResource{}
call := func(opt *godo.ListOptions) (interface{}, *godo.Response, error) {
return client.Projects.ListResources(ctx, projectID, opt)
}
handler := func(r interface{}) error {
rr, ok := r.([]godo.ProjectResource)
if !ok {
return fmt.Errorf("listing project resources")
}
prs = append(prs, rr...)
return nil
}
err := paginateGodo(ctx, call, handler)
if err != nil {
return nil, err
}
return prs, nil
}
func paginateGodo(ctx context.Context, call func(*godo.ListOptions) (interface{}, *godo.Response, error), handler func(interface{}) error) error {
// create options. initially, these will be blank
opt := &godo.ListOptions{}
for {
results, resp, err := call(opt)
if err != nil {
return err
}
err = handler(results)
if err != nil {
return nil
}
// if we are at the last page, break out the for loop
if resp.Links == nil || resp.Links.IsLastPage() {
break
}
page, err := resp.Links.CurrentPage()
if err != nil {
return err
}
// set the page we want for the next request
opt.Page = page + 1
}
return nil
}
| {
ll.WithError(err).WithField("urn", r.URN).Error("parsing droplet ID, skipping")
continue
} | conditional_block |
main.go | /*
Copyright 2020 Kamal Nasser All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
import (
"bytes"
"context"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"strconv"
"strings"
"github.com/apex/log"
"github.com/apex/log/handlers/cli"
"github.com/digitalocean/godo"
"gopkg.in/alecthomas/kingpin.v2"
"gopkg.in/yaml.v2"
)
var (
doToken = kingpin.Flag("access-token", "DigitalOcean API Token - if unset, attempts to use doctl's stored token of its current default context. env var: DIGITALOCEAN_ACCESS_TOKEN").Short('t').Envar("DIGITALOCEAN_ACCESS_TOKEN").String()
sshUser = kingpin.Flag("ssh-user", "default ssh user").String()
sshPort = kingpin.Flag("ssh-port", "default ssh port").Int()
tag = kingpin.Flag("tag", "filter droplets by tag").String()
ignore = kingpin.Flag("ignore", "ignore a Droplet by name, can be specified multiple times").Strings()
groupByRegion = kingpin.Flag("group-by-region", "group hosts by region, defaults to true").Default("true").Bool()
groupByTag = kingpin.Flag("group-by-tag", "group hosts by their Droplet tags, defaults to true").Default("true").Bool()
groupByProject = kingpin.Flag("group-by-project", "group hosts by their Projects, defaults to true").Default("true").Bool()
privateIPs = kingpin.Flag("private-ips", "use private Droplet IPs instead of public IPs").Bool()
out = kingpin.Flag("out", "write the ansible inventory to this file - if unset, print to stdout").String()
timeout = kingpin.Flag("timeout", "timeout for total runtime of the command, defaults to 2m").Default("2m").Duration()
)
var doRegions = []string{"ams1", "ams2", "ams3", "blr1", "fra1", "lon1", "nyc1", "nyc2", "nyc3", "sfo1", "sfo2", "sfo3", "sgp1", "tor1"}
func main() {
kingpin.Parse()
log.SetHandler(cli.Default)
if *doToken == "" {
log.Info("no access token provided, attempting to look up doctl's access token")
token, context, err := doctlToken()
if err != nil {
log.WithError(err).Fatalf("couldn't look up token")
}
*doToken = token
log.WithField("context", context).Info("using doctl access token")
}
ctx, cancel := context.WithTimeout(context.Background(), *timeout)
defer cancel()
client := godo.NewFromToken(*doToken)
// get droplets
if *tag != "" {
log.WithField("tag", *tag).Info("only selecting tagged Droplets")
}
log.Info("listing Droplets")
droplets, err := listDroplets(ctx, client, *tag)
if err != nil {
log.WithError(err).Fatal("couldn't fetch Droplets")
}
// filter out ignored droplets
droplets = removeIgnored(droplets, *ignore)
// initialize some maps
var dropletsByRegion map[string][]string
if *groupByRegion {
dropletsByRegion = make(map[string][]string, len(doRegions))
for _, r := range doRegions {
dropletsByRegion[r] = []string{}
}
}
var dropletsByTag map[string][]string
if *groupByTag {
dropletsByTag = make(map[string][]string, 0)
}
var inventory bytes.Buffer
dropletsByID := make(map[int]string, len(droplets))
for _, d := range droplets {
ll := log.WithField("droplet", d.Name)
ll.Info("processing")
dropletsByID[d.ID] = d.Name
if *groupByRegion {
r := d.Region.Slug
dropletsByRegion[r] = append(dropletsByRegion[r], d.Name)
}
if *groupByTag {
for _, tag := range d.Tags {
dropletsByTag[tag] = append(dropletsByTag[tag], d.Name)
}
}
var (
ip string
err error
)
if *privateIPs {
ip, err = d.PrivateIPv4()
} else {
ip, err = d.PublicIPv4()
}
if err != nil {
ll.WithError(err).Error("couldn't look up the Droplet's IP address, skipped")
continue
}
inventory.WriteString(d.Name)
inventory.WriteRune('\t')
if *sshUser != "" {
inventory.WriteString(fmt.Sprintf("ansible_user=%s ", *sshUser))
}
if *sshPort != 0 {
inventory.WriteString(fmt.Sprintf("ansible_port=%d ", *sshPort))
}
if ip != "" {
inventory.WriteString(fmt.Sprintf("ansible_host=%s", ip))
} else {
ll.Warn("could not get the Droplet's IP address, using hostname")
}
inventory.WriteRune('\n')
}
inventory.WriteRune('\n')
// write the region groups
if *groupByRegion {
// loop over the doRegions slice to maintain alphabetic order
for _, region := range doRegions {
log.WithField("region", region).Info("building region group")
droplets := dropletsByRegion[region]
inventory.WriteString(fmt.Sprintf("[%s]", region))
inventory.WriteRune('\n')
for _, d := range droplets {
inventory.WriteString(d)
inventory.WriteRune('\n')
}
inventory.WriteRune('\n')
}
}
// write the tag groups
if *groupByTag {
for tag, droplets := range dropletsByTag {
tag = sanitizeAnsibleGroup(tag)
log.WithField("tag", tag).Info("building tag group")
inventory.WriteString(fmt.Sprintf("[%s]", tag))
inventory.WriteRune('\n')
for _, d := range droplets {
inventory.WriteString(d)
inventory.WriteRune('\n')
}
inventory.WriteRune('\n')
}
}
// write the project groups
if *groupByProject {
log.Info("listing projects")
projects, _, err := client.Projects.List(ctx, nil)
if err != nil {
log.WithError(err).Fatal("couldn't list projects")
}
dropletsByProject := make(map[string][]string)
for _, project := range projects {
ll := log.WithField("project", project.Name)
ll.Info("listing project resources")
resources, err := listProjectResources(ctx, client, project.ID)
if err != nil {
ll.WithError(err).Fatal("")
}
for _, r := range resources {
if !strings.HasPrefix(r.URN, "do:droplet:") {
continue
}
id := strings.TrimPrefix(r.URN, "do:droplet:")
idInt, err := strconv.Atoi(id)
if err != nil {
ll.WithError(err).WithField("urn", r.URN).Error("parsing droplet ID, skipping")
continue
}
// skip droplets that aren't included in the inventory
droplet, exists := dropletsByID[idInt]
if !exists {
continue
}
dropletsByProject[project.Name] = append(dropletsByProject[project.Name], droplet)
}
}
for project, droplets := range dropletsByProject {
project = sanitizeAnsibleGroup(project)
log.WithField("project", project).Info("building project group")
inventory.WriteString(fmt.Sprintf("[%s]", project))
inventory.WriteRune('\n')
for _, d := range droplets {
inventory.WriteString(d)
inventory.WriteRune('\n')
}
inventory.WriteRune('\n')
}
}
if *out != "" {
ll := log.WithField("out", *out)
ll.Info("writing inventory to file")
f, err := os.Create(*out)
if err != nil {
ll.WithError(err).Fatal("couldn't open file for writing")
}
defer f.Close()
_, err = inventory.WriteTo(f)
if err != nil {
ll.WithError(err).Fatal("couldn't write inventory to file")
}
} else {
inventory.WriteTo(os.Stdout)
}
log.Info("done!")
}
func doctlToken() (string, string, error) {
type doctlConfig struct {
Context string `yaml:"context"`
AccessToken string `yaml:"access-token"`
AuthContexts map[string]string `yaml:"auth-contexts"`
}
cfgDir, err := os.UserConfigDir()
if err != nil {
return "", "", fmt.Errorf("couldn't look up user config dir: %w", err)
}
cfgFile, err := ioutil.ReadFile(filepath.Join(cfgDir, "doctl", "config.yaml"))
if err != nil {
return "", "", fmt.Errorf("couldn't read doctl's config.yaml: %w", err)
}
cfg := doctlConfig{}
err = yaml.Unmarshal(cfgFile, &cfg)
if err != nil {
return "", "", fmt.Errorf("couldn't unmarshal doctl's config.yaml: %w", err)
}
switch cfg.Context {
case "default":
return cfg.AccessToken, cfg.Context, nil
default:
return cfg.AuthContexts[cfg.Context], cfg.Context, nil
}
}
func sanitizeAnsibleGroup(s string) string |
func removeIgnored(droplets []godo.Droplet, ignored []string) []godo.Droplet {
if len(ignored) == 0 {
return droplets
}
// copy ignored droplets into a map
ignoreList := make(map[string]interface{}, len(ignored))
for _, i := range ignored {
ignoreList[i] = struct{}{}
}
// remove ignored droplets from the list
newDroplets := droplets[:0]
for _, d := range droplets {
if _, ignored := ignoreList[d.Name]; ignored {
log.WithField("droplet", d.Name).Info("ignoring")
continue
}
newDroplets = append(newDroplets, d)
}
return newDroplets
}
// get droplets w/ pagination
func listDroplets(ctx context.Context, client *godo.Client, tag string) ([]godo.Droplet, error) {
droplets := []godo.Droplet{}
call := func(opt *godo.ListOptions) (interface{}, *godo.Response, error) {
if tag != "" {
return client.Droplets.ListByTag(ctx, tag, opt)
}
return client.Droplets.List(ctx, opt)
}
handler := func(d interface{}) error {
dd, ok := d.([]godo.Droplet)
if !ok {
return fmt.Errorf("listing Droplets")
}
droplets = append(droplets, dd...)
return nil
}
err := paginateGodo(ctx, call, handler)
if err != nil {
return nil, err
}
return droplets, nil
}
// get project resources w/ pagination
func listProjectResources(ctx context.Context, client *godo.Client, projectID string) ([]godo.ProjectResource, error) {
prs := []godo.ProjectResource{}
call := func(opt *godo.ListOptions) (interface{}, *godo.Response, error) {
return client.Projects.ListResources(ctx, projectID, opt)
}
handler := func(r interface{}) error {
rr, ok := r.([]godo.ProjectResource)
if !ok {
return fmt.Errorf("listing project resources")
}
prs = append(prs, rr...)
return nil
}
err := paginateGodo(ctx, call, handler)
if err != nil {
return nil, err
}
return prs, nil
}
func paginateGodo(ctx context.Context, call func(*godo.ListOptions) (interface{}, *godo.Response, error), handler func(interface{}) error) error {
// create options. initially, these will be blank
opt := &godo.ListOptions{}
for {
results, resp, err := call(opt)
if err != nil {
return err
}
err = handler(results)
if err != nil {
return nil
}
// if we are at the last page, break out the for loop
if resp.Links == nil || resp.Links.IsLastPage() {
break
}
page, err := resp.Links.CurrentPage()
if err != nil {
return err
}
// set the page we want for the next request
opt.Page = page + 1
}
return nil
}
| {
// replace invalid characters
s = strings.NewReplacer(
" ", "_",
"-", "_",
":", "_",
).Replace(s)
// group names cannot start with a digit
if '0' <= s[0] && s[0] <= '9' {
s = "_" + s
}
return s
} | identifier_body |
main.go | /*
Copyright 2020 Kamal Nasser All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
import (
"bytes"
"context"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"strconv"
"strings"
"github.com/apex/log"
"github.com/apex/log/handlers/cli"
"github.com/digitalocean/godo"
"gopkg.in/alecthomas/kingpin.v2"
"gopkg.in/yaml.v2"
)
var (
doToken = kingpin.Flag("access-token", "DigitalOcean API Token - if unset, attempts to use doctl's stored token of its current default context. env var: DIGITALOCEAN_ACCESS_TOKEN").Short('t').Envar("DIGITALOCEAN_ACCESS_TOKEN").String()
sshUser = kingpin.Flag("ssh-user", "default ssh user").String()
sshPort = kingpin.Flag("ssh-port", "default ssh port").Int()
tag = kingpin.Flag("tag", "filter droplets by tag").String()
ignore = kingpin.Flag("ignore", "ignore a Droplet by name, can be specified multiple times").Strings()
groupByRegion = kingpin.Flag("group-by-region", "group hosts by region, defaults to true").Default("true").Bool()
groupByTag = kingpin.Flag("group-by-tag", "group hosts by their Droplet tags, defaults to true").Default("true").Bool()
groupByProject = kingpin.Flag("group-by-project", "group hosts by their Projects, defaults to true").Default("true").Bool()
privateIPs = kingpin.Flag("private-ips", "use private Droplet IPs instead of public IPs").Bool()
out = kingpin.Flag("out", "write the ansible inventory to this file - if unset, print to stdout").String()
timeout = kingpin.Flag("timeout", "timeout for total runtime of the command, defaults to 2m").Default("2m").Duration()
)
var doRegions = []string{"ams1", "ams2", "ams3", "blr1", "fra1", "lon1", "nyc1", "nyc2", "nyc3", "sfo1", "sfo2", "sfo3", "sgp1", "tor1"}
func main() {
kingpin.Parse()
log.SetHandler(cli.Default)
if *doToken == "" {
log.Info("no access token provided, attempting to look up doctl's access token")
token, context, err := doctlToken()
if err != nil {
log.WithError(err).Fatalf("couldn't look up token")
}
*doToken = token
log.WithField("context", context).Info("using doctl access token")
}
ctx, cancel := context.WithTimeout(context.Background(), *timeout) | // get droplets
if *tag != "" {
log.WithField("tag", *tag).Info("only selecting tagged Droplets")
}
log.Info("listing Droplets")
droplets, err := listDroplets(ctx, client, *tag)
if err != nil {
log.WithError(err).Fatal("couldn't fetch Droplets")
}
// filter out ignored droplets
droplets = removeIgnored(droplets, *ignore)
// initialize some maps
var dropletsByRegion map[string][]string
if *groupByRegion {
dropletsByRegion = make(map[string][]string, len(doRegions))
for _, r := range doRegions {
dropletsByRegion[r] = []string{}
}
}
var dropletsByTag map[string][]string
if *groupByTag {
dropletsByTag = make(map[string][]string, 0)
}
var inventory bytes.Buffer
dropletsByID := make(map[int]string, len(droplets))
for _, d := range droplets {
ll := log.WithField("droplet", d.Name)
ll.Info("processing")
dropletsByID[d.ID] = d.Name
if *groupByRegion {
r := d.Region.Slug
dropletsByRegion[r] = append(dropletsByRegion[r], d.Name)
}
if *groupByTag {
for _, tag := range d.Tags {
dropletsByTag[tag] = append(dropletsByTag[tag], d.Name)
}
}
var (
ip string
err error
)
if *privateIPs {
ip, err = d.PrivateIPv4()
} else {
ip, err = d.PublicIPv4()
}
if err != nil {
ll.WithError(err).Error("couldn't look up the Droplet's IP address, skipped")
continue
}
inventory.WriteString(d.Name)
inventory.WriteRune('\t')
if *sshUser != "" {
inventory.WriteString(fmt.Sprintf("ansible_user=%s ", *sshUser))
}
if *sshPort != 0 {
inventory.WriteString(fmt.Sprintf("ansible_port=%d ", *sshPort))
}
if ip != "" {
inventory.WriteString(fmt.Sprintf("ansible_host=%s", ip))
} else {
ll.Warn("could not get the Droplet's IP address, using hostname")
}
inventory.WriteRune('\n')
}
inventory.WriteRune('\n')
// write the region groups
if *groupByRegion {
// loop over the doRegions slice to maintain alphabetic order
for _, region := range doRegions {
log.WithField("region", region).Info("building region group")
droplets := dropletsByRegion[region]
inventory.WriteString(fmt.Sprintf("[%s]", region))
inventory.WriteRune('\n')
for _, d := range droplets {
inventory.WriteString(d)
inventory.WriteRune('\n')
}
inventory.WriteRune('\n')
}
}
// write the tag groups
if *groupByTag {
for tag, droplets := range dropletsByTag {
tag = sanitizeAnsibleGroup(tag)
log.WithField("tag", tag).Info("building tag group")
inventory.WriteString(fmt.Sprintf("[%s]", tag))
inventory.WriteRune('\n')
for _, d := range droplets {
inventory.WriteString(d)
inventory.WriteRune('\n')
}
inventory.WriteRune('\n')
}
}
// write the project groups
if *groupByProject {
log.Info("listing projects")
projects, _, err := client.Projects.List(ctx, nil)
if err != nil {
log.WithError(err).Fatal("couldn't list projects")
}
dropletsByProject := make(map[string][]string)
for _, project := range projects {
ll := log.WithField("project", project.Name)
ll.Info("listing project resources")
resources, err := listProjectResources(ctx, client, project.ID)
if err != nil {
ll.WithError(err).Fatal("")
}
for _, r := range resources {
if !strings.HasPrefix(r.URN, "do:droplet:") {
continue
}
id := strings.TrimPrefix(r.URN, "do:droplet:")
idInt, err := strconv.Atoi(id)
if err != nil {
ll.WithError(err).WithField("urn", r.URN).Error("parsing droplet ID, skipping")
continue
}
// skip droplets that aren't included in the inventory
droplet, exists := dropletsByID[idInt]
if !exists {
continue
}
dropletsByProject[project.Name] = append(dropletsByProject[project.Name], droplet)
}
}
for project, droplets := range dropletsByProject {
project = sanitizeAnsibleGroup(project)
log.WithField("project", project).Info("building project group")
inventory.WriteString(fmt.Sprintf("[%s]", project))
inventory.WriteRune('\n')
for _, d := range droplets {
inventory.WriteString(d)
inventory.WriteRune('\n')
}
inventory.WriteRune('\n')
}
}
if *out != "" {
ll := log.WithField("out", *out)
ll.Info("writing inventory to file")
f, err := os.Create(*out)
if err != nil {
ll.WithError(err).Fatal("couldn't open file for writing")
}
defer f.Close()
_, err = inventory.WriteTo(f)
if err != nil {
ll.WithError(err).Fatal("couldn't write inventory to file")
}
} else {
inventory.WriteTo(os.Stdout)
}
log.Info("done!")
}
func doctlToken() (string, string, error) {
type doctlConfig struct {
Context string `yaml:"context"`
AccessToken string `yaml:"access-token"`
AuthContexts map[string]string `yaml:"auth-contexts"`
}
cfgDir, err := os.UserConfigDir()
if err != nil {
return "", "", fmt.Errorf("couldn't look up user config dir: %w", err)
}
cfgFile, err := ioutil.ReadFile(filepath.Join(cfgDir, "doctl", "config.yaml"))
if err != nil {
return "", "", fmt.Errorf("couldn't read doctl's config.yaml: %w", err)
}
cfg := doctlConfig{}
err = yaml.Unmarshal(cfgFile, &cfg)
if err != nil {
return "", "", fmt.Errorf("couldn't unmarshal doctl's config.yaml: %w", err)
}
switch cfg.Context {
case "default":
return cfg.AccessToken, cfg.Context, nil
default:
return cfg.AuthContexts[cfg.Context], cfg.Context, nil
}
}
func sanitizeAnsibleGroup(s string) string {
// replace invalid characters
s = strings.NewReplacer(
" ", "_",
"-", "_",
":", "_",
).Replace(s)
// group names cannot start with a digit
if '0' <= s[0] && s[0] <= '9' {
s = "_" + s
}
return s
}
func removeIgnored(droplets []godo.Droplet, ignored []string) []godo.Droplet {
if len(ignored) == 0 {
return droplets
}
// copy ignored droplets into a map
ignoreList := make(map[string]interface{}, len(ignored))
for _, i := range ignored {
ignoreList[i] = struct{}{}
}
// remove ignored droplets from the list
newDroplets := droplets[:0]
for _, d := range droplets {
if _, ignored := ignoreList[d.Name]; ignored {
log.WithField("droplet", d.Name).Info("ignoring")
continue
}
newDroplets = append(newDroplets, d)
}
return newDroplets
}
// get droplets w/ pagination
func listDroplets(ctx context.Context, client *godo.Client, tag string) ([]godo.Droplet, error) {
droplets := []godo.Droplet{}
call := func(opt *godo.ListOptions) (interface{}, *godo.Response, error) {
if tag != "" {
return client.Droplets.ListByTag(ctx, tag, opt)
}
return client.Droplets.List(ctx, opt)
}
handler := func(d interface{}) error {
dd, ok := d.([]godo.Droplet)
if !ok {
return fmt.Errorf("listing Droplets")
}
droplets = append(droplets, dd...)
return nil
}
err := paginateGodo(ctx, call, handler)
if err != nil {
return nil, err
}
return droplets, nil
}
// get project resources w/ pagination
func listProjectResources(ctx context.Context, client *godo.Client, projectID string) ([]godo.ProjectResource, error) {
prs := []godo.ProjectResource{}
call := func(opt *godo.ListOptions) (interface{}, *godo.Response, error) {
return client.Projects.ListResources(ctx, projectID, opt)
}
handler := func(r interface{}) error {
rr, ok := r.([]godo.ProjectResource)
if !ok {
return fmt.Errorf("listing project resources")
}
prs = append(prs, rr...)
return nil
}
err := paginateGodo(ctx, call, handler)
if err != nil {
return nil, err
}
return prs, nil
}
func paginateGodo(ctx context.Context, call func(*godo.ListOptions) (interface{}, *godo.Response, error), handler func(interface{}) error) error {
// create options. initially, these will be blank
opt := &godo.ListOptions{}
for {
results, resp, err := call(opt)
if err != nil {
return err
}
err = handler(results)
if err != nil {
return nil
}
// if we are at the last page, break out the for loop
if resp.Links == nil || resp.Links.IsLastPage() {
break
}
page, err := resp.Links.CurrentPage()
if err != nil {
return err
}
// set the page we want for the next request
opt.Page = page + 1
}
return nil
} | defer cancel()
client := godo.NewFromToken(*doToken)
| random_line_split |
disk.py | # coding = utf-8
import os
import linux
import re
import json
from linux import try_catch
SAS_LIMIT_COUNT = 10
SAS_LIMIT_GB = 1024
SATA_LIMIT_HOURS = 10
SSD_WEAROUT_LIMIT = 99
SATA_SMART_ERROR_LIST = [
"Reallocated_Sector_Ct",
"Spin_Retry_Count",
"End-to-End_Error",
"High_Fly_Writes",
"Current_Pending_Sector",
"UDMA_CRC_Error_Count"
]
class | (object):
def __init__(self):
self.model = ""
self.vendor = ""
self.fw = ""
self.sn = ""
self.wwn = ""
self.hctl = ""
self.dev_name = ""
self.smart = ""
self.type = ""
self.smart_attr = {}
self.age = {}
self.flash = False
@staticmethod
def map_disk_wwn_hctl(diskname):
""" map wwn and H:C:T:L from dev_name """
lsscsi = linux.exe_shell("lsscsi -w |grep /dev/|awk '{print$1,$3,$4}'")
for i in lsscsi.splitlines():
split_t = i.split(" ")
if diskname in split_t[2]:
return {
"hctl": split_t[0],
"wwn": split_t[1],
"dev_name": split_t[2]
}
return None
@staticmethod
def get_from_sas_disk_smart_i_str(disk_name):
return linux.exe_shell("smartctl -i /dev/%s" % disk_name)
@staticmethod
def get_from_sas_disk_simple_attr(disk_name):
smart = Disk.get_from_sas_disk_smart_i_str(disk_name)
model = linux.search_regex_one_line_string_column(smart, "(?:Device Model|Product):.+", ":", 1).strip()
sn = linux.search_regex_one_line_string_column(smart, "Serial (?:N|n)umber.+", ":", 1).strip()
vendor = linux.search_regex_one_line_string_column(smart, "(?:SATA Ver|Vendor).+", ":", 1).split()[0].strip()
return {
"name": disk_name,
"model": model,
"sn": sn,
"vendor": vendor
}
@staticmethod
def get_all_disk():
""" return all disk object list from hba and chipset. """
disks = []
disks_lines = linux.exe_shell("lsblk -o NAME,VENDOR|grep -P '^sd.*[A-Z]'")
for line in disks_lines.splitlines():
disk_t = line.split()
if len(disk_t) > 1 and "LSI" not in disk_t[1]:
disks.append(disk_t[0])
ds = []
for i in disks:
d_t = DiskFromLsiSas3("", i)
d_t.fill_attrs()
ds.append(d_t)
return ds
@staticmethod
def get_dev_attr_dict(dev_name):
i = DiskFromLsiSas3("", dev_name)
i.fill_attrs()
return {
"dev": i.dev_name,
"model": i.model,
"fw": i.fw,
"SN": i.sn,
"type": i.type,
"vendor": i.vendor,
"smart": i.smart_attr,
"hctl": i.hctl,
"wwn": i.wwn,
"age": i.age,
"is_ssd": str(i.flash)
}
@staticmethod
def __if_smart_err(disk_oj):
""" return True if smart info of disk_oj has error, else return False """
if "SAS" in disk_oj.smart:
if int(disk_oj.smart_attr["channel0Error"]["Invalid DWORD count"]) > 0 or \
int(disk_oj.smart_attr["channel0Error"]["Running disparity error count"]) > 0 or \
int(disk_oj.smart_attr["channel0Error"]["Loss of DWORD synchronization"]) > 0 or \
int(disk_oj.smart_attr["channel0Error"]["Phy reset problem"]) > 0 or \
int(disk_oj.smart_attr["channel1Error"]["Invalid DWORD count"]) > 0 or \
int(disk_oj.smart_attr["channel1Error"]["Running disparity error count"]) > 0 or \
int(disk_oj.smart_attr["channel1Error"]["Loss of DWORD synchronization"]) > 0 or \
int(disk_oj.smart_attr["channel1Error"]["Phy reset problem"]) > 0:
return True
else:
return False
if "SATA" in disk_oj.smart:
if "No Errors Logged" not in disk_oj.smart:
return False
for attr_ in SATA_SMART_ERROR_LIST:
if disk_oj.smart_attr[attr_]["RAW_VALUE"] > 0:
return False
return True
@staticmethod
def get_over_agelimit_disks(disk_list):
""" return sas and sata disk list witch start_stop_hours/count or data is over the limit """
over_sas_disk = []
over_sata_disk = []
for disk in disk_list:
if disk.type == "SAS":
if int(disk.age["start_stop_count"]) > SAS_LIMIT_COUNT or float(disk.age["data_gb"]) > SAS_LIMIT_GB:
over_sas_disk.append(disk)
if disk.type == "SATA":
if int(disk.age["start_stop_count"]) > SAS_LIMIT_COUNT or int(
disk.age["power_on_hours"]) > SATA_LIMIT_HOURS:
over_sata_disk.append(disk)
return over_sas_disk, over_sata_disk
@staticmethod
def get_overage_disks_json(disk_list):
""" get_overage_disks function's json model """
pass
@staticmethod
def get_err_disk_dict():
""" return disk dict has error """
err_disk_dict = {}
disks = Disk.get_all_disk()
for i in disks:
if Disk.__if_smart_err(i):
struct = {
"dev": i.dev_name,
"model": i.model,
"fw": i.fw,
"SN": i.sn,
"type": i.type,
"vendor": i.vendor,
"smart": i.smart_attr,
"hctl": i.hctl,
"wwn": i.wwn
}
err_disk_dict[i.dev_name] = struct
return err_disk_dict
@staticmethod
def get_wearout_ssd_status():
""" return ssd wearout status dict """
disks = Disk.get_all_disk()
ssd_status = {}
for i in disks:
tmp = i.get_wearout_status()
# tmp[0] is dev_name, tmp[1] is wearout %
if tmp is not None:
ssd_status[tmp[0]] = tmp[1]
if len(ssd_status) == 0:
return None
return ssd_status
class DiskFromLsiSas3(Disk):
def __init__(self, sn, name):
super(DiskFromLsiSas3, self).__init__()
self.sn = sn
self.dev_name = name
def fill_attrs(self):
smart_str = linux.exe_shell("smartctl -a /dev/%s" % self.dev_name)
smartx_str = linux.exe_shell("smartctl -x /dev/%s" % self.dev_name)
self.smart = smartx_str
try:
self.model = linux.search_regex_one_line_string_column(smart_str, "(?:Device Model|Product):.+", ":",
1).strip()
self.fw = linux.search_regex_one_line_string_column(smart_str, "(?:Firmware|Revision).+", ":", 1).strip()
self.vendor = linux.search_regex_one_line_string_column(smart_str, "(?:SATA Ver|Vendor).+", ":", 1).split()[
0].strip()
self.sn = linux.search_regex_one_line_string_column(smart_str, "Serial (?:N|n)umber.+", ":", 1).strip()
map_temp = self.map_disk_wwn_hctl(self.dev_name)
self.wwn = map_temp["wwn"] if map_temp is not None else ""
self.hctl = map_temp["hctl"] if map_temp is not None else ""
rotational = linux.read_file(os.path.join("/sys/block", self.dev_name, "queue/rotational"))
if rotational.strip() == "0":
self.flash = True
except IOError:
print("%s read_file rotational err." % self.dev_name)
except Exception:
print("disk %s is not exists." % self.dev_name)
# fill in smart_attr
# ==========================================================================
# SAS disk
# smart_attr: {
# 'channel0Error': {
# 'Invalid DWORD count': '0',
# 'Loss of DWORD synchronization': '0',
# 'Phy reset problem': '0',
# 'Running disparity error count': '0'
# }
# 'channel1Error': {
# 'Invalid DWORD count': '0',
# 'Loss of DWORD synchronization': '0',
# 'Phy reset problem': '0',
# 'Running disparity error count': '0'
# }
# 'read': {
# 'byte10_9': '59036.419',
# 'correctionAlgorithmInvocations': '414271',
# 'errorEccByRereadsRewrite': '0',
# 'errorEccDelayed': '8',
# 'errorEccFast': '0',
# 'totalErrorsCorrected': '8',
# 'totalUncorrectedError': '0'
# }
# 'verify': {
# 'byte10_9': '59036.419',
# 'correctionAlgorithmInvocations': '414271',
# 'errorEccByRereadsRewrite': '0',
# 'errorEccDelayed': '8',
# 'errorEccFast': '0',
# 'totalErrorsCorrected': '8',
# 'totalUncorrectedError': '0'
# }
# 'write': {
# 'byte10_9': '59036.419',
# 'correctionAlgorithmInvocations': '414271',
# 'errorEccByRereadsRewrite': '0',
# 'errorEccDelayed': '8',
# 'errorEccFast': '0',
# 'totalErrorsCorrected': '8',
# 'totalUncorrectedError': '0'
# }
# }
#
# SATA disk
# smart_attr: {
# 'Raw_Read_Error_Rate': {
# 'ID': '1',
# 'FLAG': '0x000f',
# 'VALUE': '074',
# 'WORST': '063',
# 'THRESH': '044',
# 'TYPE': 'Pre-fail',
# 'UPDATED': 'Always',
# 'WHEN_FAILED': '-',
# 'RAW_VALUE': '26816470'
# }
# 'Spin_Up_Time': {
# ...(According to the following form)
# }
# }
# SATA smart form:
# ID# ATTRIBUTE_NAME FLAG VALUE WORST THRESH TYPE UPDATED WHEN_FAILED RAW_VALUE
# 1 Raw_Read_Error_Rate 0x000f 074 063 044 Pre-fail Always - 26816470
# 3 Spin_Up_Time 0x0003 094 094 000 Pre-fail Always - 0
# 4 Start_Stop_Count 0x0032 100 100 020 Old_age Always - 314
# 5 Reallocated_Sector_Ct 0x0033 100 100 036 Pre-fail Always - 1
# 7 Seek_Error_Rate 0x000f 073 060 030 Pre-fail Always - 21595176
# 9 Power_On_Hours 0x0032 096 096 000 Old_age Always - 3851
# 10 Spin_Retry_Count 0x0013 100 100 097 Pre-fail Always - 0
# 12 Power_Cycle_Count 0x0032 100 100 020 Old_age Always - 271
# 184 End-to-End_Error 0x0032 100 100 099 Old_age Always - 0
# 187 Reported_Uncorrect 0x0032 100 100 000 Old_age Always - 0
# 188 Command_Timeout 0x0032 100 100 000 Old_age Always - 0
# 189 High_Fly_Writes 0x003a 100 100 000 Old_age Always - 0
# 190 Airflow_Temperature_Cel 0x0022 064 057 045 Old_age Always - 36 (Min/Max 24/40)
# 191 G-Sense_Error_Rate 0x0032 100 100 000 Old_age Always - 0
# 192 Power-Off_Retract_Count 0x0032 100 100 000 Old_age Always - 147
# 193 Load_Cycle_Count 0x0032 099 099 000 Old_age Always - 2690
# 194 Temperature_Celsius 0x0022 036 043 000 Old_age Always - 36 (0 11 0 0 0)
# 195 Hardware_ECC_Recovered 0x001a 110 099 000 Old_age Always - 26816470
# 197 Current_Pending_Sector 0x0012 100 100 000 Old_age Always - 0
# 198 Offline_Uncorrectable 0x0010 100 100 000 Old_age Offline - 0
# 199 UDMA_CRC_Error_Count 0x003e 200 200 000 Old_age Always - 0
#
# ===========================================================================
if "SAS" in smart_str:
self.type = "SAS"
smart_str_arr = linux.search_regex_strings(smart_str, " *(?:write:|read:|verify:).+")
for line in smart_str_arr:
tmp = line.split()
dict_tmp = {
"errorEccFast": tmp[1].strip(),
"errorEccDelayed": tmp[2].strip(),
"errorEccByRereadsRewrite": tmp[3].strip(),
"totalErrorsCorrected": tmp[4].strip(),
"correctionAlgorithmInvocations": tmp[5].strip(),
"byte10_9": tmp[6].strip(),
"totalUncorrectedError": tmp[7].strip()
}
self.smart_attr[tmp[0].replace(":", " ").strip()] = dict_tmp
smart_str_arr = linux.search_regex_strings(
self.smart,
"(?:Invalid DWORD|Running disparity|Loss of DWORD|Phy reset problem).+=.+"
)
i = 0
dict_tmp = {}
for it in smart_str_arr:
tmp = it.split("=")
dict_tmp[tmp[0].strip()] = tmp[1].strip()
if 3 == i:
self.smart_attr["channel0Error"] = dict_tmp
dict_tmp = {}
if 7 == i:
self.smart_attr["channel1Error"] = dict_tmp
dict_tmp = {}
i += 1
# fill in age
# 'data_gb' is float number
# age: {
# 'start_stop_count': '10',
# 'data_gb': '5999'
# }
if isinstance(self.smart, str) and ("start-stop" in self.smart):
self.age["start_stop_count"] = linux.search_regex_one_line_string_column(self.smart, ".+start-stop.+",
":", 1)
all_gb = float(self.smart_attr["read"]["byte10_9"]) + float(
self.smart_attr["write"]["byte10_9"]) + float(self.smart_attr["verify"]["byte10_9"])
self.age["data_gb"] = str(all_gb)
if "SATA" in smart_str:
self.type = "SATA"
dict_tmp = linux.search_regex_strings(smart_str, ".*[0-9]+.+0x.+(?:In_the_past|-|FAILING_NOW) +[0-9]+")
for line in dict_tmp:
tmp = line.split()
dict_tmp = {
"ID": tmp[0].strip(),
"FLAG": tmp[2].strip(),
"VALUE": tmp[3].strip(),
"WORST": tmp[4].strip(),
"THRESH": tmp[5].strip(),
"TYPE": tmp[6].strip(),
"UPDATED": tmp[7].strip(),
"WHEN_FAILED": tmp[8].strip(),
"RAW_VALUE": tmp[9].strip(),
}
self.smart_attr[tmp[1]] = dict_tmp
if "Start_Stop_Count" in self.smart_attr:
self.age["start_stop_count"] = self.smart_attr["Start_Stop_Count"]["RAW_VALUE"]
self.age["power_on_hours"] = self.smart_attr["Power_On_Hours"]["RAW_VALUE"]
def get_wearout_status(self):
if self.flash is True and "Media_Wearout_Indicator" in self.smart_attr:
value = self.smart_attr["Media_Wearout_Indicator"]["VALUE"]
return self.dev_name, value
else:
return None
def to_json(self):
struct = {
"dev": self.dev_name,
"model": self.model,
"fw": self.fw,
"SN": self.sn,
"type": self.type,
"vendor": self.vendor,
"smart": self.smart_attr,
"hctl": self.hctl,
"wwn": self.wwn
}
json_str = json.dumps(struct, indent=1)
return json_str
class DiskFromLsiSas2(DiskFromLsiSas3):
def __init__(self, sn, name):
super(DiskFromLsiSas2, self).__init__(sn, name)
class DiskFromChipset(DiskFromLsiSas3):
def __init__(self, sn, name):
super(DiskFromChipset, self).__init__(sn, name)
class DiskFromMegaRaid(Disk):
def __init__(self, did, name):
super(DiskFromMegaRaid, self).__init__()
self.dev_name = name
self.did = did
def fill_attrs(self):
pass
| Disk | identifier_name |
disk.py | # coding = utf-8
import os
import linux
import re
import json
from linux import try_catch
SAS_LIMIT_COUNT = 10
SAS_LIMIT_GB = 1024
SATA_LIMIT_HOURS = 10
SSD_WEAROUT_LIMIT = 99
SATA_SMART_ERROR_LIST = [
"Reallocated_Sector_Ct",
"Spin_Retry_Count",
"End-to-End_Error",
"High_Fly_Writes",
"Current_Pending_Sector",
"UDMA_CRC_Error_Count"
]
class Disk(object):
def __init__(self):
self.model = ""
self.vendor = ""
self.fw = ""
self.sn = ""
self.wwn = ""
self.hctl = ""
self.dev_name = ""
self.smart = ""
self.type = ""
self.smart_attr = {}
self.age = {}
self.flash = False
@staticmethod
def map_disk_wwn_hctl(diskname):
""" map wwn and H:C:T:L from dev_name """
lsscsi = linux.exe_shell("lsscsi -w |grep /dev/|awk '{print$1,$3,$4}'")
for i in lsscsi.splitlines():
split_t = i.split(" ")
if diskname in split_t[2]:
return {
"hctl": split_t[0],
"wwn": split_t[1],
"dev_name": split_t[2]
}
return None
@staticmethod
def get_from_sas_disk_smart_i_str(disk_name):
return linux.exe_shell("smartctl -i /dev/%s" % disk_name)
@staticmethod
def get_from_sas_disk_simple_attr(disk_name):
smart = Disk.get_from_sas_disk_smart_i_str(disk_name)
model = linux.search_regex_one_line_string_column(smart, "(?:Device Model|Product):.+", ":", 1).strip()
sn = linux.search_regex_one_line_string_column(smart, "Serial (?:N|n)umber.+", ":", 1).strip()
vendor = linux.search_regex_one_line_string_column(smart, "(?:SATA Ver|Vendor).+", ":", 1).split()[0].strip()
return {
"name": disk_name,
"model": model,
"sn": sn,
"vendor": vendor
}
@staticmethod
def get_all_disk():
""" return all disk object list from hba and chipset. """
disks = []
disks_lines = linux.exe_shell("lsblk -o NAME,VENDOR|grep -P '^sd.*[A-Z]'")
for line in disks_lines.splitlines():
disk_t = line.split()
if len(disk_t) > 1 and "LSI" not in disk_t[1]:
disks.append(disk_t[0])
ds = []
for i in disks:
d_t = DiskFromLsiSas3("", i)
d_t.fill_attrs()
ds.append(d_t)
return ds
@staticmethod
def get_dev_attr_dict(dev_name):
i = DiskFromLsiSas3("", dev_name)
i.fill_attrs()
return {
"dev": i.dev_name,
"model": i.model,
"fw": i.fw,
"SN": i.sn,
"type": i.type,
"vendor": i.vendor,
"smart": i.smart_attr,
"hctl": i.hctl,
"wwn": i.wwn,
"age": i.age,
"is_ssd": str(i.flash)
}
@staticmethod
def __if_smart_err(disk_oj):
""" return True if smart info of disk_oj has error, else return False """
if "SAS" in disk_oj.smart:
if int(disk_oj.smart_attr["channel0Error"]["Invalid DWORD count"]) > 0 or \
int(disk_oj.smart_attr["channel0Error"]["Running disparity error count"]) > 0 or \
int(disk_oj.smart_attr["channel0Error"]["Loss of DWORD synchronization"]) > 0 or \
int(disk_oj.smart_attr["channel0Error"]["Phy reset problem"]) > 0 or \
int(disk_oj.smart_attr["channel1Error"]["Invalid DWORD count"]) > 0 or \
int(disk_oj.smart_attr["channel1Error"]["Running disparity error count"]) > 0 or \
int(disk_oj.smart_attr["channel1Error"]["Loss of DWORD synchronization"]) > 0 or \
int(disk_oj.smart_attr["channel1Error"]["Phy reset problem"]) > 0:
return True
else:
return False
if "SATA" in disk_oj.smart:
if "No Errors Logged" not in disk_oj.smart:
return False
for attr_ in SATA_SMART_ERROR_LIST:
if disk_oj.smart_attr[attr_]["RAW_VALUE"] > 0:
return False
return True
@staticmethod
def get_over_agelimit_disks(disk_list):
""" return sas and sata disk list witch start_stop_hours/count or data is over the limit """
over_sas_disk = []
over_sata_disk = []
for disk in disk_list:
if disk.type == "SAS":
if int(disk.age["start_stop_count"]) > SAS_LIMIT_COUNT or float(disk.age["data_gb"]) > SAS_LIMIT_GB:
over_sas_disk.append(disk)
if disk.type == "SATA":
if int(disk.age["start_stop_count"]) > SAS_LIMIT_COUNT or int(
disk.age["power_on_hours"]) > SATA_LIMIT_HOURS:
over_sata_disk.append(disk)
return over_sas_disk, over_sata_disk
@staticmethod
def get_overage_disks_json(disk_list):
""" get_overage_disks function's json model """
pass
@staticmethod
def get_err_disk_dict():
""" return disk dict has error """
err_disk_dict = {}
disks = Disk.get_all_disk()
for i in disks:
if Disk.__if_smart_err(i):
struct = {
"dev": i.dev_name,
"model": i.model,
"fw": i.fw,
"SN": i.sn,
"type": i.type,
"vendor": i.vendor,
"smart": i.smart_attr,
"hctl": i.hctl,
"wwn": i.wwn
}
err_disk_dict[i.dev_name] = struct
return err_disk_dict
@staticmethod
def get_wearout_ssd_status():
""" return ssd wearout status dict """
disks = Disk.get_all_disk()
ssd_status = {}
for i in disks:
tmp = i.get_wearout_status()
# tmp[0] is dev_name, tmp[1] is wearout %
if tmp is not None:
ssd_status[tmp[0]] = tmp[1]
if len(ssd_status) == 0:
return None
return ssd_status
class DiskFromLsiSas3(Disk):
def __init__(self, sn, name):
super(DiskFromLsiSas3, self).__init__()
self.sn = sn
self.dev_name = name
def fill_attrs(self):
smart_str = linux.exe_shell("smartctl -a /dev/%s" % self.dev_name)
smartx_str = linux.exe_shell("smartctl -x /dev/%s" % self.dev_name)
self.smart = smartx_str
try:
self.model = linux.search_regex_one_line_string_column(smart_str, "(?:Device Model|Product):.+", ":",
1).strip()
self.fw = linux.search_regex_one_line_string_column(smart_str, "(?:Firmware|Revision).+", ":", 1).strip()
self.vendor = linux.search_regex_one_line_string_column(smart_str, "(?:SATA Ver|Vendor).+", ":", 1).split()[
0].strip()
self.sn = linux.search_regex_one_line_string_column(smart_str, "Serial (?:N|n)umber.+", ":", 1).strip()
map_temp = self.map_disk_wwn_hctl(self.dev_name)
self.wwn = map_temp["wwn"] if map_temp is not None else ""
self.hctl = map_temp["hctl"] if map_temp is not None else ""
rotational = linux.read_file(os.path.join("/sys/block", self.dev_name, "queue/rotational"))
if rotational.strip() == "0":
self.flash = True
except IOError:
print("%s read_file rotational err." % self.dev_name)
except Exception:
print("disk %s is not exists." % self.dev_name)
# fill in smart_attr
# ==========================================================================
# SAS disk
# smart_attr: {
# 'channel0Error': {
# 'Invalid DWORD count': '0',
# 'Loss of DWORD synchronization': '0',
# 'Phy reset problem': '0',
# 'Running disparity error count': '0'
# }
# 'channel1Error': {
# 'Invalid DWORD count': '0',
# 'Loss of DWORD synchronization': '0',
# 'Phy reset problem': '0',
# 'Running disparity error count': '0'
# }
# 'read': {
# 'byte10_9': '59036.419',
# 'correctionAlgorithmInvocations': '414271',
# 'errorEccByRereadsRewrite': '0',
# 'errorEccDelayed': '8',
# 'errorEccFast': '0',
# 'totalErrorsCorrected': '8',
# 'totalUncorrectedError': '0'
# }
# 'verify': {
# 'byte10_9': '59036.419',
# 'correctionAlgorithmInvocations': '414271',
# 'errorEccByRereadsRewrite': '0',
# 'errorEccDelayed': '8',
# 'errorEccFast': '0',
# 'totalErrorsCorrected': '8',
# 'totalUncorrectedError': '0'
# }
# 'write': {
# 'byte10_9': '59036.419',
# 'correctionAlgorithmInvocations': '414271',
# 'errorEccByRereadsRewrite': '0',
# 'errorEccDelayed': '8',
# 'errorEccFast': '0',
# 'totalErrorsCorrected': '8',
# 'totalUncorrectedError': '0'
# }
# }
#
# SATA disk
# smart_attr: {
# 'Raw_Read_Error_Rate': {
# 'ID': '1',
# 'FLAG': '0x000f',
# 'VALUE': '074',
# 'WORST': '063',
# 'THRESH': '044',
# 'TYPE': 'Pre-fail',
# 'UPDATED': 'Always',
# 'WHEN_FAILED': '-',
# 'RAW_VALUE': '26816470'
# }
# 'Spin_Up_Time': {
# ...(According to the following form)
# }
# }
# SATA smart form:
# ID# ATTRIBUTE_NAME FLAG VALUE WORST THRESH TYPE UPDATED WHEN_FAILED RAW_VALUE
# 1 Raw_Read_Error_Rate 0x000f 074 063 044 Pre-fail Always - 26816470
# 3 Spin_Up_Time 0x0003 094 094 000 Pre-fail Always - 0
# 4 Start_Stop_Count 0x0032 100 100 020 Old_age Always - 314
# 5 Reallocated_Sector_Ct 0x0033 100 100 036 Pre-fail Always - 1
# 7 Seek_Error_Rate 0x000f 073 060 030 Pre-fail Always - 21595176
# 9 Power_On_Hours 0x0032 096 096 000 Old_age Always - 3851
# 10 Spin_Retry_Count 0x0013 100 100 097 Pre-fail Always - 0
# 12 Power_Cycle_Count 0x0032 100 100 020 Old_age Always - 271
# 184 End-to-End_Error 0x0032 100 100 099 Old_age Always - 0
# 187 Reported_Uncorrect 0x0032 100 100 000 Old_age Always - 0
# 188 Command_Timeout 0x0032 100 100 000 Old_age Always - 0
# 189 High_Fly_Writes 0x003a 100 100 000 Old_age Always - 0
# 190 Airflow_Temperature_Cel 0x0022 064 057 045 Old_age Always - 36 (Min/Max 24/40)
# 191 G-Sense_Error_Rate 0x0032 100 100 000 Old_age Always - 0
# 192 Power-Off_Retract_Count 0x0032 100 100 000 Old_age Always - 147
# 193 Load_Cycle_Count 0x0032 099 099 000 Old_age Always - 2690
# 194 Temperature_Celsius 0x0022 036 043 000 Old_age Always - 36 (0 11 0 0 0)
# 195 Hardware_ECC_Recovered 0x001a 110 099 000 Old_age Always - 26816470
# 197 Current_Pending_Sector 0x0012 100 100 000 Old_age Always - 0
# 198 Offline_Uncorrectable 0x0010 100 100 000 Old_age Offline - 0
# 199 UDMA_CRC_Error_Count 0x003e 200 200 000 Old_age Always - 0
#
# ===========================================================================
if "SAS" in smart_str:
self.type = "SAS"
smart_str_arr = linux.search_regex_strings(smart_str, " *(?:write:|read:|verify:).+")
for line in smart_str_arr:
tmp = line.split()
dict_tmp = {
"errorEccFast": tmp[1].strip(),
"errorEccDelayed": tmp[2].strip(),
"errorEccByRereadsRewrite": tmp[3].strip(),
"totalErrorsCorrected": tmp[4].strip(),
"correctionAlgorithmInvocations": tmp[5].strip(),
"byte10_9": tmp[6].strip(),
"totalUncorrectedError": tmp[7].strip()
}
self.smart_attr[tmp[0].replace(":", " ").strip()] = dict_tmp
smart_str_arr = linux.search_regex_strings(
self.smart,
"(?:Invalid DWORD|Running disparity|Loss of DWORD|Phy reset problem).+=.+"
)
i = 0
dict_tmp = {}
for it in smart_str_arr:
tmp = it.split("=")
dict_tmp[tmp[0].strip()] = tmp[1].strip()
if 3 == i:
self.smart_attr["channel0Error"] = dict_tmp
dict_tmp = {}
if 7 == i:
self.smart_attr["channel1Error"] = dict_tmp
dict_tmp = {}
i += 1
# fill in age
# 'data_gb' is float number
# age: {
# 'start_stop_count': '10',
# 'data_gb': '5999'
# }
if isinstance(self.smart, str) and ("start-stop" in self.smart):
self.age["start_stop_count"] = linux.search_regex_one_line_string_column(self.smart, ".+start-stop.+",
":", 1)
all_gb = float(self.smart_attr["read"]["byte10_9"]) + float(
self.smart_attr["write"]["byte10_9"]) + float(self.smart_attr["verify"]["byte10_9"])
self.age["data_gb"] = str(all_gb)
if "SATA" in smart_str:
self.type = "SATA"
dict_tmp = linux.search_regex_strings(smart_str, ".*[0-9]+.+0x.+(?:In_the_past|-|FAILING_NOW) +[0-9]+")
for line in dict_tmp:
tmp = line.split()
dict_tmp = {
"ID": tmp[0].strip(),
"FLAG": tmp[2].strip(),
"VALUE": tmp[3].strip(),
"WORST": tmp[4].strip(),
"THRESH": tmp[5].strip(),
"TYPE": tmp[6].strip(),
"UPDATED": tmp[7].strip(),
"WHEN_FAILED": tmp[8].strip(),
"RAW_VALUE": tmp[9].strip(),
}
self.smart_attr[tmp[1]] = dict_tmp
if "Start_Stop_Count" in self.smart_attr:
self.age["start_stop_count"] = self.smart_attr["Start_Stop_Count"]["RAW_VALUE"]
self.age["power_on_hours"] = self.smart_attr["Power_On_Hours"]["RAW_VALUE"]
def get_wearout_status(self):
if self.flash is True and "Media_Wearout_Indicator" in self.smart_attr:
value = self.smart_attr["Media_Wearout_Indicator"]["VALUE"]
return self.dev_name, value
else:
return None
def to_json(self):
struct = {
"dev": self.dev_name,
"model": self.model,
"fw": self.fw,
"SN": self.sn,
"type": self.type,
"vendor": self.vendor,
"smart": self.smart_attr,
"hctl": self.hctl,
"wwn": self.wwn
}
json_str = json.dumps(struct, indent=1)
return json_str
class DiskFromLsiSas2(DiskFromLsiSas3):
|
class DiskFromChipset(DiskFromLsiSas3):
def __init__(self, sn, name):
super(DiskFromChipset, self).__init__(sn, name)
class DiskFromMegaRaid(Disk):
def __init__(self, did, name):
super(DiskFromMegaRaid, self).__init__()
self.dev_name = name
self.did = did
def fill_attrs(self):
pass
| def __init__(self, sn, name):
super(DiskFromLsiSas2, self).__init__(sn, name) | identifier_body |
disk.py | # coding = utf-8
import os
import linux
import re
import json
from linux import try_catch
SAS_LIMIT_COUNT = 10
SAS_LIMIT_GB = 1024
SATA_LIMIT_HOURS = 10
SSD_WEAROUT_LIMIT = 99
SATA_SMART_ERROR_LIST = [
"Reallocated_Sector_Ct",
"Spin_Retry_Count",
"End-to-End_Error",
"High_Fly_Writes",
"Current_Pending_Sector",
"UDMA_CRC_Error_Count"
]
class Disk(object):
def __init__(self):
self.model = ""
self.vendor = ""
self.fw = ""
self.sn = ""
self.wwn = ""
self.hctl = ""
self.dev_name = ""
self.smart = ""
self.type = ""
self.smart_attr = {}
self.age = {}
self.flash = False
@staticmethod
def map_disk_wwn_hctl(diskname):
""" map wwn and H:C:T:L from dev_name """
lsscsi = linux.exe_shell("lsscsi -w |grep /dev/|awk '{print$1,$3,$4}'")
for i in lsscsi.splitlines():
split_t = i.split(" ")
if diskname in split_t[2]:
return {
"hctl": split_t[0],
"wwn": split_t[1],
"dev_name": split_t[2]
}
return None
@staticmethod
def get_from_sas_disk_smart_i_str(disk_name):
return linux.exe_shell("smartctl -i /dev/%s" % disk_name)
@staticmethod
def get_from_sas_disk_simple_attr(disk_name):
smart = Disk.get_from_sas_disk_smart_i_str(disk_name)
model = linux.search_regex_one_line_string_column(smart, "(?:Device Model|Product):.+", ":", 1).strip()
sn = linux.search_regex_one_line_string_column(smart, "Serial (?:N|n)umber.+", ":", 1).strip()
vendor = linux.search_regex_one_line_string_column(smart, "(?:SATA Ver|Vendor).+", ":", 1).split()[0].strip()
return {
"name": disk_name,
"model": model,
"sn": sn,
"vendor": vendor
}
@staticmethod
def get_all_disk():
""" return all disk object list from hba and chipset. """
disks = []
disks_lines = linux.exe_shell("lsblk -o NAME,VENDOR|grep -P '^sd.*[A-Z]'")
for line in disks_lines.splitlines():
disk_t = line.split()
if len(disk_t) > 1 and "LSI" not in disk_t[1]:
disks.append(disk_t[0])
ds = []
for i in disks:
d_t = DiskFromLsiSas3("", i)
d_t.fill_attrs()
ds.append(d_t)
return ds
@staticmethod
def get_dev_attr_dict(dev_name):
i = DiskFromLsiSas3("", dev_name)
i.fill_attrs()
return {
"dev": i.dev_name,
"model": i.model,
"fw": i.fw,
"SN": i.sn,
"type": i.type,
"vendor": i.vendor,
"smart": i.smart_attr,
"hctl": i.hctl,
"wwn": i.wwn,
"age": i.age,
"is_ssd": str(i.flash)
}
@staticmethod
def __if_smart_err(disk_oj):
""" return True if smart info of disk_oj has error, else return False """
if "SAS" in disk_oj.smart:
if int(disk_oj.smart_attr["channel0Error"]["Invalid DWORD count"]) > 0 or \
int(disk_oj.smart_attr["channel0Error"]["Running disparity error count"]) > 0 or \
int(disk_oj.smart_attr["channel0Error"]["Loss of DWORD synchronization"]) > 0 or \
int(disk_oj.smart_attr["channel0Error"]["Phy reset problem"]) > 0 or \
int(disk_oj.smart_attr["channel1Error"]["Invalid DWORD count"]) > 0 or \
int(disk_oj.smart_attr["channel1Error"]["Running disparity error count"]) > 0 or \
int(disk_oj.smart_attr["channel1Error"]["Loss of DWORD synchronization"]) > 0 or \
int(disk_oj.smart_attr["channel1Error"]["Phy reset problem"]) > 0:
return True
else:
return False
if "SATA" in disk_oj.smart:
if "No Errors Logged" not in disk_oj.smart:
return False
for attr_ in SATA_SMART_ERROR_LIST:
if disk_oj.smart_attr[attr_]["RAW_VALUE"] > 0:
return False
return True
@staticmethod
def get_over_agelimit_disks(disk_list):
""" return sas and sata disk list witch start_stop_hours/count or data is over the limit """
over_sas_disk = []
over_sata_disk = []
for disk in disk_list:
if disk.type == "SAS":
if int(disk.age["start_stop_count"]) > SAS_LIMIT_COUNT or float(disk.age["data_gb"]) > SAS_LIMIT_GB:
over_sas_disk.append(disk)
if disk.type == "SATA":
if int(disk.age["start_stop_count"]) > SAS_LIMIT_COUNT or int(
disk.age["power_on_hours"]) > SATA_LIMIT_HOURS:
over_sata_disk.append(disk)
return over_sas_disk, over_sata_disk
@staticmethod
def get_overage_disks_json(disk_list):
""" get_overage_disks function's json model """
pass
@staticmethod
def get_err_disk_dict():
""" return disk dict has error """
err_disk_dict = {}
disks = Disk.get_all_disk()
for i in disks:
if Disk.__if_smart_err(i):
struct = {
"dev": i.dev_name,
"model": i.model,
"fw": i.fw,
"SN": i.sn,
"type": i.type,
"vendor": i.vendor,
"smart": i.smart_attr,
"hctl": i.hctl,
"wwn": i.wwn
}
err_disk_dict[i.dev_name] = struct
return err_disk_dict
@staticmethod
def get_wearout_ssd_status():
""" return ssd wearout status dict """
disks = Disk.get_all_disk()
ssd_status = {}
for i in disks:
tmp = i.get_wearout_status()
# tmp[0] is dev_name, tmp[1] is wearout %
if tmp is not None:
ssd_status[tmp[0]] = tmp[1]
if len(ssd_status) == 0:
return None
return ssd_status
class DiskFromLsiSas3(Disk):
def __init__(self, sn, name):
super(DiskFromLsiSas3, self).__init__()
self.sn = sn
self.dev_name = name
def fill_attrs(self):
smart_str = linux.exe_shell("smartctl -a /dev/%s" % self.dev_name)
smartx_str = linux.exe_shell("smartctl -x /dev/%s" % self.dev_name)
self.smart = smartx_str
try:
self.model = linux.search_regex_one_line_string_column(smart_str, "(?:Device Model|Product):.+", ":",
1).strip()
self.fw = linux.search_regex_one_line_string_column(smart_str, "(?:Firmware|Revision).+", ":", 1).strip()
self.vendor = linux.search_regex_one_line_string_column(smart_str, "(?:SATA Ver|Vendor).+", ":", 1).split()[
0].strip()
self.sn = linux.search_regex_one_line_string_column(smart_str, "Serial (?:N|n)umber.+", ":", 1).strip()
map_temp = self.map_disk_wwn_hctl(self.dev_name)
self.wwn = map_temp["wwn"] if map_temp is not None else ""
self.hctl = map_temp["hctl"] if map_temp is not None else ""
rotational = linux.read_file(os.path.join("/sys/block", self.dev_name, "queue/rotational"))
if rotational.strip() == "0":
self.flash = True
except IOError:
print("%s read_file rotational err." % self.dev_name)
except Exception:
print("disk %s is not exists." % self.dev_name)
# fill in smart_attr
# ==========================================================================
# SAS disk
# smart_attr: {
# 'channel0Error': {
# 'Invalid DWORD count': '0',
# 'Loss of DWORD synchronization': '0',
# 'Phy reset problem': '0',
# 'Running disparity error count': '0'
# }
# 'channel1Error': {
# 'Invalid DWORD count': '0',
# 'Loss of DWORD synchronization': '0',
# 'Phy reset problem': '0',
# 'Running disparity error count': '0'
# }
# 'read': {
# 'byte10_9': '59036.419',
# 'correctionAlgorithmInvocations': '414271',
# 'errorEccByRereadsRewrite': '0',
# 'errorEccDelayed': '8',
# 'errorEccFast': '0',
# 'totalErrorsCorrected': '8',
# 'totalUncorrectedError': '0'
# }
# 'verify': {
# 'byte10_9': '59036.419',
# 'correctionAlgorithmInvocations': '414271',
# 'errorEccByRereadsRewrite': '0',
# 'errorEccDelayed': '8',
# 'errorEccFast': '0',
# 'totalErrorsCorrected': '8',
# 'totalUncorrectedError': '0'
# }
# 'write': {
# 'byte10_9': '59036.419',
# 'correctionAlgorithmInvocations': '414271',
# 'errorEccByRereadsRewrite': '0',
# 'errorEccDelayed': '8', | # }
#
# SATA disk
# smart_attr: {
# 'Raw_Read_Error_Rate': {
# 'ID': '1',
# 'FLAG': '0x000f',
# 'VALUE': '074',
# 'WORST': '063',
# 'THRESH': '044',
# 'TYPE': 'Pre-fail',
# 'UPDATED': 'Always',
# 'WHEN_FAILED': '-',
# 'RAW_VALUE': '26816470'
# }
# 'Spin_Up_Time': {
# ...(According to the following form)
# }
# }
# SATA smart form:
# ID# ATTRIBUTE_NAME FLAG VALUE WORST THRESH TYPE UPDATED WHEN_FAILED RAW_VALUE
# 1 Raw_Read_Error_Rate 0x000f 074 063 044 Pre-fail Always - 26816470
# 3 Spin_Up_Time 0x0003 094 094 000 Pre-fail Always - 0
# 4 Start_Stop_Count 0x0032 100 100 020 Old_age Always - 314
# 5 Reallocated_Sector_Ct 0x0033 100 100 036 Pre-fail Always - 1
# 7 Seek_Error_Rate 0x000f 073 060 030 Pre-fail Always - 21595176
# 9 Power_On_Hours 0x0032 096 096 000 Old_age Always - 3851
# 10 Spin_Retry_Count 0x0013 100 100 097 Pre-fail Always - 0
# 12 Power_Cycle_Count 0x0032 100 100 020 Old_age Always - 271
# 184 End-to-End_Error 0x0032 100 100 099 Old_age Always - 0
# 187 Reported_Uncorrect 0x0032 100 100 000 Old_age Always - 0
# 188 Command_Timeout 0x0032 100 100 000 Old_age Always - 0
# 189 High_Fly_Writes 0x003a 100 100 000 Old_age Always - 0
# 190 Airflow_Temperature_Cel 0x0022 064 057 045 Old_age Always - 36 (Min/Max 24/40)
# 191 G-Sense_Error_Rate 0x0032 100 100 000 Old_age Always - 0
# 192 Power-Off_Retract_Count 0x0032 100 100 000 Old_age Always - 147
# 193 Load_Cycle_Count 0x0032 099 099 000 Old_age Always - 2690
# 194 Temperature_Celsius 0x0022 036 043 000 Old_age Always - 36 (0 11 0 0 0)
# 195 Hardware_ECC_Recovered 0x001a 110 099 000 Old_age Always - 26816470
# 197 Current_Pending_Sector 0x0012 100 100 000 Old_age Always - 0
# 198 Offline_Uncorrectable 0x0010 100 100 000 Old_age Offline - 0
# 199 UDMA_CRC_Error_Count 0x003e 200 200 000 Old_age Always - 0
#
# ===========================================================================
if "SAS" in smart_str:
self.type = "SAS"
smart_str_arr = linux.search_regex_strings(smart_str, " *(?:write:|read:|verify:).+")
for line in smart_str_arr:
tmp = line.split()
dict_tmp = {
"errorEccFast": tmp[1].strip(),
"errorEccDelayed": tmp[2].strip(),
"errorEccByRereadsRewrite": tmp[3].strip(),
"totalErrorsCorrected": tmp[4].strip(),
"correctionAlgorithmInvocations": tmp[5].strip(),
"byte10_9": tmp[6].strip(),
"totalUncorrectedError": tmp[7].strip()
}
self.smart_attr[tmp[0].replace(":", " ").strip()] = dict_tmp
smart_str_arr = linux.search_regex_strings(
self.smart,
"(?:Invalid DWORD|Running disparity|Loss of DWORD|Phy reset problem).+=.+"
)
i = 0
dict_tmp = {}
for it in smart_str_arr:
tmp = it.split("=")
dict_tmp[tmp[0].strip()] = tmp[1].strip()
if 3 == i:
self.smart_attr["channel0Error"] = dict_tmp
dict_tmp = {}
if 7 == i:
self.smart_attr["channel1Error"] = dict_tmp
dict_tmp = {}
i += 1
# fill in age
# 'data_gb' is float number
# age: {
# 'start_stop_count': '10',
# 'data_gb': '5999'
# }
if isinstance(self.smart, str) and ("start-stop" in self.smart):
self.age["start_stop_count"] = linux.search_regex_one_line_string_column(self.smart, ".+start-stop.+",
":", 1)
all_gb = float(self.smart_attr["read"]["byte10_9"]) + float(
self.smart_attr["write"]["byte10_9"]) + float(self.smart_attr["verify"]["byte10_9"])
self.age["data_gb"] = str(all_gb)
if "SATA" in smart_str:
self.type = "SATA"
dict_tmp = linux.search_regex_strings(smart_str, ".*[0-9]+.+0x.+(?:In_the_past|-|FAILING_NOW) +[0-9]+")
for line in dict_tmp:
tmp = line.split()
dict_tmp = {
"ID": tmp[0].strip(),
"FLAG": tmp[2].strip(),
"VALUE": tmp[3].strip(),
"WORST": tmp[4].strip(),
"THRESH": tmp[5].strip(),
"TYPE": tmp[6].strip(),
"UPDATED": tmp[7].strip(),
"WHEN_FAILED": tmp[8].strip(),
"RAW_VALUE": tmp[9].strip(),
}
self.smart_attr[tmp[1]] = dict_tmp
if "Start_Stop_Count" in self.smart_attr:
self.age["start_stop_count"] = self.smart_attr["Start_Stop_Count"]["RAW_VALUE"]
self.age["power_on_hours"] = self.smart_attr["Power_On_Hours"]["RAW_VALUE"]
def get_wearout_status(self):
if self.flash is True and "Media_Wearout_Indicator" in self.smart_attr:
value = self.smart_attr["Media_Wearout_Indicator"]["VALUE"]
return self.dev_name, value
else:
return None
def to_json(self):
struct = {
"dev": self.dev_name,
"model": self.model,
"fw": self.fw,
"SN": self.sn,
"type": self.type,
"vendor": self.vendor,
"smart": self.smart_attr,
"hctl": self.hctl,
"wwn": self.wwn
}
json_str = json.dumps(struct, indent=1)
return json_str
class DiskFromLsiSas2(DiskFromLsiSas3):
def __init__(self, sn, name):
super(DiskFromLsiSas2, self).__init__(sn, name)
class DiskFromChipset(DiskFromLsiSas3):
def __init__(self, sn, name):
super(DiskFromChipset, self).__init__(sn, name)
class DiskFromMegaRaid(Disk):
def __init__(self, did, name):
super(DiskFromMegaRaid, self).__init__()
self.dev_name = name
self.did = did
def fill_attrs(self):
pass | # 'errorEccFast': '0',
# 'totalErrorsCorrected': '8',
# 'totalUncorrectedError': '0'
# } | random_line_split |
disk.py | # coding = utf-8
import os
import linux
import re
import json
from linux import try_catch
SAS_LIMIT_COUNT = 10
SAS_LIMIT_GB = 1024
SATA_LIMIT_HOURS = 10
SSD_WEAROUT_LIMIT = 99
SATA_SMART_ERROR_LIST = [
"Reallocated_Sector_Ct",
"Spin_Retry_Count",
"End-to-End_Error",
"High_Fly_Writes",
"Current_Pending_Sector",
"UDMA_CRC_Error_Count"
]
class Disk(object):
def __init__(self):
self.model = ""
self.vendor = ""
self.fw = ""
self.sn = ""
self.wwn = ""
self.hctl = ""
self.dev_name = ""
self.smart = ""
self.type = ""
self.smart_attr = {}
self.age = {}
self.flash = False
@staticmethod
def map_disk_wwn_hctl(diskname):
""" map wwn and H:C:T:L from dev_name """
lsscsi = linux.exe_shell("lsscsi -w |grep /dev/|awk '{print$1,$3,$4}'")
for i in lsscsi.splitlines():
split_t = i.split(" ")
if diskname in split_t[2]:
return {
"hctl": split_t[0],
"wwn": split_t[1],
"dev_name": split_t[2]
}
return None
@staticmethod
def get_from_sas_disk_smart_i_str(disk_name):
return linux.exe_shell("smartctl -i /dev/%s" % disk_name)
@staticmethod
def get_from_sas_disk_simple_attr(disk_name):
smart = Disk.get_from_sas_disk_smart_i_str(disk_name)
model = linux.search_regex_one_line_string_column(smart, "(?:Device Model|Product):.+", ":", 1).strip()
sn = linux.search_regex_one_line_string_column(smart, "Serial (?:N|n)umber.+", ":", 1).strip()
vendor = linux.search_regex_one_line_string_column(smart, "(?:SATA Ver|Vendor).+", ":", 1).split()[0].strip()
return {
"name": disk_name,
"model": model,
"sn": sn,
"vendor": vendor
}
@staticmethod
def get_all_disk():
""" return all disk object list from hba and chipset. """
disks = []
disks_lines = linux.exe_shell("lsblk -o NAME,VENDOR|grep -P '^sd.*[A-Z]'")
for line in disks_lines.splitlines():
disk_t = line.split()
if len(disk_t) > 1 and "LSI" not in disk_t[1]:
disks.append(disk_t[0])
ds = []
for i in disks:
d_t = DiskFromLsiSas3("", i)
d_t.fill_attrs()
ds.append(d_t)
return ds
@staticmethod
def get_dev_attr_dict(dev_name):
i = DiskFromLsiSas3("", dev_name)
i.fill_attrs()
return {
"dev": i.dev_name,
"model": i.model,
"fw": i.fw,
"SN": i.sn,
"type": i.type,
"vendor": i.vendor,
"smart": i.smart_attr,
"hctl": i.hctl,
"wwn": i.wwn,
"age": i.age,
"is_ssd": str(i.flash)
}
@staticmethod
def __if_smart_err(disk_oj):
""" return True if smart info of disk_oj has error, else return False """
if "SAS" in disk_oj.smart:
|
if "SATA" in disk_oj.smart:
if "No Errors Logged" not in disk_oj.smart:
return False
for attr_ in SATA_SMART_ERROR_LIST:
if disk_oj.smart_attr[attr_]["RAW_VALUE"] > 0:
return False
return True
@staticmethod
def get_over_agelimit_disks(disk_list):
""" return sas and sata disk list witch start_stop_hours/count or data is over the limit """
over_sas_disk = []
over_sata_disk = []
for disk in disk_list:
if disk.type == "SAS":
if int(disk.age["start_stop_count"]) > SAS_LIMIT_COUNT or float(disk.age["data_gb"]) > SAS_LIMIT_GB:
over_sas_disk.append(disk)
if disk.type == "SATA":
if int(disk.age["start_stop_count"]) > SAS_LIMIT_COUNT or int(
disk.age["power_on_hours"]) > SATA_LIMIT_HOURS:
over_sata_disk.append(disk)
return over_sas_disk, over_sata_disk
@staticmethod
def get_overage_disks_json(disk_list):
""" get_overage_disks function's json model """
pass
@staticmethod
def get_err_disk_dict():
""" return disk dict has error """
err_disk_dict = {}
disks = Disk.get_all_disk()
for i in disks:
if Disk.__if_smart_err(i):
struct = {
"dev": i.dev_name,
"model": i.model,
"fw": i.fw,
"SN": i.sn,
"type": i.type,
"vendor": i.vendor,
"smart": i.smart_attr,
"hctl": i.hctl,
"wwn": i.wwn
}
err_disk_dict[i.dev_name] = struct
return err_disk_dict
@staticmethod
def get_wearout_ssd_status():
""" return ssd wearout status dict """
disks = Disk.get_all_disk()
ssd_status = {}
for i in disks:
tmp = i.get_wearout_status()
# tmp[0] is dev_name, tmp[1] is wearout %
if tmp is not None:
ssd_status[tmp[0]] = tmp[1]
if len(ssd_status) == 0:
return None
return ssd_status
class DiskFromLsiSas3(Disk):
def __init__(self, sn, name):
super(DiskFromLsiSas3, self).__init__()
self.sn = sn
self.dev_name = name
def fill_attrs(self):
smart_str = linux.exe_shell("smartctl -a /dev/%s" % self.dev_name)
smartx_str = linux.exe_shell("smartctl -x /dev/%s" % self.dev_name)
self.smart = smartx_str
try:
self.model = linux.search_regex_one_line_string_column(smart_str, "(?:Device Model|Product):.+", ":",
1).strip()
self.fw = linux.search_regex_one_line_string_column(smart_str, "(?:Firmware|Revision).+", ":", 1).strip()
self.vendor = linux.search_regex_one_line_string_column(smart_str, "(?:SATA Ver|Vendor).+", ":", 1).split()[
0].strip()
self.sn = linux.search_regex_one_line_string_column(smart_str, "Serial (?:N|n)umber.+", ":", 1).strip()
map_temp = self.map_disk_wwn_hctl(self.dev_name)
self.wwn = map_temp["wwn"] if map_temp is not None else ""
self.hctl = map_temp["hctl"] if map_temp is not None else ""
rotational = linux.read_file(os.path.join("/sys/block", self.dev_name, "queue/rotational"))
if rotational.strip() == "0":
self.flash = True
except IOError:
print("%s read_file rotational err." % self.dev_name)
except Exception:
print("disk %s is not exists." % self.dev_name)
# fill in smart_attr
# ==========================================================================
# SAS disk
# smart_attr: {
# 'channel0Error': {
# 'Invalid DWORD count': '0',
# 'Loss of DWORD synchronization': '0',
# 'Phy reset problem': '0',
# 'Running disparity error count': '0'
# }
# 'channel1Error': {
# 'Invalid DWORD count': '0',
# 'Loss of DWORD synchronization': '0',
# 'Phy reset problem': '0',
# 'Running disparity error count': '0'
# }
# 'read': {
# 'byte10_9': '59036.419',
# 'correctionAlgorithmInvocations': '414271',
# 'errorEccByRereadsRewrite': '0',
# 'errorEccDelayed': '8',
# 'errorEccFast': '0',
# 'totalErrorsCorrected': '8',
# 'totalUncorrectedError': '0'
# }
# 'verify': {
# 'byte10_9': '59036.419',
# 'correctionAlgorithmInvocations': '414271',
# 'errorEccByRereadsRewrite': '0',
# 'errorEccDelayed': '8',
# 'errorEccFast': '0',
# 'totalErrorsCorrected': '8',
# 'totalUncorrectedError': '0'
# }
# 'write': {
# 'byte10_9': '59036.419',
# 'correctionAlgorithmInvocations': '414271',
# 'errorEccByRereadsRewrite': '0',
# 'errorEccDelayed': '8',
# 'errorEccFast': '0',
# 'totalErrorsCorrected': '8',
# 'totalUncorrectedError': '0'
# }
# }
#
# SATA disk
# smart_attr: {
# 'Raw_Read_Error_Rate': {
# 'ID': '1',
# 'FLAG': '0x000f',
# 'VALUE': '074',
# 'WORST': '063',
# 'THRESH': '044',
# 'TYPE': 'Pre-fail',
# 'UPDATED': 'Always',
# 'WHEN_FAILED': '-',
# 'RAW_VALUE': '26816470'
# }
# 'Spin_Up_Time': {
# ...(According to the following form)
# }
# }
# SATA smart form:
# ID# ATTRIBUTE_NAME FLAG VALUE WORST THRESH TYPE UPDATED WHEN_FAILED RAW_VALUE
# 1 Raw_Read_Error_Rate 0x000f 074 063 044 Pre-fail Always - 26816470
# 3 Spin_Up_Time 0x0003 094 094 000 Pre-fail Always - 0
# 4 Start_Stop_Count 0x0032 100 100 020 Old_age Always - 314
# 5 Reallocated_Sector_Ct 0x0033 100 100 036 Pre-fail Always - 1
# 7 Seek_Error_Rate 0x000f 073 060 030 Pre-fail Always - 21595176
# 9 Power_On_Hours 0x0032 096 096 000 Old_age Always - 3851
# 10 Spin_Retry_Count 0x0013 100 100 097 Pre-fail Always - 0
# 12 Power_Cycle_Count 0x0032 100 100 020 Old_age Always - 271
# 184 End-to-End_Error 0x0032 100 100 099 Old_age Always - 0
# 187 Reported_Uncorrect 0x0032 100 100 000 Old_age Always - 0
# 188 Command_Timeout 0x0032 100 100 000 Old_age Always - 0
# 189 High_Fly_Writes 0x003a 100 100 000 Old_age Always - 0
# 190 Airflow_Temperature_Cel 0x0022 064 057 045 Old_age Always - 36 (Min/Max 24/40)
# 191 G-Sense_Error_Rate 0x0032 100 100 000 Old_age Always - 0
# 192 Power-Off_Retract_Count 0x0032 100 100 000 Old_age Always - 147
# 193 Load_Cycle_Count 0x0032 099 099 000 Old_age Always - 2690
# 194 Temperature_Celsius 0x0022 036 043 000 Old_age Always - 36 (0 11 0 0 0)
# 195 Hardware_ECC_Recovered 0x001a 110 099 000 Old_age Always - 26816470
# 197 Current_Pending_Sector 0x0012 100 100 000 Old_age Always - 0
# 198 Offline_Uncorrectable 0x0010 100 100 000 Old_age Offline - 0
# 199 UDMA_CRC_Error_Count 0x003e 200 200 000 Old_age Always - 0
#
# ===========================================================================
if "SAS" in smart_str:
self.type = "SAS"
smart_str_arr = linux.search_regex_strings(smart_str, " *(?:write:|read:|verify:).+")
for line in smart_str_arr:
tmp = line.split()
dict_tmp = {
"errorEccFast": tmp[1].strip(),
"errorEccDelayed": tmp[2].strip(),
"errorEccByRereadsRewrite": tmp[3].strip(),
"totalErrorsCorrected": tmp[4].strip(),
"correctionAlgorithmInvocations": tmp[5].strip(),
"byte10_9": tmp[6].strip(),
"totalUncorrectedError": tmp[7].strip()
}
self.smart_attr[tmp[0].replace(":", " ").strip()] = dict_tmp
smart_str_arr = linux.search_regex_strings(
self.smart,
"(?:Invalid DWORD|Running disparity|Loss of DWORD|Phy reset problem).+=.+"
)
i = 0
dict_tmp = {}
for it in smart_str_arr:
tmp = it.split("=")
dict_tmp[tmp[0].strip()] = tmp[1].strip()
if 3 == i:
self.smart_attr["channel0Error"] = dict_tmp
dict_tmp = {}
if 7 == i:
self.smart_attr["channel1Error"] = dict_tmp
dict_tmp = {}
i += 1
# fill in age
# 'data_gb' is float number
# age: {
# 'start_stop_count': '10',
# 'data_gb': '5999'
# }
if isinstance(self.smart, str) and ("start-stop" in self.smart):
self.age["start_stop_count"] = linux.search_regex_one_line_string_column(self.smart, ".+start-stop.+",
":", 1)
all_gb = float(self.smart_attr["read"]["byte10_9"]) + float(
self.smart_attr["write"]["byte10_9"]) + float(self.smart_attr["verify"]["byte10_9"])
self.age["data_gb"] = str(all_gb)
if "SATA" in smart_str:
self.type = "SATA"
dict_tmp = linux.search_regex_strings(smart_str, ".*[0-9]+.+0x.+(?:In_the_past|-|FAILING_NOW) +[0-9]+")
for line in dict_tmp:
tmp = line.split()
dict_tmp = {
"ID": tmp[0].strip(),
"FLAG": tmp[2].strip(),
"VALUE": tmp[3].strip(),
"WORST": tmp[4].strip(),
"THRESH": tmp[5].strip(),
"TYPE": tmp[6].strip(),
"UPDATED": tmp[7].strip(),
"WHEN_FAILED": tmp[8].strip(),
"RAW_VALUE": tmp[9].strip(),
}
self.smart_attr[tmp[1]] = dict_tmp
if "Start_Stop_Count" in self.smart_attr:
self.age["start_stop_count"] = self.smart_attr["Start_Stop_Count"]["RAW_VALUE"]
self.age["power_on_hours"] = self.smart_attr["Power_On_Hours"]["RAW_VALUE"]
def get_wearout_status(self):
if self.flash is True and "Media_Wearout_Indicator" in self.smart_attr:
value = self.smart_attr["Media_Wearout_Indicator"]["VALUE"]
return self.dev_name, value
else:
return None
def to_json(self):
struct = {
"dev": self.dev_name,
"model": self.model,
"fw": self.fw,
"SN": self.sn,
"type": self.type,
"vendor": self.vendor,
"smart": self.smart_attr,
"hctl": self.hctl,
"wwn": self.wwn
}
json_str = json.dumps(struct, indent=1)
return json_str
class DiskFromLsiSas2(DiskFromLsiSas3):
def __init__(self, sn, name):
super(DiskFromLsiSas2, self).__init__(sn, name)
class DiskFromChipset(DiskFromLsiSas3):
def __init__(self, sn, name):
super(DiskFromChipset, self).__init__(sn, name)
class DiskFromMegaRaid(Disk):
def __init__(self, did, name):
super(DiskFromMegaRaid, self).__init__()
self.dev_name = name
self.did = did
def fill_attrs(self):
pass
| if int(disk_oj.smart_attr["channel0Error"]["Invalid DWORD count"]) > 0 or \
int(disk_oj.smart_attr["channel0Error"]["Running disparity error count"]) > 0 or \
int(disk_oj.smart_attr["channel0Error"]["Loss of DWORD synchronization"]) > 0 or \
int(disk_oj.smart_attr["channel0Error"]["Phy reset problem"]) > 0 or \
int(disk_oj.smart_attr["channel1Error"]["Invalid DWORD count"]) > 0 or \
int(disk_oj.smart_attr["channel1Error"]["Running disparity error count"]) > 0 or \
int(disk_oj.smart_attr["channel1Error"]["Loss of DWORD synchronization"]) > 0 or \
int(disk_oj.smart_attr["channel1Error"]["Phy reset problem"]) > 0:
return True
else:
return False | conditional_block |
load_drives.py | from __future__ import print_function
import os
import sys
import pickle
import time
import argparse
import numpy as np
import sklearn
from keras.utils import to_categorical
from keras.utils import Sequence
# https://keras.io/utils/#sequence
import malpiOptions
from load_aux import getAuxFromMeta, loadOneAux
# For python2/3 compatibility when calling isinstance(x,basestring)
# From: https://stackoverflow.com/questions/11301138/how-to-check-if-variable-is-string-with-python-2-and-3-compatibility
try:
basestring
except NameError:
basestring = str
def normalize_images( images, default=True ):
if default:
rmean = 92.93206363205326
gmean = 85.80540021330793
bmean = 54.14884297660608
rstd = 57.696159704394354
gstd = 53.739380109203445
bstd = 47.66536771313241
#print( "Default normalization" )
images[:,:,:,0] -= rmean
images[:,:,:,1] -= gmean
images[:,:,:,2] -= bmean
images[:,:,:,0] /= rstd
images[:,:,:,1] /= gstd
images[:,:,:,2] /= bstd
else:
rmean = np.mean(images[:,:,:,0])
gmean= np.mean(images[:,:,:,1])
bmean= np.mean(images[:,:,:,2])
rstd = np.std(images[:,:,:,0])
gstd = np.std(images[:,:,:,1])
bstd = np.std(images[:,:,:,2])
print( "Image means: {}/{}/{}".format( rmean, gmean, bmean ) )
print( "Image stds: {}/{}/{}".format( rstd, gstd, bstd ) )
# should only do this for the training data, not val/test, but I'm not sure how to do that when Keras makes the train/val split
images[:,:,:,0] -= rmean
images[:,:,:,1] -= gmean
images[:,:,:,2] -= bmean
images[:,:,:,0] /= rstd
images[:,:,:,1] /= gstd
images[:,:,:,2] /= bstd
def embedActions( actions ):
embedding = { "stop":0, "forward":1, "left":2, "right":3, "backward":4 }
emb = []
prev_act = 0
for act in actions:
try:
if not act.startswith("speed"):
prev_act = embedding[act]
if prev_act is None:
print( "Invalid action: {}".format( act ) )
raise ValueError("Invalid action: " + str(act) )
emb.append( embedding[act] )
else:
emb.append( prev_act )
except Exception as ex:
print( ex )
print( act )
return emb
class | (Sequence):
""" Loads MaLPi drive data
From: https://stanford.edu/~shervine/blog/keras-how-to-generate-data-on-the-fly.html"""
def __init__(self, filelist, image_size=(120,120), batch_size=32, shuffle=True, max_load=30000, auxName=None, images_only=False ):
""" Input a list of drive directories.
Pre-load each to count number of samples.
load one file and use it to generate batches until we run out.
load the next file, repeat
Re-shuffle on each epoch end
"""
'Initialization'
self.files = filelist
self.size = image_size
self.batch_size = batch_size
self.shuffle = shuffle
self.max_load = max_load
self.auxName = auxName
self.images_only = images_only
self.image_norm = False
self.next_dir_index = 0
self.images = []
self.actions = []
self.current_start = 0
self.categorical = None
self.input_dim = None
self.num_actions = None
self.batch_shape = (self.batch_size,) + self.size + (3,)
print( "Batch shape: {}".format( self.batch_shape ) )
self.count = self.__count()
self.on_epoch_end()
def __len__(self):
'The number of batches per epoch'
return int(np.floor(self.count / self.batch_size))
def __getitem__(self, index):
sample_beg = index * self.batch_size
sample_beg -= self.current_start
sample_end = sample_beg + self.batch_size
#print( "getitem {} {}:{} {}".format( index, sample_beg, sample_end, self.current_start ) )
prev_len = len(self.images)
if (sample_beg < len(self.images)) and (sample_end < len(self.images)):
images = self.images[sample_beg:sample_end]
actions = self.actions[sample_beg:sample_end]
if self.images_only:
if images.shape != self.batch_shape:
print( "Invalid batch shape (1): {} {} {} {}".format( images.shape, sample_beg, sample_end, prev_len) )
return images, images
else:
return images, actions
if sample_beg <= len(self.images):
images = self.images[sample_beg:]
actions = self.actions[sample_beg:]
#sample_end = len(self.images) - sample_beg
sample_end = self.batch_size - len(images)
self.images, self.actions = self.__load_next_max()
try:
i2 = self.images[0:sample_end]
images = np.append(images,i2,axis=0)
except Exception as ex:
print( ex )
print( "images {} {}".format( images.shape, i2.shape ) )
print( "{}".format( images ) )
try:
a2 = self.actions[0:sample_end]
actions = np.append(actions,a2,axis=0)
except Exception as ex:
print( ex )
print( "actions {} {}".format( actions.shape, a2.shape ) )
print( "{}".format( actions ) )
if self.images_only:
if images.shape != self.batch_shape:
print( "Invalid batch shape (2): {} {} {} {}".format( images.shape, sample_beg, sample_end, prev_len) )
return images, images
else:
return images, actions
print( "Invalid batch indexes: {} {}:{}".format( index, sample_beg, sample_end ) )
def __load_next_max(self):
self.current_start += len(self.images)
images = []
actions = []
while len(images) <= self.max_load and self.next_dir_index < len(self.files):
fname = self.files[self.next_dir_index]
dimages, dactions = self.loadOneDrive( fname )
images.extend(dimages)
actions.extend(dactions)
self.next_dir_index += 1
if self.shuffle == True:
if self.images_only:
images = sklearn.utils.shuffle(images)
else:
images, actions = sklearn.utils.shuffle(images,actions)
images = np.array(images)
actions = np.array(actions)
if self.image_norm:
normalize_images(images)
return images, actions
def loadOneDrive( self, drive_dir, count_only=False ):
actions = []
if not self.images_only:
if self.auxName is not None:
aux = getAuxFromMeta( drive_dir, self.auxName )
if aux is not None:
actions = loadOneAux( drive_dir, aux )
if len(actions) == 0:
actions_file = os.path.join( drive_dir, "image_actions.npy" )
if os.path.exists(actions_file):
actions = np.load(actions_file)
else:
actions_file = os.path.join( drive_dir, "image_actions.pickle" )
with open(actions_file,'r') as f:
actions = pickle.load(f)
if len(actions) > 0:
categorical = True
if isinstance(actions[0], basestring):
actions = embedActions( actions )
actions = to_categorical( actions, num_classes=5 )
categorical = True
elif type(actions) == list:
actions = np.array(actions).astype('float')
categorical = False
elif type(actions) == np.ndarray:
actions = np.array(actions).astype('float')
categorical = False
else:
print("Unknown actions format: {} {} as {}".format( type(actions), actions[0], type(actions[0]) ))
if self.categorical is None:
self.categorical = categorical
elif self.categorical != categorical:
print( "Mixed cat/non-cat action space: {}".format( drive_dir ) )
# Need an option for this
#if not self.categorical:
# actions = self.addActionDiff(actions)
if self.num_actions is None:
self.num_actions = len(actions[0])
if count_only:
return len(actions)
basename = "images_{}x{}".format( self.size[0], self.size[1] )
im_file = os.path.join( drive_dir, basename+".npy" )
if os.path.exists(im_file):
images = np.load(im_file)
else:
im_file = os.path.join( drive_dir, basename+".pickle" )
with open(im_file,'r') as f:
images = pickle.load(f)
if count_only:
return len(images)
if not self.images_only and (len(images) != len(actions)):
print( "Data mismatch: {}".format( drive_dir ) )
print( " images: {}".format( images.shape ) )
print( " actions: {}".format( actions.shape ) )
return images, actions
def on_epoch_end(self):
'Updates indexes after each epoch'
if self.shuffle == True:
np.random.shuffle(self.files)
self.images = []
self.actions = []
self.next_dir_index = 0
self.current_start = 0
self.images, self.actions = self.__load_next_max()
def __count(self):
count = 0
for onefile in self.files:
count += self.loadOneDrive( onefile, count_only=True)
return count
def addActionDiff(self, actions):
diff = actions[:,0] - actions[:,1]
#diff *= 10.0
diff = np.reshape( diff, (diff.shape[0], 1) )
actions = np.hstack( (actions, diff) )
return actions
def runTests(args):
pass
def getOptions():
parser = argparse.ArgumentParser(description='Test data loader.', formatter_class=argparse.ArgumentDefaultsHelpFormatter)
malpiOptions.addMalpiOptions( parser )
args = parser.parse_args()
malpiOptions.preprocessOptions(args)
return args
if __name__ == "__main__":
args = getOptions()
if args.test_only:
runTests(args)
exit()
images_only = True
gen = DriveDataGenerator(args.dirs, batch_size=100, shuffle=True, max_load=2000, image_size=(64,64), images_only=images_only )
print( "# samples: {}".format( gen.count ) )
print( "# batches: {}".format( len(gen) ) )
if not images_only:
_, actions = gen[0]
print( "Actions: {}".format( actions.shape ) )
print( " mean: {}".format( np.mean(actions, axis=0) ) )
print( " stdev: {}".format( np.std(actions, axis=0) ) )
for epoch in range(5):
for i in range(len(gen)):
images = gen[i]
print( "Batch {}: {}".format( i, images.shape ), end='\r' )
#print( "action[0]: {}".format( actions[0] ) )
sys.stdout.flush()
time.sleep(0.05)
print("")
print( "End epoch {}".format( epoch ) )
gen.on_epoch_end()
print("")
| DriveDataGenerator | identifier_name |
load_drives.py | from __future__ import print_function
import os
import sys
import pickle
import time
import argparse
import numpy as np
import sklearn
from keras.utils import to_categorical
from keras.utils import Sequence
# https://keras.io/utils/#sequence
import malpiOptions
from load_aux import getAuxFromMeta, loadOneAux
# For python2/3 compatibility when calling isinstance(x,basestring)
# From: https://stackoverflow.com/questions/11301138/how-to-check-if-variable-is-string-with-python-2-and-3-compatibility
try:
basestring
except NameError:
basestring = str
def normalize_images( images, default=True ):
if default:
rmean = 92.93206363205326
gmean = 85.80540021330793
bmean = 54.14884297660608
rstd = 57.696159704394354
gstd = 53.739380109203445
bstd = 47.66536771313241
#print( "Default normalization" )
images[:,:,:,0] -= rmean
images[:,:,:,1] -= gmean
images[:,:,:,2] -= bmean
images[:,:,:,0] /= rstd
images[:,:,:,1] /= gstd
images[:,:,:,2] /= bstd
else:
rmean = np.mean(images[:,:,:,0])
gmean= np.mean(images[:,:,:,1])
bmean= np.mean(images[:,:,:,2])
rstd = np.std(images[:,:,:,0])
gstd = np.std(images[:,:,:,1])
bstd = np.std(images[:,:,:,2])
print( "Image means: {}/{}/{}".format( rmean, gmean, bmean ) )
print( "Image stds: {}/{}/{}".format( rstd, gstd, bstd ) )
# should only do this for the training data, not val/test, but I'm not sure how to do that when Keras makes the train/val split
images[:,:,:,0] -= rmean
images[:,:,:,1] -= gmean
images[:,:,:,2] -= bmean
images[:,:,:,0] /= rstd
images[:,:,:,1] /= gstd
images[:,:,:,2] /= bstd
def embedActions( actions ):
embedding = { "stop":0, "forward":1, "left":2, "right":3, "backward":4 }
emb = []
prev_act = 0
for act in actions:
try:
if not act.startswith("speed"):
prev_act = embedding[act]
if prev_act is None:
print( "Invalid action: {}".format( act ) )
raise ValueError("Invalid action: " + str(act) )
emb.append( embedding[act] )
else:
emb.append( prev_act )
except Exception as ex:
print( ex )
print( act )
return emb
class DriveDataGenerator(Sequence):
|
def runTests(args):
pass
def getOptions():
parser = argparse.ArgumentParser(description='Test data loader.', formatter_class=argparse.ArgumentDefaultsHelpFormatter)
malpiOptions.addMalpiOptions( parser )
args = parser.parse_args()
malpiOptions.preprocessOptions(args)
return args
if __name__ == "__main__":
args = getOptions()
if args.test_only:
runTests(args)
exit()
images_only = True
gen = DriveDataGenerator(args.dirs, batch_size=100, shuffle=True, max_load=2000, image_size=(64,64), images_only=images_only )
print( "# samples: {}".format( gen.count ) )
print( "# batches: {}".format( len(gen) ) )
if not images_only:
_, actions = gen[0]
print( "Actions: {}".format( actions.shape ) )
print( " mean: {}".format( np.mean(actions, axis=0) ) )
print( " stdev: {}".format( np.std(actions, axis=0) ) )
for epoch in range(5):
for i in range(len(gen)):
images = gen[i]
print( "Batch {}: {}".format( i, images.shape ), end='\r' )
#print( "action[0]: {}".format( actions[0] ) )
sys.stdout.flush()
time.sleep(0.05)
print("")
print( "End epoch {}".format( epoch ) )
gen.on_epoch_end()
print("")
| """ Loads MaLPi drive data
From: https://stanford.edu/~shervine/blog/keras-how-to-generate-data-on-the-fly.html"""
def __init__(self, filelist, image_size=(120,120), batch_size=32, shuffle=True, max_load=30000, auxName=None, images_only=False ):
""" Input a list of drive directories.
Pre-load each to count number of samples.
load one file and use it to generate batches until we run out.
load the next file, repeat
Re-shuffle on each epoch end
"""
'Initialization'
self.files = filelist
self.size = image_size
self.batch_size = batch_size
self.shuffle = shuffle
self.max_load = max_load
self.auxName = auxName
self.images_only = images_only
self.image_norm = False
self.next_dir_index = 0
self.images = []
self.actions = []
self.current_start = 0
self.categorical = None
self.input_dim = None
self.num_actions = None
self.batch_shape = (self.batch_size,) + self.size + (3,)
print( "Batch shape: {}".format( self.batch_shape ) )
self.count = self.__count()
self.on_epoch_end()
def __len__(self):
'The number of batches per epoch'
return int(np.floor(self.count / self.batch_size))
def __getitem__(self, index):
sample_beg = index * self.batch_size
sample_beg -= self.current_start
sample_end = sample_beg + self.batch_size
#print( "getitem {} {}:{} {}".format( index, sample_beg, sample_end, self.current_start ) )
prev_len = len(self.images)
if (sample_beg < len(self.images)) and (sample_end < len(self.images)):
images = self.images[sample_beg:sample_end]
actions = self.actions[sample_beg:sample_end]
if self.images_only:
if images.shape != self.batch_shape:
print( "Invalid batch shape (1): {} {} {} {}".format( images.shape, sample_beg, sample_end, prev_len) )
return images, images
else:
return images, actions
if sample_beg <= len(self.images):
images = self.images[sample_beg:]
actions = self.actions[sample_beg:]
#sample_end = len(self.images) - sample_beg
sample_end = self.batch_size - len(images)
self.images, self.actions = self.__load_next_max()
try:
i2 = self.images[0:sample_end]
images = np.append(images,i2,axis=0)
except Exception as ex:
print( ex )
print( "images {} {}".format( images.shape, i2.shape ) )
print( "{}".format( images ) )
try:
a2 = self.actions[0:sample_end]
actions = np.append(actions,a2,axis=0)
except Exception as ex:
print( ex )
print( "actions {} {}".format( actions.shape, a2.shape ) )
print( "{}".format( actions ) )
if self.images_only:
if images.shape != self.batch_shape:
print( "Invalid batch shape (2): {} {} {} {}".format( images.shape, sample_beg, sample_end, prev_len) )
return images, images
else:
return images, actions
print( "Invalid batch indexes: {} {}:{}".format( index, sample_beg, sample_end ) )
def __load_next_max(self):
self.current_start += len(self.images)
images = []
actions = []
while len(images) <= self.max_load and self.next_dir_index < len(self.files):
fname = self.files[self.next_dir_index]
dimages, dactions = self.loadOneDrive( fname )
images.extend(dimages)
actions.extend(dactions)
self.next_dir_index += 1
if self.shuffle == True:
if self.images_only:
images = sklearn.utils.shuffle(images)
else:
images, actions = sklearn.utils.shuffle(images,actions)
images = np.array(images)
actions = np.array(actions)
if self.image_norm:
normalize_images(images)
return images, actions
def loadOneDrive( self, drive_dir, count_only=False ):
actions = []
if not self.images_only:
if self.auxName is not None:
aux = getAuxFromMeta( drive_dir, self.auxName )
if aux is not None:
actions = loadOneAux( drive_dir, aux )
if len(actions) == 0:
actions_file = os.path.join( drive_dir, "image_actions.npy" )
if os.path.exists(actions_file):
actions = np.load(actions_file)
else:
actions_file = os.path.join( drive_dir, "image_actions.pickle" )
with open(actions_file,'r') as f:
actions = pickle.load(f)
if len(actions) > 0:
categorical = True
if isinstance(actions[0], basestring):
actions = embedActions( actions )
actions = to_categorical( actions, num_classes=5 )
categorical = True
elif type(actions) == list:
actions = np.array(actions).astype('float')
categorical = False
elif type(actions) == np.ndarray:
actions = np.array(actions).astype('float')
categorical = False
else:
print("Unknown actions format: {} {} as {}".format( type(actions), actions[0], type(actions[0]) ))
if self.categorical is None:
self.categorical = categorical
elif self.categorical != categorical:
print( "Mixed cat/non-cat action space: {}".format( drive_dir ) )
# Need an option for this
#if not self.categorical:
# actions = self.addActionDiff(actions)
if self.num_actions is None:
self.num_actions = len(actions[0])
if count_only:
return len(actions)
basename = "images_{}x{}".format( self.size[0], self.size[1] )
im_file = os.path.join( drive_dir, basename+".npy" )
if os.path.exists(im_file):
images = np.load(im_file)
else:
im_file = os.path.join( drive_dir, basename+".pickle" )
with open(im_file,'r') as f:
images = pickle.load(f)
if count_only:
return len(images)
if not self.images_only and (len(images) != len(actions)):
print( "Data mismatch: {}".format( drive_dir ) )
print( " images: {}".format( images.shape ) )
print( " actions: {}".format( actions.shape ) )
return images, actions
def on_epoch_end(self):
'Updates indexes after each epoch'
if self.shuffle == True:
np.random.shuffle(self.files)
self.images = []
self.actions = []
self.next_dir_index = 0
self.current_start = 0
self.images, self.actions = self.__load_next_max()
def __count(self):
count = 0
for onefile in self.files:
count += self.loadOneDrive( onefile, count_only=True)
return count
def addActionDiff(self, actions):
diff = actions[:,0] - actions[:,1]
#diff *= 10.0
diff = np.reshape( diff, (diff.shape[0], 1) )
actions = np.hstack( (actions, diff) )
return actions | identifier_body |
load_drives.py | from __future__ import print_function
import os
import sys
import pickle
import time
import argparse
import numpy as np
import sklearn
from keras.utils import to_categorical
from keras.utils import Sequence
# https://keras.io/utils/#sequence
import malpiOptions
from load_aux import getAuxFromMeta, loadOneAux
# For python2/3 compatibility when calling isinstance(x,basestring)
# From: https://stackoverflow.com/questions/11301138/how-to-check-if-variable-is-string-with-python-2-and-3-compatibility
try:
basestring
except NameError:
basestring = str
def normalize_images( images, default=True ):
if default:
rmean = 92.93206363205326
gmean = 85.80540021330793
bmean = 54.14884297660608
rstd = 57.696159704394354
gstd = 53.739380109203445
bstd = 47.66536771313241
#print( "Default normalization" )
images[:,:,:,0] -= rmean
images[:,:,:,1] -= gmean
images[:,:,:,2] -= bmean
images[:,:,:,0] /= rstd
images[:,:,:,1] /= gstd
images[:,:,:,2] /= bstd
else:
rmean = np.mean(images[:,:,:,0])
gmean= np.mean(images[:,:,:,1])
bmean= np.mean(images[:,:,:,2])
rstd = np.std(images[:,:,:,0])
gstd = np.std(images[:,:,:,1])
bstd = np.std(images[:,:,:,2])
print( "Image means: {}/{}/{}".format( rmean, gmean, bmean ) )
print( "Image stds: {}/{}/{}".format( rstd, gstd, bstd ) )
# should only do this for the training data, not val/test, but I'm not sure how to do that when Keras makes the train/val split
images[:,:,:,0] -= rmean
images[:,:,:,1] -= gmean
images[:,:,:,2] -= bmean
images[:,:,:,0] /= rstd
images[:,:,:,1] /= gstd
images[:,:,:,2] /= bstd
def embedActions( actions ):
embedding = { "stop":0, "forward":1, "left":2, "right":3, "backward":4 }
emb = []
prev_act = 0
for act in actions:
try:
if not act.startswith("speed"):
|
else:
emb.append( prev_act )
except Exception as ex:
print( ex )
print( act )
return emb
class DriveDataGenerator(Sequence):
""" Loads MaLPi drive data
From: https://stanford.edu/~shervine/blog/keras-how-to-generate-data-on-the-fly.html"""
def __init__(self, filelist, image_size=(120,120), batch_size=32, shuffle=True, max_load=30000, auxName=None, images_only=False ):
""" Input a list of drive directories.
Pre-load each to count number of samples.
load one file and use it to generate batches until we run out.
load the next file, repeat
Re-shuffle on each epoch end
"""
'Initialization'
self.files = filelist
self.size = image_size
self.batch_size = batch_size
self.shuffle = shuffle
self.max_load = max_load
self.auxName = auxName
self.images_only = images_only
self.image_norm = False
self.next_dir_index = 0
self.images = []
self.actions = []
self.current_start = 0
self.categorical = None
self.input_dim = None
self.num_actions = None
self.batch_shape = (self.batch_size,) + self.size + (3,)
print( "Batch shape: {}".format( self.batch_shape ) )
self.count = self.__count()
self.on_epoch_end()
def __len__(self):
'The number of batches per epoch'
return int(np.floor(self.count / self.batch_size))
def __getitem__(self, index):
sample_beg = index * self.batch_size
sample_beg -= self.current_start
sample_end = sample_beg + self.batch_size
#print( "getitem {} {}:{} {}".format( index, sample_beg, sample_end, self.current_start ) )
prev_len = len(self.images)
if (sample_beg < len(self.images)) and (sample_end < len(self.images)):
images = self.images[sample_beg:sample_end]
actions = self.actions[sample_beg:sample_end]
if self.images_only:
if images.shape != self.batch_shape:
print( "Invalid batch shape (1): {} {} {} {}".format( images.shape, sample_beg, sample_end, prev_len) )
return images, images
else:
return images, actions
if sample_beg <= len(self.images):
images = self.images[sample_beg:]
actions = self.actions[sample_beg:]
#sample_end = len(self.images) - sample_beg
sample_end = self.batch_size - len(images)
self.images, self.actions = self.__load_next_max()
try:
i2 = self.images[0:sample_end]
images = np.append(images,i2,axis=0)
except Exception as ex:
print( ex )
print( "images {} {}".format( images.shape, i2.shape ) )
print( "{}".format( images ) )
try:
a2 = self.actions[0:sample_end]
actions = np.append(actions,a2,axis=0)
except Exception as ex:
print( ex )
print( "actions {} {}".format( actions.shape, a2.shape ) )
print( "{}".format( actions ) )
if self.images_only:
if images.shape != self.batch_shape:
print( "Invalid batch shape (2): {} {} {} {}".format( images.shape, sample_beg, sample_end, prev_len) )
return images, images
else:
return images, actions
print( "Invalid batch indexes: {} {}:{}".format( index, sample_beg, sample_end ) )
def __load_next_max(self):
self.current_start += len(self.images)
images = []
actions = []
while len(images) <= self.max_load and self.next_dir_index < len(self.files):
fname = self.files[self.next_dir_index]
dimages, dactions = self.loadOneDrive( fname )
images.extend(dimages)
actions.extend(dactions)
self.next_dir_index += 1
if self.shuffle == True:
if self.images_only:
images = sklearn.utils.shuffle(images)
else:
images, actions = sklearn.utils.shuffle(images,actions)
images = np.array(images)
actions = np.array(actions)
if self.image_norm:
normalize_images(images)
return images, actions
def loadOneDrive( self, drive_dir, count_only=False ):
actions = []
if not self.images_only:
if self.auxName is not None:
aux = getAuxFromMeta( drive_dir, self.auxName )
if aux is not None:
actions = loadOneAux( drive_dir, aux )
if len(actions) == 0:
actions_file = os.path.join( drive_dir, "image_actions.npy" )
if os.path.exists(actions_file):
actions = np.load(actions_file)
else:
actions_file = os.path.join( drive_dir, "image_actions.pickle" )
with open(actions_file,'r') as f:
actions = pickle.load(f)
if len(actions) > 0:
categorical = True
if isinstance(actions[0], basestring):
actions = embedActions( actions )
actions = to_categorical( actions, num_classes=5 )
categorical = True
elif type(actions) == list:
actions = np.array(actions).astype('float')
categorical = False
elif type(actions) == np.ndarray:
actions = np.array(actions).astype('float')
categorical = False
else:
print("Unknown actions format: {} {} as {}".format( type(actions), actions[0], type(actions[0]) ))
if self.categorical is None:
self.categorical = categorical
elif self.categorical != categorical:
print( "Mixed cat/non-cat action space: {}".format( drive_dir ) )
# Need an option for this
#if not self.categorical:
# actions = self.addActionDiff(actions)
if self.num_actions is None:
self.num_actions = len(actions[0])
if count_only:
return len(actions)
basename = "images_{}x{}".format( self.size[0], self.size[1] )
im_file = os.path.join( drive_dir, basename+".npy" )
if os.path.exists(im_file):
images = np.load(im_file)
else:
im_file = os.path.join( drive_dir, basename+".pickle" )
with open(im_file,'r') as f:
images = pickle.load(f)
if count_only:
return len(images)
if not self.images_only and (len(images) != len(actions)):
print( "Data mismatch: {}".format( drive_dir ) )
print( " images: {}".format( images.shape ) )
print( " actions: {}".format( actions.shape ) )
return images, actions
def on_epoch_end(self):
'Updates indexes after each epoch'
if self.shuffle == True:
np.random.shuffle(self.files)
self.images = []
self.actions = []
self.next_dir_index = 0
self.current_start = 0
self.images, self.actions = self.__load_next_max()
def __count(self):
count = 0
for onefile in self.files:
count += self.loadOneDrive( onefile, count_only=True)
return count
def addActionDiff(self, actions):
diff = actions[:,0] - actions[:,1]
#diff *= 10.0
diff = np.reshape( diff, (diff.shape[0], 1) )
actions = np.hstack( (actions, diff) )
return actions
def runTests(args):
pass
def getOptions():
parser = argparse.ArgumentParser(description='Test data loader.', formatter_class=argparse.ArgumentDefaultsHelpFormatter)
malpiOptions.addMalpiOptions( parser )
args = parser.parse_args()
malpiOptions.preprocessOptions(args)
return args
if __name__ == "__main__":
args = getOptions()
if args.test_only:
runTests(args)
exit()
images_only = True
gen = DriveDataGenerator(args.dirs, batch_size=100, shuffle=True, max_load=2000, image_size=(64,64), images_only=images_only )
print( "# samples: {}".format( gen.count ) )
print( "# batches: {}".format( len(gen) ) )
if not images_only:
_, actions = gen[0]
print( "Actions: {}".format( actions.shape ) )
print( " mean: {}".format( np.mean(actions, axis=0) ) )
print( " stdev: {}".format( np.std(actions, axis=0) ) )
for epoch in range(5):
for i in range(len(gen)):
images = gen[i]
print( "Batch {}: {}".format( i, images.shape ), end='\r' )
#print( "action[0]: {}".format( actions[0] ) )
sys.stdout.flush()
time.sleep(0.05)
print("")
print( "End epoch {}".format( epoch ) )
gen.on_epoch_end()
print("")
| prev_act = embedding[act]
if prev_act is None:
print( "Invalid action: {}".format( act ) )
raise ValueError("Invalid action: " + str(act) )
emb.append( embedding[act] ) | conditional_block |
load_drives.py | from __future__ import print_function
import os
import sys
import pickle
import time
import argparse
import numpy as np
import sklearn
from keras.utils import to_categorical
from keras.utils import Sequence
# https://keras.io/utils/#sequence
import malpiOptions
from load_aux import getAuxFromMeta, loadOneAux
# For python2/3 compatibility when calling isinstance(x,basestring)
# From: https://stackoverflow.com/questions/11301138/how-to-check-if-variable-is-string-with-python-2-and-3-compatibility
try:
basestring
except NameError:
basestring = str
def normalize_images( images, default=True ):
if default:
rmean = 92.93206363205326
gmean = 85.80540021330793
bmean = 54.14884297660608
rstd = 57.696159704394354
gstd = 53.739380109203445
bstd = 47.66536771313241
#print( "Default normalization" )
images[:,:,:,0] -= rmean
images[:,:,:,1] -= gmean
images[:,:,:,2] -= bmean
images[:,:,:,0] /= rstd
images[:,:,:,1] /= gstd
images[:,:,:,2] /= bstd
else:
rmean = np.mean(images[:,:,:,0])
gmean= np.mean(images[:,:,:,1])
bmean= np.mean(images[:,:,:,2])
rstd = np.std(images[:,:,:,0])
gstd = np.std(images[:,:,:,1])
bstd = np.std(images[:,:,:,2])
print( "Image means: {}/{}/{}".format( rmean, gmean, bmean ) )
print( "Image stds: {}/{}/{}".format( rstd, gstd, bstd ) )
# should only do this for the training data, not val/test, but I'm not sure how to do that when Keras makes the train/val split
images[:,:,:,0] -= rmean
images[:,:,:,1] -= gmean
images[:,:,:,2] -= bmean
images[:,:,:,0] /= rstd
images[:,:,:,1] /= gstd
images[:,:,:,2] /= bstd |
def embedActions( actions ):
embedding = { "stop":0, "forward":1, "left":2, "right":3, "backward":4 }
emb = []
prev_act = 0
for act in actions:
try:
if not act.startswith("speed"):
prev_act = embedding[act]
if prev_act is None:
print( "Invalid action: {}".format( act ) )
raise ValueError("Invalid action: " + str(act) )
emb.append( embedding[act] )
else:
emb.append( prev_act )
except Exception as ex:
print( ex )
print( act )
return emb
class DriveDataGenerator(Sequence):
""" Loads MaLPi drive data
From: https://stanford.edu/~shervine/blog/keras-how-to-generate-data-on-the-fly.html"""
def __init__(self, filelist, image_size=(120,120), batch_size=32, shuffle=True, max_load=30000, auxName=None, images_only=False ):
""" Input a list of drive directories.
Pre-load each to count number of samples.
load one file and use it to generate batches until we run out.
load the next file, repeat
Re-shuffle on each epoch end
"""
'Initialization'
self.files = filelist
self.size = image_size
self.batch_size = batch_size
self.shuffle = shuffle
self.max_load = max_load
self.auxName = auxName
self.images_only = images_only
self.image_norm = False
self.next_dir_index = 0
self.images = []
self.actions = []
self.current_start = 0
self.categorical = None
self.input_dim = None
self.num_actions = None
self.batch_shape = (self.batch_size,) + self.size + (3,)
print( "Batch shape: {}".format( self.batch_shape ) )
self.count = self.__count()
self.on_epoch_end()
def __len__(self):
'The number of batches per epoch'
return int(np.floor(self.count / self.batch_size))
def __getitem__(self, index):
sample_beg = index * self.batch_size
sample_beg -= self.current_start
sample_end = sample_beg + self.batch_size
#print( "getitem {} {}:{} {}".format( index, sample_beg, sample_end, self.current_start ) )
prev_len = len(self.images)
if (sample_beg < len(self.images)) and (sample_end < len(self.images)):
images = self.images[sample_beg:sample_end]
actions = self.actions[sample_beg:sample_end]
if self.images_only:
if images.shape != self.batch_shape:
print( "Invalid batch shape (1): {} {} {} {}".format( images.shape, sample_beg, sample_end, prev_len) )
return images, images
else:
return images, actions
if sample_beg <= len(self.images):
images = self.images[sample_beg:]
actions = self.actions[sample_beg:]
#sample_end = len(self.images) - sample_beg
sample_end = self.batch_size - len(images)
self.images, self.actions = self.__load_next_max()
try:
i2 = self.images[0:sample_end]
images = np.append(images,i2,axis=0)
except Exception as ex:
print( ex )
print( "images {} {}".format( images.shape, i2.shape ) )
print( "{}".format( images ) )
try:
a2 = self.actions[0:sample_end]
actions = np.append(actions,a2,axis=0)
except Exception as ex:
print( ex )
print( "actions {} {}".format( actions.shape, a2.shape ) )
print( "{}".format( actions ) )
if self.images_only:
if images.shape != self.batch_shape:
print( "Invalid batch shape (2): {} {} {} {}".format( images.shape, sample_beg, sample_end, prev_len) )
return images, images
else:
return images, actions
print( "Invalid batch indexes: {} {}:{}".format( index, sample_beg, sample_end ) )
def __load_next_max(self):
self.current_start += len(self.images)
images = []
actions = []
while len(images) <= self.max_load and self.next_dir_index < len(self.files):
fname = self.files[self.next_dir_index]
dimages, dactions = self.loadOneDrive( fname )
images.extend(dimages)
actions.extend(dactions)
self.next_dir_index += 1
if self.shuffle == True:
if self.images_only:
images = sklearn.utils.shuffle(images)
else:
images, actions = sklearn.utils.shuffle(images,actions)
images = np.array(images)
actions = np.array(actions)
if self.image_norm:
normalize_images(images)
return images, actions
def loadOneDrive( self, drive_dir, count_only=False ):
actions = []
if not self.images_only:
if self.auxName is not None:
aux = getAuxFromMeta( drive_dir, self.auxName )
if aux is not None:
actions = loadOneAux( drive_dir, aux )
if len(actions) == 0:
actions_file = os.path.join( drive_dir, "image_actions.npy" )
if os.path.exists(actions_file):
actions = np.load(actions_file)
else:
actions_file = os.path.join( drive_dir, "image_actions.pickle" )
with open(actions_file,'r') as f:
actions = pickle.load(f)
if len(actions) > 0:
categorical = True
if isinstance(actions[0], basestring):
actions = embedActions( actions )
actions = to_categorical( actions, num_classes=5 )
categorical = True
elif type(actions) == list:
actions = np.array(actions).astype('float')
categorical = False
elif type(actions) == np.ndarray:
actions = np.array(actions).astype('float')
categorical = False
else:
print("Unknown actions format: {} {} as {}".format( type(actions), actions[0], type(actions[0]) ))
if self.categorical is None:
self.categorical = categorical
elif self.categorical != categorical:
print( "Mixed cat/non-cat action space: {}".format( drive_dir ) )
# Need an option for this
#if not self.categorical:
# actions = self.addActionDiff(actions)
if self.num_actions is None:
self.num_actions = len(actions[0])
if count_only:
return len(actions)
basename = "images_{}x{}".format( self.size[0], self.size[1] )
im_file = os.path.join( drive_dir, basename+".npy" )
if os.path.exists(im_file):
images = np.load(im_file)
else:
im_file = os.path.join( drive_dir, basename+".pickle" )
with open(im_file,'r') as f:
images = pickle.load(f)
if count_only:
return len(images)
if not self.images_only and (len(images) != len(actions)):
print( "Data mismatch: {}".format( drive_dir ) )
print( " images: {}".format( images.shape ) )
print( " actions: {}".format( actions.shape ) )
return images, actions
def on_epoch_end(self):
'Updates indexes after each epoch'
if self.shuffle == True:
np.random.shuffle(self.files)
self.images = []
self.actions = []
self.next_dir_index = 0
self.current_start = 0
self.images, self.actions = self.__load_next_max()
def __count(self):
count = 0
for onefile in self.files:
count += self.loadOneDrive( onefile, count_only=True)
return count
def addActionDiff(self, actions):
diff = actions[:,0] - actions[:,1]
#diff *= 10.0
diff = np.reshape( diff, (diff.shape[0], 1) )
actions = np.hstack( (actions, diff) )
return actions
def runTests(args):
pass
def getOptions():
parser = argparse.ArgumentParser(description='Test data loader.', formatter_class=argparse.ArgumentDefaultsHelpFormatter)
malpiOptions.addMalpiOptions( parser )
args = parser.parse_args()
malpiOptions.preprocessOptions(args)
return args
if __name__ == "__main__":
args = getOptions()
if args.test_only:
runTests(args)
exit()
images_only = True
gen = DriveDataGenerator(args.dirs, batch_size=100, shuffle=True, max_load=2000, image_size=(64,64), images_only=images_only )
print( "# samples: {}".format( gen.count ) )
print( "# batches: {}".format( len(gen) ) )
if not images_only:
_, actions = gen[0]
print( "Actions: {}".format( actions.shape ) )
print( " mean: {}".format( np.mean(actions, axis=0) ) )
print( " stdev: {}".format( np.std(actions, axis=0) ) )
for epoch in range(5):
for i in range(len(gen)):
images = gen[i]
print( "Batch {}: {}".format( i, images.shape ), end='\r' )
#print( "action[0]: {}".format( actions[0] ) )
sys.stdout.flush()
time.sleep(0.05)
print("")
print( "End epoch {}".format( epoch ) )
gen.on_epoch_end()
print("") | random_line_split | |
run.go | package run
import (
"context"
"fmt"
"os"
"strings"
"time"
"github.com/docker/cli/cli"
"github.com/docker/docker/api/types"
"github.com/docker/docker/pkg/stdcopy"
"github.com/spf13/cobra"
"github.com/werf/logboek"
"github.com/werf/werf/cmd/werf/common"
"github.com/werf/werf/pkg/build"
"github.com/werf/werf/pkg/container_backend"
"github.com/werf/werf/pkg/docker"
"github.com/werf/werf/pkg/git_repo"
"github.com/werf/werf/pkg/git_repo/gitdata"
"github.com/werf/werf/pkg/giterminism_manager"
"github.com/werf/werf/pkg/image"
"github.com/werf/werf/pkg/logging"
"github.com/werf/werf/pkg/ssh_agent"
"github.com/werf/werf/pkg/storage/lrumeta"
"github.com/werf/werf/pkg/storage/manager"
"github.com/werf/werf/pkg/tmp_manager"
"github.com/werf/werf/pkg/true_git"
"github.com/werf/werf/pkg/werf"
"github.com/werf/werf/pkg/werf/global_warnings"
)
type cmdDataType struct {
Shell bool
Bash bool
RawDockerOptions string
DockerOptions []string
DockerCommand []string
ImageName string
}
var (
cmdData cmdDataType
commonCmdData common.CmdData
)
func NewCmd(ctx context.Context) *cobra.Command {
ctx = common.NewContextWithCmdData(ctx, &commonCmdData)
cmd := common.SetCommandContext(ctx, &cobra.Command{
Use: "run [options] [IMAGE_NAME] [-- COMMAND ARG...]",
Short: "Run container for project image",
Long: common.GetLongCommandDescription(GetRunDocs().Long),
DisableFlagsInUseLine: true,
Example: ` # Run specified image and remove after execution
$ werf run application
# Run image with predefined docker run options and command for debug
$ werf run --shell
# Run image with specified docker run options and command
$ werf run --docker-options="-d -p 5000:5000 --restart=always --name registry" -- /app/run.sh
# Print a resulting docker run command
$ werf run --shell --dry-run
docker run -ti --rm image-stage-test:1ffe83860127e68e893b6aece5b0b7619f903f8492a285c6410371c87018c6a0 /bin/sh`,
Annotations: map[string]string{
common.DisableOptionsInUseLineAnno: "1",
common.DocsLongMD: GetRunDocs().LongMD,
},
RunE: func(cmd *cobra.Command, args []string) error {
ctx := cmd.Context()
defer global_warnings.PrintGlobalWarnings(ctx)
if err := common.ProcessLogOptions(&commonCmdData); err != nil {
common.PrintHelp(cmd)
return err
}
if err := processArgs(cmd, args); err != nil {
common.PrintHelp(cmd)
return err
}
if cmdData.RawDockerOptions != "" {
cmdData.DockerOptions = strings.Fields(cmdData.RawDockerOptions)
}
if cmdData.Shell && cmdData.Bash {
return fmt.Errorf("cannot use --shell and --bash options at the same time")
}
if cmdData.Shell || cmdData.Bash {
if len(cmdData.DockerOptions) == 0 && len(cmdData.DockerCommand) == 0 {
cmdData.DockerOptions = []string{"-ti", "--rm"}
if cmdData.Shell {
cmdData.DockerOptions = append(cmdData.DockerOptions, "--entrypoint=/bin/sh")
}
if cmdData.Bash {
cmdData.DockerOptions = append(cmdData.DockerOptions, "--entrypoint=/bin/bash")
}
} else {
common.PrintHelp(cmd)
return fmt.Errorf("shell option cannot be used with other docker run arguments")
}
} else if len(cmdData.DockerOptions) == 0 {
cmdData.DockerOptions = append(cmdData.DockerOptions, "--rm")
}
return runMain(ctx)
},
})
common.SetupDir(&commonCmdData, cmd)
common.SetupGitWorkTree(&commonCmdData, cmd)
common.SetupConfigTemplatesDir(&commonCmdData, cmd)
common.SetupConfigPath(&commonCmdData, cmd)
common.SetupEnvironment(&commonCmdData, cmd)
common.SetupGiterminismOptions(&commonCmdData, cmd)
common.SetupTmpDir(&commonCmdData, cmd, common.SetupTmpDirOptions{})
common.SetupHomeDir(&commonCmdData, cmd, common.SetupHomeDirOptions{})
common.SetupSSHKey(&commonCmdData, cmd)
common.SetupSecondaryStagesStorageOptions(&commonCmdData, cmd)
common.SetupCacheStagesStorageOptions(&commonCmdData, cmd)
common.SetupRepoOptions(&commonCmdData, cmd, common.RepoDataOptions{OptionalRepo: true})
common.SetupFinalRepo(&commonCmdData, cmd)
common.SetupSkipBuild(&commonCmdData, cmd)
common.SetupRequireBuiltImages(&commonCmdData, cmd)
common.SetupFollow(&commonCmdData, cmd)
common.SetupDockerConfig(&commonCmdData, cmd, "Command needs granted permissions to read and pull images from the specified repo")
common.SetupInsecureRegistry(&commonCmdData, cmd)
common.SetupInsecureHelmDependencies(&commonCmdData, cmd)
common.SetupSkipTlsVerifyRegistry(&commonCmdData, cmd)
common.SetupLogOptions(&commonCmdData, cmd)
common.SetupLogProjectDir(&commonCmdData, cmd)
common.SetupSynchronization(&commonCmdData, cmd)
common.SetupKubeConfig(&commonCmdData, cmd)
common.SetupKubeConfigBase64(&commonCmdData, cmd)
common.SetupKubeContext(&commonCmdData, cmd)
common.SetupDryRun(&commonCmdData, cmd)
common.SetupVirtualMerge(&commonCmdData, cmd)
commonCmdData.SetupPlatform(cmd)
cmd.Flags().BoolVarP(&cmdData.Shell, "shell", "", false, "Use predefined docker options and command for debug")
cmd.Flags().BoolVarP(&cmdData.Bash, "bash", "", false, "Use predefined docker options and command for debug")
cmd.Flags().StringVarP(&cmdData.RawDockerOptions, "docker-options", "", os.Getenv("WERF_DOCKER_OPTIONS"), "Define docker run options (default $WERF_DOCKER_OPTIONS)")
return cmd
}
func processArgs(cmd *cobra.Command, args []string) error {
doubleDashInd := cmd.ArgsLenAtDash()
doubleDashExist := cmd.ArgsLenAtDash() != -1
if doubleDashExist {
if doubleDashInd == len(args) {
return fmt.Errorf("unsupported position args format")
}
switch doubleDashInd {
case 0:
cmdData.DockerCommand = args[doubleDashInd:]
case 1:
cmdData.ImageName = args[0]
cmdData.DockerCommand = args[doubleDashInd:]
default:
return fmt.Errorf("unsupported position args format")
}
} else {
switch len(args) {
case 0:
case 1:
cmdData.ImageName = args[0]
default:
return fmt.Errorf("unsupported position args format")
}
}
return nil
}
func checkDetachDockerOption() error {
for _, value := range cmdData.DockerOptions {
if value == "-d" || value == "--detach" {
return nil
}
}
return fmt.Errorf("the container must be launched in the background (in follow mode): pass -d/--detach with --docker-options option")
}
func getContainerName() string {
for ind, value := range cmdData.DockerOptions {
if value == "--name" {
if ind+1 < len(cmdData.DockerOptions) {
return cmdData.DockerOptions[ind+1]
}
} else if strings.HasPrefix(value, "--name=") {
return strings.TrimPrefix(value, "--name=")
}
}
return ""
}
func runMain(ctx context.Context) error {
global_warnings.PostponeMultiwerfNotUpToDateWarning()
if err := werf.Init(*commonCmdData.TmpDir, *commonCmdData.HomeDir); err != nil {
return fmt.Errorf("initialization error: %w", err)
}
containerBackend, processCtx, err := common.InitProcessContainerBackend(ctx, &commonCmdData)
if err != nil {
return err
}
ctx = processCtx
gitDataManager, err := gitdata.GetHostGitDataManager(ctx)
if err != nil {
return fmt.Errorf("error getting host git data manager: %w", err)
}
if err := git_repo.Init(gitDataManager); err != nil {
return err
}
if err := image.Init(); err != nil {
return err
}
if err := lrumeta.Init(); err != nil {
return err
}
if err := true_git.Init(ctx, true_git.Options{LiveGitOutput: *commonCmdData.LogDebug}); err != nil {
return err
}
if err := common.DockerRegistryInit(ctx, &commonCmdData); err != nil {
return err
}
giterminismManager, err := common.GetGiterminismManager(ctx, &commonCmdData)
if err != nil {
return err
}
common.ProcessLogProjectDir(&commonCmdData, giterminismManager.ProjectDir())
if err := ssh_agent.Init(ctx, common.GetSSHKey(&commonCmdData)); err != nil {
return fmt.Errorf("cannot initialize ssh agent: %w", err)
}
defer func() {
err := ssh_agent.Terminate()
if err != nil {
logboek.Warn().LogF("WARNING: ssh agent termination failed: %s\n", err)
}
}()
if *commonCmdData.Follow {
if cmdData.Shell || cmdData.Bash {
return fmt.Errorf("follow mode does not work with --shell and --bash options")
}
if err := checkDetachDockerOption(); err != nil {
return err
}
containerName := getContainerName()
if containerName == "" {
return fmt.Errorf("follow mode does not work without specific container name: pass --name=CONTAINER_NAME with --docker-options option")
}
return common.FollowGitHead(ctx, &commonCmdData, func(ctx context.Context, headCommitGiterminismManager giterminism_manager.Interface) error {
if err := safeDockerCliRmFunc(ctx, containerName); err != nil {
return err
}
if err := run(ctx, containerBackend, headCommitGiterminismManager); err != nil {
return err
}
go func() {
time.Sleep(500 * time.Millisecond)
fmt.Printf("Attaching to container %s ...\n", containerName)
resp, err := docker.ContainerAttach(ctx, containerName, types.ContainerAttachOptions{
Stream: true,
Stdout: true,
Stderr: true,
Logs: true,
})
if err != nil {
_, _ = fmt.Fprintln(os.Stderr, "WARNING:", err)
}
if _, err := stdcopy.StdCopy(os.Stdout, os.Stderr, resp.Reader); err != nil {
_, _ = fmt.Fprintln(os.Stderr, "WARNING:", err)
}
}()
return nil
})
} else {
if err := run(ctx, containerBackend, giterminismManager); err != nil {
if statusErr, ok := err.(cli.StatusError); ok {
common.TerminateWithError(err.Error(), statusErr.StatusCode)
}
return err
}
return nil
}
}
func run(ctx context.Context, containerBackend container_backend.ContainerBackend, giterminismManager giterminism_manager.Interface) error {
_, werfConfig, err := common.GetRequiredWerfConfig(ctx, &commonCmdData, giterminismManager, common.GetWerfConfigOptions(&commonCmdData, false))
if err != nil {
return fmt.Errorf("unable to load werf config: %w", err)
}
projectName := werfConfig.Meta.Project
projectTmpDir, err := tmp_manager.CreateProjectDir(ctx)
if err != nil {
return fmt.Errorf("getting project tmp dir failed: %w", err)
}
defer tmp_manager.ReleaseProjectDir(projectTmpDir)
imageName := cmdData.ImageName
if imageName == "" && len(werfConfig.GetAllImages()) == 1 {
imageName = werfConfig.GetAllImages()[0].GetName()
}
if !werfConfig.HasImage(imageName) {
return fmt.Errorf("image %q is not defined in werf.yaml", logging.ImageLogName(imageName, false))
}
stagesStorage, err := common.GetStagesStorage(ctx, containerBackend, &commonCmdData)
if err != nil {
return err
}
finalStagesStorage, err := common.GetOptionalFinalStagesStorage(ctx, containerBackend, &commonCmdData)
if err != nil {
return err
}
synchronization, err := common.GetSynchronization(ctx, &commonCmdData, projectName, stagesStorage)
if err != nil {
return err
}
storageLockManager, err := common.GetStorageLockManager(ctx, synchronization)
if err != nil {
return err
}
secondaryStagesStorageList, err := common.GetSecondaryStagesStorageList(ctx, stagesStorage, containerBackend, &commonCmdData)
if err != nil {
return err
}
cacheStagesStorageList, err := common.GetCacheStagesStorageList(ctx, containerBackend, &commonCmdData)
if err != nil {
return err
}
storageManager := manager.NewStorageManager(projectName, stagesStorage, finalStagesStorage, secondaryStagesStorageList, cacheStagesStorageList, storageLockManager)
logboek.Context(ctx).Info().LogOptionalLn()
imagesToProcess := build.NewImagesToProcess([]string{imageName}, false)
conveyorOptions, err := common.GetConveyorOptions(ctx, &commonCmdData, imagesToProcess)
if err != nil {
return err
}
conveyorWithRetry := build.NewConveyorWithRetryWrapper(werfConfig, giterminismManager, giterminismManager.ProjectDir(), projectTmpDir, ssh_agent.SSHAuthSock, containerBackend, storageManager, storageLockManager, conveyorOptions)
defer conveyorWithRetry.Terminate()
var dockerImageName string
if err := conveyorWithRetry.WithRetryBlock(ctx, func(c *build.Conveyor) error {
if common.GetRequireBuiltImages(ctx, &commonCmdData) {
if err := c.ShouldBeBuilt(ctx, build.ShouldBeBuiltOptions{}); err != nil {
return err
}
} else {
if err := c.Build(ctx, build.BuildOptions{SkipImageMetadataPublication: *commonCmdData.Dev}); err != nil {
return err
}
}
dockerImageName, err = c.GetFullImageName(ctx, imageName)
if err != nil {
return fmt.Errorf("unable to get full name for image %q: %w", imageName, err)
}
return nil
}); err != nil {
return err
}
var dockerRunArgs []string
dockerRunArgs = append(dockerRunArgs, cmdData.DockerOptions...)
dockerRunArgs = append(dockerRunArgs, dockerImageName)
dockerRunArgs = append(dockerRunArgs, cmdData.DockerCommand...)
if *commonCmdData.DryRun {
fmt.Printf("docker run %s\n", strings.Join(dockerRunArgs, " "))
return nil
} else {
return logboek.Streams().DoErrorWithoutProxyStreamDataFormatting(func() error {
return common.WithoutTerminationSignalsTrap(func() error {
return docker.CliRun_LiveOutput(ctx, dockerRunArgs...)
})
})
}
}
func safeDockerCliRmFunc(ctx context.Context, containerName string) error {
if exist, err := docker.ContainerExist(ctx, containerName); err != nil | else if exist {
logboek.Context(ctx).LogF("Removing container %s ...\n", containerName)
if err := docker.CliRm(ctx, "-f", containerName); err != nil {
return fmt.Errorf("unable to remove container %s: %w", containerName, err)
}
}
return nil
}
| {
return fmt.Errorf("unable to check container %s existence: %w", containerName, err)
} | conditional_block |
run.go | package run
import (
"context"
"fmt"
"os"
"strings"
"time"
"github.com/docker/cli/cli"
"github.com/docker/docker/api/types"
"github.com/docker/docker/pkg/stdcopy"
"github.com/spf13/cobra"
"github.com/werf/logboek"
"github.com/werf/werf/cmd/werf/common"
"github.com/werf/werf/pkg/build"
"github.com/werf/werf/pkg/container_backend"
"github.com/werf/werf/pkg/docker"
"github.com/werf/werf/pkg/git_repo"
"github.com/werf/werf/pkg/git_repo/gitdata"
"github.com/werf/werf/pkg/giterminism_manager"
"github.com/werf/werf/pkg/image"
"github.com/werf/werf/pkg/logging"
"github.com/werf/werf/pkg/ssh_agent"
"github.com/werf/werf/pkg/storage/lrumeta"
"github.com/werf/werf/pkg/storage/manager"
"github.com/werf/werf/pkg/tmp_manager"
"github.com/werf/werf/pkg/true_git"
"github.com/werf/werf/pkg/werf"
"github.com/werf/werf/pkg/werf/global_warnings"
)
type cmdDataType struct {
Shell bool
Bash bool
RawDockerOptions string
DockerOptions []string
DockerCommand []string
ImageName string
}
var (
cmdData cmdDataType
commonCmdData common.CmdData
)
func NewCmd(ctx context.Context) *cobra.Command {
ctx = common.NewContextWithCmdData(ctx, &commonCmdData)
cmd := common.SetCommandContext(ctx, &cobra.Command{
Use: "run [options] [IMAGE_NAME] [-- COMMAND ARG...]",
Short: "Run container for project image",
Long: common.GetLongCommandDescription(GetRunDocs().Long),
DisableFlagsInUseLine: true,
Example: ` # Run specified image and remove after execution
$ werf run application
# Run image with predefined docker run options and command for debug
$ werf run --shell
# Run image with specified docker run options and command
$ werf run --docker-options="-d -p 5000:5000 --restart=always --name registry" -- /app/run.sh
# Print a resulting docker run command
$ werf run --shell --dry-run
docker run -ti --rm image-stage-test:1ffe83860127e68e893b6aece5b0b7619f903f8492a285c6410371c87018c6a0 /bin/sh`,
Annotations: map[string]string{
common.DisableOptionsInUseLineAnno: "1",
common.DocsLongMD: GetRunDocs().LongMD,
},
RunE: func(cmd *cobra.Command, args []string) error {
ctx := cmd.Context()
defer global_warnings.PrintGlobalWarnings(ctx)
if err := common.ProcessLogOptions(&commonCmdData); err != nil {
common.PrintHelp(cmd)
return err
}
if err := processArgs(cmd, args); err != nil {
common.PrintHelp(cmd)
return err
}
if cmdData.RawDockerOptions != "" {
cmdData.DockerOptions = strings.Fields(cmdData.RawDockerOptions)
}
if cmdData.Shell && cmdData.Bash {
return fmt.Errorf("cannot use --shell and --bash options at the same time")
}
if cmdData.Shell || cmdData.Bash {
if len(cmdData.DockerOptions) == 0 && len(cmdData.DockerCommand) == 0 {
cmdData.DockerOptions = []string{"-ti", "--rm"}
if cmdData.Shell {
cmdData.DockerOptions = append(cmdData.DockerOptions, "--entrypoint=/bin/sh")
}
if cmdData.Bash {
cmdData.DockerOptions = append(cmdData.DockerOptions, "--entrypoint=/bin/bash")
}
} else {
common.PrintHelp(cmd)
return fmt.Errorf("shell option cannot be used with other docker run arguments")
}
} else if len(cmdData.DockerOptions) == 0 {
cmdData.DockerOptions = append(cmdData.DockerOptions, "--rm")
}
return runMain(ctx)
},
})
common.SetupDir(&commonCmdData, cmd)
common.SetupGitWorkTree(&commonCmdData, cmd)
common.SetupConfigTemplatesDir(&commonCmdData, cmd)
common.SetupConfigPath(&commonCmdData, cmd)
common.SetupEnvironment(&commonCmdData, cmd)
common.SetupGiterminismOptions(&commonCmdData, cmd)
common.SetupTmpDir(&commonCmdData, cmd, common.SetupTmpDirOptions{})
common.SetupHomeDir(&commonCmdData, cmd, common.SetupHomeDirOptions{})
common.SetupSSHKey(&commonCmdData, cmd)
common.SetupSecondaryStagesStorageOptions(&commonCmdData, cmd)
common.SetupCacheStagesStorageOptions(&commonCmdData, cmd)
common.SetupRepoOptions(&commonCmdData, cmd, common.RepoDataOptions{OptionalRepo: true})
common.SetupFinalRepo(&commonCmdData, cmd)
common.SetupSkipBuild(&commonCmdData, cmd)
common.SetupRequireBuiltImages(&commonCmdData, cmd)
common.SetupFollow(&commonCmdData, cmd)
common.SetupDockerConfig(&commonCmdData, cmd, "Command needs granted permissions to read and pull images from the specified repo")
common.SetupInsecureRegistry(&commonCmdData, cmd)
common.SetupInsecureHelmDependencies(&commonCmdData, cmd)
common.SetupSkipTlsVerifyRegistry(&commonCmdData, cmd)
common.SetupLogOptions(&commonCmdData, cmd)
common.SetupLogProjectDir(&commonCmdData, cmd)
common.SetupSynchronization(&commonCmdData, cmd)
common.SetupKubeConfig(&commonCmdData, cmd)
common.SetupKubeConfigBase64(&commonCmdData, cmd)
common.SetupKubeContext(&commonCmdData, cmd)
common.SetupDryRun(&commonCmdData, cmd)
common.SetupVirtualMerge(&commonCmdData, cmd)
commonCmdData.SetupPlatform(cmd)
cmd.Flags().BoolVarP(&cmdData.Shell, "shell", "", false, "Use predefined docker options and command for debug")
cmd.Flags().BoolVarP(&cmdData.Bash, "bash", "", false, "Use predefined docker options and command for debug")
cmd.Flags().StringVarP(&cmdData.RawDockerOptions, "docker-options", "", os.Getenv("WERF_DOCKER_OPTIONS"), "Define docker run options (default $WERF_DOCKER_OPTIONS)")
return cmd
}
func processArgs(cmd *cobra.Command, args []string) error |
func checkDetachDockerOption() error {
for _, value := range cmdData.DockerOptions {
if value == "-d" || value == "--detach" {
return nil
}
}
return fmt.Errorf("the container must be launched in the background (in follow mode): pass -d/--detach with --docker-options option")
}
func getContainerName() string {
for ind, value := range cmdData.DockerOptions {
if value == "--name" {
if ind+1 < len(cmdData.DockerOptions) {
return cmdData.DockerOptions[ind+1]
}
} else if strings.HasPrefix(value, "--name=") {
return strings.TrimPrefix(value, "--name=")
}
}
return ""
}
func runMain(ctx context.Context) error {
global_warnings.PostponeMultiwerfNotUpToDateWarning()
if err := werf.Init(*commonCmdData.TmpDir, *commonCmdData.HomeDir); err != nil {
return fmt.Errorf("initialization error: %w", err)
}
containerBackend, processCtx, err := common.InitProcessContainerBackend(ctx, &commonCmdData)
if err != nil {
return err
}
ctx = processCtx
gitDataManager, err := gitdata.GetHostGitDataManager(ctx)
if err != nil {
return fmt.Errorf("error getting host git data manager: %w", err)
}
if err := git_repo.Init(gitDataManager); err != nil {
return err
}
if err := image.Init(); err != nil {
return err
}
if err := lrumeta.Init(); err != nil {
return err
}
if err := true_git.Init(ctx, true_git.Options{LiveGitOutput: *commonCmdData.LogDebug}); err != nil {
return err
}
if err := common.DockerRegistryInit(ctx, &commonCmdData); err != nil {
return err
}
giterminismManager, err := common.GetGiterminismManager(ctx, &commonCmdData)
if err != nil {
return err
}
common.ProcessLogProjectDir(&commonCmdData, giterminismManager.ProjectDir())
if err := ssh_agent.Init(ctx, common.GetSSHKey(&commonCmdData)); err != nil {
return fmt.Errorf("cannot initialize ssh agent: %w", err)
}
defer func() {
err := ssh_agent.Terminate()
if err != nil {
logboek.Warn().LogF("WARNING: ssh agent termination failed: %s\n", err)
}
}()
if *commonCmdData.Follow {
if cmdData.Shell || cmdData.Bash {
return fmt.Errorf("follow mode does not work with --shell and --bash options")
}
if err := checkDetachDockerOption(); err != nil {
return err
}
containerName := getContainerName()
if containerName == "" {
return fmt.Errorf("follow mode does not work without specific container name: pass --name=CONTAINER_NAME with --docker-options option")
}
return common.FollowGitHead(ctx, &commonCmdData, func(ctx context.Context, headCommitGiterminismManager giterminism_manager.Interface) error {
if err := safeDockerCliRmFunc(ctx, containerName); err != nil {
return err
}
if err := run(ctx, containerBackend, headCommitGiterminismManager); err != nil {
return err
}
go func() {
time.Sleep(500 * time.Millisecond)
fmt.Printf("Attaching to container %s ...\n", containerName)
resp, err := docker.ContainerAttach(ctx, containerName, types.ContainerAttachOptions{
Stream: true,
Stdout: true,
Stderr: true,
Logs: true,
})
if err != nil {
_, _ = fmt.Fprintln(os.Stderr, "WARNING:", err)
}
if _, err := stdcopy.StdCopy(os.Stdout, os.Stderr, resp.Reader); err != nil {
_, _ = fmt.Fprintln(os.Stderr, "WARNING:", err)
}
}()
return nil
})
} else {
if err := run(ctx, containerBackend, giterminismManager); err != nil {
if statusErr, ok := err.(cli.StatusError); ok {
common.TerminateWithError(err.Error(), statusErr.StatusCode)
}
return err
}
return nil
}
}
func run(ctx context.Context, containerBackend container_backend.ContainerBackend, giterminismManager giterminism_manager.Interface) error {
_, werfConfig, err := common.GetRequiredWerfConfig(ctx, &commonCmdData, giterminismManager, common.GetWerfConfigOptions(&commonCmdData, false))
if err != nil {
return fmt.Errorf("unable to load werf config: %w", err)
}
projectName := werfConfig.Meta.Project
projectTmpDir, err := tmp_manager.CreateProjectDir(ctx)
if err != nil {
return fmt.Errorf("getting project tmp dir failed: %w", err)
}
defer tmp_manager.ReleaseProjectDir(projectTmpDir)
imageName := cmdData.ImageName
if imageName == "" && len(werfConfig.GetAllImages()) == 1 {
imageName = werfConfig.GetAllImages()[0].GetName()
}
if !werfConfig.HasImage(imageName) {
return fmt.Errorf("image %q is not defined in werf.yaml", logging.ImageLogName(imageName, false))
}
stagesStorage, err := common.GetStagesStorage(ctx, containerBackend, &commonCmdData)
if err != nil {
return err
}
finalStagesStorage, err := common.GetOptionalFinalStagesStorage(ctx, containerBackend, &commonCmdData)
if err != nil {
return err
}
synchronization, err := common.GetSynchronization(ctx, &commonCmdData, projectName, stagesStorage)
if err != nil {
return err
}
storageLockManager, err := common.GetStorageLockManager(ctx, synchronization)
if err != nil {
return err
}
secondaryStagesStorageList, err := common.GetSecondaryStagesStorageList(ctx, stagesStorage, containerBackend, &commonCmdData)
if err != nil {
return err
}
cacheStagesStorageList, err := common.GetCacheStagesStorageList(ctx, containerBackend, &commonCmdData)
if err != nil {
return err
}
storageManager := manager.NewStorageManager(projectName, stagesStorage, finalStagesStorage, secondaryStagesStorageList, cacheStagesStorageList, storageLockManager)
logboek.Context(ctx).Info().LogOptionalLn()
imagesToProcess := build.NewImagesToProcess([]string{imageName}, false)
conveyorOptions, err := common.GetConveyorOptions(ctx, &commonCmdData, imagesToProcess)
if err != nil {
return err
}
conveyorWithRetry := build.NewConveyorWithRetryWrapper(werfConfig, giterminismManager, giterminismManager.ProjectDir(), projectTmpDir, ssh_agent.SSHAuthSock, containerBackend, storageManager, storageLockManager, conveyorOptions)
defer conveyorWithRetry.Terminate()
var dockerImageName string
if err := conveyorWithRetry.WithRetryBlock(ctx, func(c *build.Conveyor) error {
if common.GetRequireBuiltImages(ctx, &commonCmdData) {
if err := c.ShouldBeBuilt(ctx, build.ShouldBeBuiltOptions{}); err != nil {
return err
}
} else {
if err := c.Build(ctx, build.BuildOptions{SkipImageMetadataPublication: *commonCmdData.Dev}); err != nil {
return err
}
}
dockerImageName, err = c.GetFullImageName(ctx, imageName)
if err != nil {
return fmt.Errorf("unable to get full name for image %q: %w", imageName, err)
}
return nil
}); err != nil {
return err
}
var dockerRunArgs []string
dockerRunArgs = append(dockerRunArgs, cmdData.DockerOptions...)
dockerRunArgs = append(dockerRunArgs, dockerImageName)
dockerRunArgs = append(dockerRunArgs, cmdData.DockerCommand...)
if *commonCmdData.DryRun {
fmt.Printf("docker run %s\n", strings.Join(dockerRunArgs, " "))
return nil
} else {
return logboek.Streams().DoErrorWithoutProxyStreamDataFormatting(func() error {
return common.WithoutTerminationSignalsTrap(func() error {
return docker.CliRun_LiveOutput(ctx, dockerRunArgs...)
})
})
}
}
func safeDockerCliRmFunc(ctx context.Context, containerName string) error {
if exist, err := docker.ContainerExist(ctx, containerName); err != nil {
return fmt.Errorf("unable to check container %s existence: %w", containerName, err)
} else if exist {
logboek.Context(ctx).LogF("Removing container %s ...\n", containerName)
if err := docker.CliRm(ctx, "-f", containerName); err != nil {
return fmt.Errorf("unable to remove container %s: %w", containerName, err)
}
}
return nil
}
| {
doubleDashInd := cmd.ArgsLenAtDash()
doubleDashExist := cmd.ArgsLenAtDash() != -1
if doubleDashExist {
if doubleDashInd == len(args) {
return fmt.Errorf("unsupported position args format")
}
switch doubleDashInd {
case 0:
cmdData.DockerCommand = args[doubleDashInd:]
case 1:
cmdData.ImageName = args[0]
cmdData.DockerCommand = args[doubleDashInd:]
default:
return fmt.Errorf("unsupported position args format")
}
} else {
switch len(args) {
case 0:
case 1:
cmdData.ImageName = args[0]
default:
return fmt.Errorf("unsupported position args format")
}
}
return nil
} | identifier_body |
run.go | package run
import (
"context"
"fmt"
"os"
"strings"
"time"
"github.com/docker/cli/cli"
"github.com/docker/docker/api/types"
"github.com/docker/docker/pkg/stdcopy"
"github.com/spf13/cobra"
"github.com/werf/logboek"
"github.com/werf/werf/cmd/werf/common"
"github.com/werf/werf/pkg/build"
"github.com/werf/werf/pkg/container_backend"
"github.com/werf/werf/pkg/docker"
"github.com/werf/werf/pkg/git_repo"
"github.com/werf/werf/pkg/git_repo/gitdata"
"github.com/werf/werf/pkg/giterminism_manager"
"github.com/werf/werf/pkg/image"
"github.com/werf/werf/pkg/logging"
"github.com/werf/werf/pkg/ssh_agent"
"github.com/werf/werf/pkg/storage/lrumeta"
"github.com/werf/werf/pkg/storage/manager"
"github.com/werf/werf/pkg/tmp_manager"
"github.com/werf/werf/pkg/true_git"
"github.com/werf/werf/pkg/werf"
"github.com/werf/werf/pkg/werf/global_warnings"
)
type cmdDataType struct {
Shell bool
Bash bool
RawDockerOptions string
DockerOptions []string
DockerCommand []string
ImageName string
}
var (
cmdData cmdDataType
commonCmdData common.CmdData
)
func NewCmd(ctx context.Context) *cobra.Command {
ctx = common.NewContextWithCmdData(ctx, &commonCmdData)
cmd := common.SetCommandContext(ctx, &cobra.Command{
Use: "run [options] [IMAGE_NAME] [-- COMMAND ARG...]",
Short: "Run container for project image",
Long: common.GetLongCommandDescription(GetRunDocs().Long),
DisableFlagsInUseLine: true,
Example: ` # Run specified image and remove after execution
$ werf run application
# Run image with predefined docker run options and command for debug
$ werf run --shell
# Run image with specified docker run options and command
$ werf run --docker-options="-d -p 5000:5000 --restart=always --name registry" -- /app/run.sh
# Print a resulting docker run command
$ werf run --shell --dry-run
docker run -ti --rm image-stage-test:1ffe83860127e68e893b6aece5b0b7619f903f8492a285c6410371c87018c6a0 /bin/sh`,
Annotations: map[string]string{
common.DisableOptionsInUseLineAnno: "1",
common.DocsLongMD: GetRunDocs().LongMD,
},
RunE: func(cmd *cobra.Command, args []string) error {
ctx := cmd.Context()
defer global_warnings.PrintGlobalWarnings(ctx)
if err := common.ProcessLogOptions(&commonCmdData); err != nil {
common.PrintHelp(cmd)
return err
}
if err := processArgs(cmd, args); err != nil {
common.PrintHelp(cmd)
return err
}
if cmdData.RawDockerOptions != "" {
cmdData.DockerOptions = strings.Fields(cmdData.RawDockerOptions)
}
if cmdData.Shell && cmdData.Bash {
return fmt.Errorf("cannot use --shell and --bash options at the same time")
}
if cmdData.Shell || cmdData.Bash {
if len(cmdData.DockerOptions) == 0 && len(cmdData.DockerCommand) == 0 {
cmdData.DockerOptions = []string{"-ti", "--rm"}
if cmdData.Shell {
cmdData.DockerOptions = append(cmdData.DockerOptions, "--entrypoint=/bin/sh")
}
if cmdData.Bash {
cmdData.DockerOptions = append(cmdData.DockerOptions, "--entrypoint=/bin/bash")
}
} else {
common.PrintHelp(cmd)
return fmt.Errorf("shell option cannot be used with other docker run arguments")
}
} else if len(cmdData.DockerOptions) == 0 {
cmdData.DockerOptions = append(cmdData.DockerOptions, "--rm")
}
return runMain(ctx)
},
})
common.SetupDir(&commonCmdData, cmd)
common.SetupGitWorkTree(&commonCmdData, cmd)
common.SetupConfigTemplatesDir(&commonCmdData, cmd)
common.SetupConfigPath(&commonCmdData, cmd)
common.SetupEnvironment(&commonCmdData, cmd)
common.SetupGiterminismOptions(&commonCmdData, cmd)
common.SetupTmpDir(&commonCmdData, cmd, common.SetupTmpDirOptions{})
common.SetupHomeDir(&commonCmdData, cmd, common.SetupHomeDirOptions{})
common.SetupSSHKey(&commonCmdData, cmd)
common.SetupSecondaryStagesStorageOptions(&commonCmdData, cmd)
common.SetupCacheStagesStorageOptions(&commonCmdData, cmd)
common.SetupRepoOptions(&commonCmdData, cmd, common.RepoDataOptions{OptionalRepo: true})
common.SetupFinalRepo(&commonCmdData, cmd)
common.SetupSkipBuild(&commonCmdData, cmd)
common.SetupRequireBuiltImages(&commonCmdData, cmd)
common.SetupFollow(&commonCmdData, cmd)
common.SetupDockerConfig(&commonCmdData, cmd, "Command needs granted permissions to read and pull images from the specified repo")
common.SetupInsecureRegistry(&commonCmdData, cmd)
common.SetupInsecureHelmDependencies(&commonCmdData, cmd)
common.SetupSkipTlsVerifyRegistry(&commonCmdData, cmd)
common.SetupLogOptions(&commonCmdData, cmd)
common.SetupLogProjectDir(&commonCmdData, cmd)
common.SetupSynchronization(&commonCmdData, cmd)
common.SetupKubeConfig(&commonCmdData, cmd)
common.SetupKubeConfigBase64(&commonCmdData, cmd)
common.SetupKubeContext(&commonCmdData, cmd)
common.SetupDryRun(&commonCmdData, cmd)
common.SetupVirtualMerge(&commonCmdData, cmd)
commonCmdData.SetupPlatform(cmd)
cmd.Flags().BoolVarP(&cmdData.Shell, "shell", "", false, "Use predefined docker options and command for debug")
cmd.Flags().BoolVarP(&cmdData.Bash, "bash", "", false, "Use predefined docker options and command for debug")
cmd.Flags().StringVarP(&cmdData.RawDockerOptions, "docker-options", "", os.Getenv("WERF_DOCKER_OPTIONS"), "Define docker run options (default $WERF_DOCKER_OPTIONS)")
return cmd
}
func processArgs(cmd *cobra.Command, args []string) error {
doubleDashInd := cmd.ArgsLenAtDash()
doubleDashExist := cmd.ArgsLenAtDash() != -1
if doubleDashExist {
if doubleDashInd == len(args) {
return fmt.Errorf("unsupported position args format")
}
switch doubleDashInd {
case 0:
cmdData.DockerCommand = args[doubleDashInd:]
case 1:
cmdData.ImageName = args[0]
cmdData.DockerCommand = args[doubleDashInd:]
default:
return fmt.Errorf("unsupported position args format")
}
} else {
switch len(args) {
case 0:
case 1:
cmdData.ImageName = args[0]
default:
return fmt.Errorf("unsupported position args format")
}
}
return nil
}
func checkDetachDockerOption() error {
for _, value := range cmdData.DockerOptions {
if value == "-d" || value == "--detach" {
return nil
}
}
return fmt.Errorf("the container must be launched in the background (in follow mode): pass -d/--detach with --docker-options option")
}
func | () string {
for ind, value := range cmdData.DockerOptions {
if value == "--name" {
if ind+1 < len(cmdData.DockerOptions) {
return cmdData.DockerOptions[ind+1]
}
} else if strings.HasPrefix(value, "--name=") {
return strings.TrimPrefix(value, "--name=")
}
}
return ""
}
func runMain(ctx context.Context) error {
global_warnings.PostponeMultiwerfNotUpToDateWarning()
if err := werf.Init(*commonCmdData.TmpDir, *commonCmdData.HomeDir); err != nil {
return fmt.Errorf("initialization error: %w", err)
}
containerBackend, processCtx, err := common.InitProcessContainerBackend(ctx, &commonCmdData)
if err != nil {
return err
}
ctx = processCtx
gitDataManager, err := gitdata.GetHostGitDataManager(ctx)
if err != nil {
return fmt.Errorf("error getting host git data manager: %w", err)
}
if err := git_repo.Init(gitDataManager); err != nil {
return err
}
if err := image.Init(); err != nil {
return err
}
if err := lrumeta.Init(); err != nil {
return err
}
if err := true_git.Init(ctx, true_git.Options{LiveGitOutput: *commonCmdData.LogDebug}); err != nil {
return err
}
if err := common.DockerRegistryInit(ctx, &commonCmdData); err != nil {
return err
}
giterminismManager, err := common.GetGiterminismManager(ctx, &commonCmdData)
if err != nil {
return err
}
common.ProcessLogProjectDir(&commonCmdData, giterminismManager.ProjectDir())
if err := ssh_agent.Init(ctx, common.GetSSHKey(&commonCmdData)); err != nil {
return fmt.Errorf("cannot initialize ssh agent: %w", err)
}
defer func() {
err := ssh_agent.Terminate()
if err != nil {
logboek.Warn().LogF("WARNING: ssh agent termination failed: %s\n", err)
}
}()
if *commonCmdData.Follow {
if cmdData.Shell || cmdData.Bash {
return fmt.Errorf("follow mode does not work with --shell and --bash options")
}
if err := checkDetachDockerOption(); err != nil {
return err
}
containerName := getContainerName()
if containerName == "" {
return fmt.Errorf("follow mode does not work without specific container name: pass --name=CONTAINER_NAME with --docker-options option")
}
return common.FollowGitHead(ctx, &commonCmdData, func(ctx context.Context, headCommitGiterminismManager giterminism_manager.Interface) error {
if err := safeDockerCliRmFunc(ctx, containerName); err != nil {
return err
}
if err := run(ctx, containerBackend, headCommitGiterminismManager); err != nil {
return err
}
go func() {
time.Sleep(500 * time.Millisecond)
fmt.Printf("Attaching to container %s ...\n", containerName)
resp, err := docker.ContainerAttach(ctx, containerName, types.ContainerAttachOptions{
Stream: true,
Stdout: true,
Stderr: true,
Logs: true,
})
if err != nil {
_, _ = fmt.Fprintln(os.Stderr, "WARNING:", err)
}
if _, err := stdcopy.StdCopy(os.Stdout, os.Stderr, resp.Reader); err != nil {
_, _ = fmt.Fprintln(os.Stderr, "WARNING:", err)
}
}()
return nil
})
} else {
if err := run(ctx, containerBackend, giterminismManager); err != nil {
if statusErr, ok := err.(cli.StatusError); ok {
common.TerminateWithError(err.Error(), statusErr.StatusCode)
}
return err
}
return nil
}
}
func run(ctx context.Context, containerBackend container_backend.ContainerBackend, giterminismManager giterminism_manager.Interface) error {
_, werfConfig, err := common.GetRequiredWerfConfig(ctx, &commonCmdData, giterminismManager, common.GetWerfConfigOptions(&commonCmdData, false))
if err != nil {
return fmt.Errorf("unable to load werf config: %w", err)
}
projectName := werfConfig.Meta.Project
projectTmpDir, err := tmp_manager.CreateProjectDir(ctx)
if err != nil {
return fmt.Errorf("getting project tmp dir failed: %w", err)
}
defer tmp_manager.ReleaseProjectDir(projectTmpDir)
imageName := cmdData.ImageName
if imageName == "" && len(werfConfig.GetAllImages()) == 1 {
imageName = werfConfig.GetAllImages()[0].GetName()
}
if !werfConfig.HasImage(imageName) {
return fmt.Errorf("image %q is not defined in werf.yaml", logging.ImageLogName(imageName, false))
}
stagesStorage, err := common.GetStagesStorage(ctx, containerBackend, &commonCmdData)
if err != nil {
return err
}
finalStagesStorage, err := common.GetOptionalFinalStagesStorage(ctx, containerBackend, &commonCmdData)
if err != nil {
return err
}
synchronization, err := common.GetSynchronization(ctx, &commonCmdData, projectName, stagesStorage)
if err != nil {
return err
}
storageLockManager, err := common.GetStorageLockManager(ctx, synchronization)
if err != nil {
return err
}
secondaryStagesStorageList, err := common.GetSecondaryStagesStorageList(ctx, stagesStorage, containerBackend, &commonCmdData)
if err != nil {
return err
}
cacheStagesStorageList, err := common.GetCacheStagesStorageList(ctx, containerBackend, &commonCmdData)
if err != nil {
return err
}
storageManager := manager.NewStorageManager(projectName, stagesStorage, finalStagesStorage, secondaryStagesStorageList, cacheStagesStorageList, storageLockManager)
logboek.Context(ctx).Info().LogOptionalLn()
imagesToProcess := build.NewImagesToProcess([]string{imageName}, false)
conveyorOptions, err := common.GetConveyorOptions(ctx, &commonCmdData, imagesToProcess)
if err != nil {
return err
}
conveyorWithRetry := build.NewConveyorWithRetryWrapper(werfConfig, giterminismManager, giterminismManager.ProjectDir(), projectTmpDir, ssh_agent.SSHAuthSock, containerBackend, storageManager, storageLockManager, conveyorOptions)
defer conveyorWithRetry.Terminate()
var dockerImageName string
if err := conveyorWithRetry.WithRetryBlock(ctx, func(c *build.Conveyor) error {
if common.GetRequireBuiltImages(ctx, &commonCmdData) {
if err := c.ShouldBeBuilt(ctx, build.ShouldBeBuiltOptions{}); err != nil {
return err
}
} else {
if err := c.Build(ctx, build.BuildOptions{SkipImageMetadataPublication: *commonCmdData.Dev}); err != nil {
return err
}
}
dockerImageName, err = c.GetFullImageName(ctx, imageName)
if err != nil {
return fmt.Errorf("unable to get full name for image %q: %w", imageName, err)
}
return nil
}); err != nil {
return err
}
var dockerRunArgs []string
dockerRunArgs = append(dockerRunArgs, cmdData.DockerOptions...)
dockerRunArgs = append(dockerRunArgs, dockerImageName)
dockerRunArgs = append(dockerRunArgs, cmdData.DockerCommand...)
if *commonCmdData.DryRun {
fmt.Printf("docker run %s\n", strings.Join(dockerRunArgs, " "))
return nil
} else {
return logboek.Streams().DoErrorWithoutProxyStreamDataFormatting(func() error {
return common.WithoutTerminationSignalsTrap(func() error {
return docker.CliRun_LiveOutput(ctx, dockerRunArgs...)
})
})
}
}
func safeDockerCliRmFunc(ctx context.Context, containerName string) error {
if exist, err := docker.ContainerExist(ctx, containerName); err != nil {
return fmt.Errorf("unable to check container %s existence: %w", containerName, err)
} else if exist {
logboek.Context(ctx).LogF("Removing container %s ...\n", containerName)
if err := docker.CliRm(ctx, "-f", containerName); err != nil {
return fmt.Errorf("unable to remove container %s: %w", containerName, err)
}
}
return nil
}
| getContainerName | identifier_name |
run.go | package run
import (
"context"
"fmt"
"os"
"strings"
"time"
"github.com/docker/cli/cli"
"github.com/docker/docker/api/types"
"github.com/docker/docker/pkg/stdcopy"
"github.com/spf13/cobra"
"github.com/werf/logboek"
"github.com/werf/werf/cmd/werf/common"
"github.com/werf/werf/pkg/build"
"github.com/werf/werf/pkg/container_backend"
"github.com/werf/werf/pkg/docker"
"github.com/werf/werf/pkg/git_repo"
"github.com/werf/werf/pkg/git_repo/gitdata"
"github.com/werf/werf/pkg/giterminism_manager"
"github.com/werf/werf/pkg/image"
"github.com/werf/werf/pkg/logging"
"github.com/werf/werf/pkg/ssh_agent"
"github.com/werf/werf/pkg/storage/lrumeta"
"github.com/werf/werf/pkg/storage/manager"
"github.com/werf/werf/pkg/tmp_manager"
"github.com/werf/werf/pkg/true_git"
"github.com/werf/werf/pkg/werf"
"github.com/werf/werf/pkg/werf/global_warnings"
)
type cmdDataType struct {
Shell bool
Bash bool
RawDockerOptions string
DockerOptions []string
DockerCommand []string
ImageName string
}
var (
cmdData cmdDataType
commonCmdData common.CmdData
)
func NewCmd(ctx context.Context) *cobra.Command {
ctx = common.NewContextWithCmdData(ctx, &commonCmdData)
cmd := common.SetCommandContext(ctx, &cobra.Command{
Use: "run [options] [IMAGE_NAME] [-- COMMAND ARG...]",
Short: "Run container for project image",
Long: common.GetLongCommandDescription(GetRunDocs().Long),
DisableFlagsInUseLine: true,
Example: ` # Run specified image and remove after execution
$ werf run application
# Run image with predefined docker run options and command for debug
$ werf run --shell
# Run image with specified docker run options and command
$ werf run --docker-options="-d -p 5000:5000 --restart=always --name registry" -- /app/run.sh
# Print a resulting docker run command
$ werf run --shell --dry-run
docker run -ti --rm image-stage-test:1ffe83860127e68e893b6aece5b0b7619f903f8492a285c6410371c87018c6a0 /bin/sh`,
Annotations: map[string]string{
common.DisableOptionsInUseLineAnno: "1",
common.DocsLongMD: GetRunDocs().LongMD,
},
RunE: func(cmd *cobra.Command, args []string) error {
ctx := cmd.Context()
defer global_warnings.PrintGlobalWarnings(ctx)
if err := common.ProcessLogOptions(&commonCmdData); err != nil {
common.PrintHelp(cmd)
return err
}
if err := processArgs(cmd, args); err != nil {
common.PrintHelp(cmd)
return err
}
if cmdData.RawDockerOptions != "" {
cmdData.DockerOptions = strings.Fields(cmdData.RawDockerOptions)
}
if cmdData.Shell && cmdData.Bash {
return fmt.Errorf("cannot use --shell and --bash options at the same time")
}
if cmdData.Shell || cmdData.Bash {
if len(cmdData.DockerOptions) == 0 && len(cmdData.DockerCommand) == 0 {
cmdData.DockerOptions = []string{"-ti", "--rm"}
if cmdData.Shell {
cmdData.DockerOptions = append(cmdData.DockerOptions, "--entrypoint=/bin/sh")
}
if cmdData.Bash {
cmdData.DockerOptions = append(cmdData.DockerOptions, "--entrypoint=/bin/bash")
}
} else {
common.PrintHelp(cmd)
return fmt.Errorf("shell option cannot be used with other docker run arguments")
}
} else if len(cmdData.DockerOptions) == 0 {
cmdData.DockerOptions = append(cmdData.DockerOptions, "--rm")
}
return runMain(ctx)
},
})
common.SetupDir(&commonCmdData, cmd)
common.SetupGitWorkTree(&commonCmdData, cmd)
common.SetupConfigTemplatesDir(&commonCmdData, cmd)
common.SetupConfigPath(&commonCmdData, cmd)
common.SetupEnvironment(&commonCmdData, cmd)
common.SetupGiterminismOptions(&commonCmdData, cmd)
common.SetupTmpDir(&commonCmdData, cmd, common.SetupTmpDirOptions{})
common.SetupHomeDir(&commonCmdData, cmd, common.SetupHomeDirOptions{})
common.SetupSSHKey(&commonCmdData, cmd)
common.SetupSecondaryStagesStorageOptions(&commonCmdData, cmd)
common.SetupCacheStagesStorageOptions(&commonCmdData, cmd)
common.SetupRepoOptions(&commonCmdData, cmd, common.RepoDataOptions{OptionalRepo: true})
common.SetupFinalRepo(&commonCmdData, cmd)
common.SetupSkipBuild(&commonCmdData, cmd)
common.SetupRequireBuiltImages(&commonCmdData, cmd)
common.SetupFollow(&commonCmdData, cmd)
common.SetupDockerConfig(&commonCmdData, cmd, "Command needs granted permissions to read and pull images from the specified repo")
common.SetupInsecureRegistry(&commonCmdData, cmd)
common.SetupInsecureHelmDependencies(&commonCmdData, cmd)
common.SetupSkipTlsVerifyRegistry(&commonCmdData, cmd)
common.SetupLogOptions(&commonCmdData, cmd)
common.SetupLogProjectDir(&commonCmdData, cmd)
common.SetupSynchronization(&commonCmdData, cmd)
common.SetupKubeConfig(&commonCmdData, cmd)
common.SetupKubeConfigBase64(&commonCmdData, cmd)
common.SetupKubeContext(&commonCmdData, cmd)
common.SetupDryRun(&commonCmdData, cmd)
common.SetupVirtualMerge(&commonCmdData, cmd)
commonCmdData.SetupPlatform(cmd)
cmd.Flags().BoolVarP(&cmdData.Shell, "shell", "", false, "Use predefined docker options and command for debug")
cmd.Flags().BoolVarP(&cmdData.Bash, "bash", "", false, "Use predefined docker options and command for debug")
cmd.Flags().StringVarP(&cmdData.RawDockerOptions, "docker-options", "", os.Getenv("WERF_DOCKER_OPTIONS"), "Define docker run options (default $WERF_DOCKER_OPTIONS)")
return cmd
}
func processArgs(cmd *cobra.Command, args []string) error {
doubleDashInd := cmd.ArgsLenAtDash()
doubleDashExist := cmd.ArgsLenAtDash() != -1
if doubleDashExist {
if doubleDashInd == len(args) {
return fmt.Errorf("unsupported position args format")
}
switch doubleDashInd {
case 0:
cmdData.DockerCommand = args[doubleDashInd:]
case 1:
cmdData.ImageName = args[0]
cmdData.DockerCommand = args[doubleDashInd:]
default:
return fmt.Errorf("unsupported position args format")
}
} else {
switch len(args) {
case 0:
case 1:
cmdData.ImageName = args[0]
default:
return fmt.Errorf("unsupported position args format")
}
}
return nil
}
func checkDetachDockerOption() error {
for _, value := range cmdData.DockerOptions {
if value == "-d" || value == "--detach" {
return nil
}
}
return fmt.Errorf("the container must be launched in the background (in follow mode): pass -d/--detach with --docker-options option")
}
func getContainerName() string {
for ind, value := range cmdData.DockerOptions {
if value == "--name" {
if ind+1 < len(cmdData.DockerOptions) {
return cmdData.DockerOptions[ind+1]
}
} else if strings.HasPrefix(value, "--name=") {
return strings.TrimPrefix(value, "--name=")
}
}
return ""
}
func runMain(ctx context.Context) error {
global_warnings.PostponeMultiwerfNotUpToDateWarning()
if err := werf.Init(*commonCmdData.TmpDir, *commonCmdData.HomeDir); err != nil {
return fmt.Errorf("initialization error: %w", err)
}
containerBackend, processCtx, err := common.InitProcessContainerBackend(ctx, &commonCmdData)
if err != nil {
return err
}
ctx = processCtx
gitDataManager, err := gitdata.GetHostGitDataManager(ctx)
if err != nil {
return fmt.Errorf("error getting host git data manager: %w", err)
}
if err := git_repo.Init(gitDataManager); err != nil {
return err
}
if err := image.Init(); err != nil {
return err
}
if err := lrumeta.Init(); err != nil {
return err
}
if err := true_git.Init(ctx, true_git.Options{LiveGitOutput: *commonCmdData.LogDebug}); err != nil {
return err
}
if err := common.DockerRegistryInit(ctx, &commonCmdData); err != nil {
return err
}
giterminismManager, err := common.GetGiterminismManager(ctx, &commonCmdData)
if err != nil {
return err
}
common.ProcessLogProjectDir(&commonCmdData, giterminismManager.ProjectDir())
if err := ssh_agent.Init(ctx, common.GetSSHKey(&commonCmdData)); err != nil {
return fmt.Errorf("cannot initialize ssh agent: %w", err)
}
defer func() {
err := ssh_agent.Terminate()
if err != nil {
logboek.Warn().LogF("WARNING: ssh agent termination failed: %s\n", err)
}
}()
if *commonCmdData.Follow {
if cmdData.Shell || cmdData.Bash {
return fmt.Errorf("follow mode does not work with --shell and --bash options")
}
if err := checkDetachDockerOption(); err != nil {
return err
}
containerName := getContainerName()
if containerName == "" {
return fmt.Errorf("follow mode does not work without specific container name: pass --name=CONTAINER_NAME with --docker-options option")
}
return common.FollowGitHead(ctx, &commonCmdData, func(ctx context.Context, headCommitGiterminismManager giterminism_manager.Interface) error {
if err := safeDockerCliRmFunc(ctx, containerName); err != nil {
return err
}
if err := run(ctx, containerBackend, headCommitGiterminismManager); err != nil {
return err
}
go func() {
time.Sleep(500 * time.Millisecond)
fmt.Printf("Attaching to container %s ...\n", containerName)
resp, err := docker.ContainerAttach(ctx, containerName, types.ContainerAttachOptions{
Stream: true,
Stdout: true,
Stderr: true,
Logs: true,
})
if err != nil {
_, _ = fmt.Fprintln(os.Stderr, "WARNING:", err)
}
if _, err := stdcopy.StdCopy(os.Stdout, os.Stderr, resp.Reader); err != nil { | }()
return nil
})
} else {
if err := run(ctx, containerBackend, giterminismManager); err != nil {
if statusErr, ok := err.(cli.StatusError); ok {
common.TerminateWithError(err.Error(), statusErr.StatusCode)
}
return err
}
return nil
}
}
func run(ctx context.Context, containerBackend container_backend.ContainerBackend, giterminismManager giterminism_manager.Interface) error {
_, werfConfig, err := common.GetRequiredWerfConfig(ctx, &commonCmdData, giterminismManager, common.GetWerfConfigOptions(&commonCmdData, false))
if err != nil {
return fmt.Errorf("unable to load werf config: %w", err)
}
projectName := werfConfig.Meta.Project
projectTmpDir, err := tmp_manager.CreateProjectDir(ctx)
if err != nil {
return fmt.Errorf("getting project tmp dir failed: %w", err)
}
defer tmp_manager.ReleaseProjectDir(projectTmpDir)
imageName := cmdData.ImageName
if imageName == "" && len(werfConfig.GetAllImages()) == 1 {
imageName = werfConfig.GetAllImages()[0].GetName()
}
if !werfConfig.HasImage(imageName) {
return fmt.Errorf("image %q is not defined in werf.yaml", logging.ImageLogName(imageName, false))
}
stagesStorage, err := common.GetStagesStorage(ctx, containerBackend, &commonCmdData)
if err != nil {
return err
}
finalStagesStorage, err := common.GetOptionalFinalStagesStorage(ctx, containerBackend, &commonCmdData)
if err != nil {
return err
}
synchronization, err := common.GetSynchronization(ctx, &commonCmdData, projectName, stagesStorage)
if err != nil {
return err
}
storageLockManager, err := common.GetStorageLockManager(ctx, synchronization)
if err != nil {
return err
}
secondaryStagesStorageList, err := common.GetSecondaryStagesStorageList(ctx, stagesStorage, containerBackend, &commonCmdData)
if err != nil {
return err
}
cacheStagesStorageList, err := common.GetCacheStagesStorageList(ctx, containerBackend, &commonCmdData)
if err != nil {
return err
}
storageManager := manager.NewStorageManager(projectName, stagesStorage, finalStagesStorage, secondaryStagesStorageList, cacheStagesStorageList, storageLockManager)
logboek.Context(ctx).Info().LogOptionalLn()
imagesToProcess := build.NewImagesToProcess([]string{imageName}, false)
conveyorOptions, err := common.GetConveyorOptions(ctx, &commonCmdData, imagesToProcess)
if err != nil {
return err
}
conveyorWithRetry := build.NewConveyorWithRetryWrapper(werfConfig, giterminismManager, giterminismManager.ProjectDir(), projectTmpDir, ssh_agent.SSHAuthSock, containerBackend, storageManager, storageLockManager, conveyorOptions)
defer conveyorWithRetry.Terminate()
var dockerImageName string
if err := conveyorWithRetry.WithRetryBlock(ctx, func(c *build.Conveyor) error {
if common.GetRequireBuiltImages(ctx, &commonCmdData) {
if err := c.ShouldBeBuilt(ctx, build.ShouldBeBuiltOptions{}); err != nil {
return err
}
} else {
if err := c.Build(ctx, build.BuildOptions{SkipImageMetadataPublication: *commonCmdData.Dev}); err != nil {
return err
}
}
dockerImageName, err = c.GetFullImageName(ctx, imageName)
if err != nil {
return fmt.Errorf("unable to get full name for image %q: %w", imageName, err)
}
return nil
}); err != nil {
return err
}
var dockerRunArgs []string
dockerRunArgs = append(dockerRunArgs, cmdData.DockerOptions...)
dockerRunArgs = append(dockerRunArgs, dockerImageName)
dockerRunArgs = append(dockerRunArgs, cmdData.DockerCommand...)
if *commonCmdData.DryRun {
fmt.Printf("docker run %s\n", strings.Join(dockerRunArgs, " "))
return nil
} else {
return logboek.Streams().DoErrorWithoutProxyStreamDataFormatting(func() error {
return common.WithoutTerminationSignalsTrap(func() error {
return docker.CliRun_LiveOutput(ctx, dockerRunArgs...)
})
})
}
}
func safeDockerCliRmFunc(ctx context.Context, containerName string) error {
if exist, err := docker.ContainerExist(ctx, containerName); err != nil {
return fmt.Errorf("unable to check container %s existence: %w", containerName, err)
} else if exist {
logboek.Context(ctx).LogF("Removing container %s ...\n", containerName)
if err := docker.CliRm(ctx, "-f", containerName); err != nil {
return fmt.Errorf("unable to remove container %s: %w", containerName, err)
}
}
return nil
} | _, _ = fmt.Fprintln(os.Stderr, "WARNING:", err)
} | random_line_split |
lib.rs | //! A fast, extensible, command-line arguments parser.
//!
//! This library is very new, so expect regular breaking changes. If you find a
//! bug or lacking documentation, don't hesitate to open an
//! [issue](https://github.com/Aloso/parkour/issues) or a pull request.
//!
//! This crate started as an experiment, so I'm not sure yet if I want to
//! maintain it long-term. See [here](https://github.com/Aloso/parkour/issues/1)
//! for more.
//!
//! ## Getting started
//!
//! Parkour requires const generics. The first rust version that supports them
//! is Rust 1.51 (`rustc 1.51.0-beta.2`). You can install it with `rustup
//! default beta`.
//!
//! It's recommended to import the [prelude](./prelude/index.html):
//!
//! ```
//! use parkour::prelude::*;
//! ```
//!
//! First, create a struct containing all the data you want to parse. For
//! example:
//!
//! ```
//! struct Command {
//! color: Option<bool>,
//! show: Option<Show>,
//! }
//!
//! struct Show {
//! pos1: String,
//! out: ColorSpace,
//! size: u8,
//! }
//!
//! enum ColorSpace {
//! Rgb,
//! Cmy,
//! Cmyk,
//! Hsv,
//! Hsl,
//! CieLab,
//! }
//! ```
//!
//! `bool`, `u8` and `String` can all be parsed by default. To parse
//! `ColorSpace`, we have to implement the [`FromInputValue`] trait. This
//! easiest by using the derive macro:
//!
//! ```
//! # use parkour::prelude::*;
//! #[derive(FromInputValue)]
//! enum ColorSpace {
//! Rgb,
//! Cmy,
//! Cmyk,
//! Hsv,
//! Hsl,
//! CieLab,
//! }
//! ```
//!
//! This parses the names of the enum variants case-insensitively. When an
//! invalid value is provided, the error message will say something like:
//!
//! ```text
//! unexpected value, got `foo`, expected rgb, cmy, cmyk, hsv, hsl or cielab
//! ```
//!
//! Now let's implement `Show` as a subcommand. Unfortunately, there's no
//! convenient derive macro (yet):
//!
//! ```
//! # use parkour::prelude::*;
//! # #[derive(FromInputValue)]
//! # enum ColorSpace { Rgb, Cmy, Cmyk, Hsv, Hsl, CieLab }
//! #
//! struct Show {
//! pos1: String,
//! color_space: ColorSpace,
//! size: u8,
//! }
//!
//! impl FromInput<'static> for Show {
//! type Context = ();
//!
//! fn from_input(input: &mut ArgsInput, _: &()) -> parkour::Result<Self> {
//! if input.parse_command("show") {
//! let mut pos1 = None;
//! let mut color_space = None;
//! let mut size = None;
//!
//! while !input.is_empty() {
//! if SetOnce(&mut color_space)
//! .apply(input, &Flag::LongShort("color-space", "c").into())? {
//! continue;
//! }
//!
//! if SetOnce(&mut size)
//! .apply(input, &Flag::LongShort("size", "s").into())? {
//! continue;
//! }
//!
//! if pos1.is_none()
//! && SetPositional(&mut pos1).apply(input, &"pos1".into())? {
//! continue;
//! }
//!
//! input.expect_empty()?;
//! }
//!
//! Ok(Show {
//! pos1: pos1.ok_or_else(|| parkour::Error::missing_argument("pos1"))?,
//! color_space: color_space
//! .ok_or_else(|| parkour::Error::missing_argument("--color-space"))?,
//! size: size.unwrap_or(4),
//! })
//! } else {
//! Err(parkour::Error::no_value())
//! }
//! }
//! }
//! ```
//!
//! To parse a subcommand, we implement the [`FromInput`] trait. We first check
//! if the next argument is the word `show`. If that's the case, we iterate over
//! the remaining input, until it is empty.
//!
//! In the subcommand, we expect two named arguments (`--color-space` and
//! `--size`) and a positional argument (`pos`). Therefore, in each iteration,
//! we first check if we can parse the named arguments, and then the positional
//! argument. If none of them succeeds and there is still input left, then
//! `input.expect_empty()?` throws an error.
//!
//! Producing the `Show` struct is rather straightforward (`pos` and
//! `--color-space` are required, `--size` defaults to `4`). However, parsing
//! the values involves some type system magic. `SetOnce` and `SetPositional`
//! are [actions], they check if the referenced types can be parsed, and if so,
//! assign the parsed value to the variable automatically. They also ensure that
//! each argument is parsed at most once.
//!
//! Whenever something is parsed, a _context_ is provided that can contain
//! information about _how_ the value should be parsed. In the above example,
//! `Flag::LongShort("color-space", "c").into()` is a context that instructs the
//! parser to parse the color space after the `--color-space` or the `-c` flag.
//!
//! The main command can be implemented similarly:
//!
//! ```
//! # use parkour::prelude::*;
//! # enum ColorSpace { Rgb, Cmy, Cmyk, Hsv, Hsl, CieLab }
//! # struct Show {
//! # pos1: String, | //! # impl FromInput<'static> for Show {
//! # type Context = ();
//! # fn from_input(input: &mut ArgsInput, _: &()) -> parkour::Result<Self> {
//! # todo!()
//! # }
//! # }
//! #
//! struct Command {
//! color: Option<bool>,
//! show: Option<Show>,
//! }
//!
//! impl FromInput<'static> for Command {
//! type Context = ();
//!
//! fn from_input(input: &mut ArgsInput, _: &()) -> parkour::Result<Self> {
//! // discard the first argument, which is the path to the executable
//! input.bump_argument().unwrap();
//!
//! let mut show = None;
//! let mut color = None;
//!
//! while !input.is_empty() {
//! if SetOnce(&mut color).apply(input, &Flag::LongShort("color", "c").into())? {
//! continue;
//! }
//!
//! if SetSubcommand(&mut show).apply(input, &())? {
//! continue;
//! }
//!
//! input.expect_empty()?;
//! }
//! Ok(Command { show, color })
//! }
//! }
//! ```
//!
//! This is pretty self-explanatory. Now let's proceed to the main function:
//!
//! ```
//! # use parkour::prelude::*;
//! # struct Command {
//! # color: Option<bool>,
//! # show: Option<()>,
//! # }
//! # impl FromInput<'static> for Command {
//! # type Context = ();
//! # fn from_input(input: &mut ArgsInput, _: &()) -> parkour::Result<Self> {
//! # Ok(Command { color: None, show: None })
//! # }
//! # }
//! #
//! use std::error::Error;
//!
//! fn main() {
//! match Command::from_input(&mut parkour::parser(), &()) {
//! Ok(command) => {
//! println!("parsed successfully");
//! }
//! Err(e) if e.is_early_exit() => {}
//! Err(e) => {
//! eprint!("{}", e);
//! let mut source = e.source();
//! while let Some(s) = source {
//! eprint!(": {}", s);
//! source = s.source();
//! }
//! eprintln!();
//! }
//! }
//! }
//! ```
//!
//! The [`parser`] function creates a new parser instance, which
//! implements [`Parse`]. This is used to parse the `Command`. If it fails, we
//! print the error with its sources. I will implement a more convenient method
//! for this, I just haven't gotten around to it yet. I also plan to implement
//! ANSI color support.
//!
//! What's with the `e.is_early_exit()`, you might wonder? This error is
//! returned when parsing was aborted and can be ignored. This error can be used
//! e.g. when the `--help` flag is encountered:
//!
//! ```no_run
//! # use parkour::prelude::*;
//! # struct Command {
//! # color: Option<bool>,
//! # show: Option<()>,
//! # }
//! impl FromInput<'static> for Command {
//! type Context = ();
//!
//! fn from_input(input: &mut ArgsInput, _: &()) -> Result<Self, parkour::Error> {
//! # let color = None;
//! # let show = None;
//! // <snip>
//! while !input.is_empty() {
//! if input.parse_long_flag("help") || input.parse_short_flag("h") {
//! println!("Usage:\n\
//! my-program [-h,--help]\n\
//! my-program show POS1 -c,--color-space VALUE [-s,--size N]");
//!
//! return Err(parkour::Error::early_exit());
//! }
//!
//! // <snip>
//! }
//! Ok(Command { show, color })
//! }
//! }
//! ```
//!
//! There is one special case that isn't handled yet: The argument `--` usually
//! causes the remaining tokens to be treated as positional arguments, even if
//! they start with a dash. This is easily implemented:
//!
//! ```no_run
//! # use parkour::prelude::*;
//! # struct Command {
//! # color: Option<bool>,
//! # show: Option<()>,
//! # }
//! impl FromInput<'static> for Command {
//! type Context = ();
//!
//! fn from_input(input: &mut ArgsInput, _: &()) -> Result<Self, parkour::Error> {
//! # let color = None;
//! # let show = None;
//! // <snip>
//! while !input.is_empty() {
//! if input.parse_long_flag("") {
//! input.set_ignore_dashes(true);
//! continue;
//! }
//!
//! // <snip>
//! }
//! Ok(Command { show, color })
//! }
//! }
//! ```
//!
//! Unfortunately, this must be repeated in every subcommand.
#![forbid(unsafe_code)]
#![warn(missing_docs)]
pub use error::{Error, ErrorInner};
pub use from_input::{FromInput, FromInputValue};
pub use parse::Parse;
pub use palex::ArgsInput;
#[cfg(feature = "derive")]
pub use parkour_derive::{FromInput, FromInputValue};
pub mod actions;
mod error;
mod from_input;
pub mod help;
pub mod impls;
mod parse;
pub mod util;
/// A parkour result.
pub type Result<T> = std::result::Result<T, Error>;
/// Create a new parser, which can be used to parse the
/// command-line arguments of the program.
pub fn parser() -> ArgsInput {
ArgsInput::from_args()
}
/// A prelude to make it easier to import all the needed types and traits. Use
/// it like this:
///
/// ```
/// use parkour::prelude::*;
/// ```
pub mod prelude {
pub use crate::actions::{
Action, Append, Dec, Inc, Reset, Set, SetOnce, SetPositional, SetSubcommand,
Unset,
};
pub use crate::impls::{ListCtx, NumberCtx, StringCtx};
pub use crate::util::{ArgCtx, Flag, PosCtx};
pub use crate::{ArgsInput, FromInput, FromInputValue, Parse};
} | //! # color_space: ColorSpace,
//! # size: u8,
//! # } | random_line_split |
lib.rs | //! A fast, extensible, command-line arguments parser.
//!
//! This library is very new, so expect regular breaking changes. If you find a
//! bug or lacking documentation, don't hesitate to open an
//! [issue](https://github.com/Aloso/parkour/issues) or a pull request.
//!
//! This crate started as an experiment, so I'm not sure yet if I want to
//! maintain it long-term. See [here](https://github.com/Aloso/parkour/issues/1)
//! for more.
//!
//! ## Getting started
//!
//! Parkour requires const generics. The first rust version that supports them
//! is Rust 1.51 (`rustc 1.51.0-beta.2`). You can install it with `rustup
//! default beta`.
//!
//! It's recommended to import the [prelude](./prelude/index.html):
//!
//! ```
//! use parkour::prelude::*;
//! ```
//!
//! First, create a struct containing all the data you want to parse. For
//! example:
//!
//! ```
//! struct Command {
//! color: Option<bool>,
//! show: Option<Show>,
//! }
//!
//! struct Show {
//! pos1: String,
//! out: ColorSpace,
//! size: u8,
//! }
//!
//! enum ColorSpace {
//! Rgb,
//! Cmy,
//! Cmyk,
//! Hsv,
//! Hsl,
//! CieLab,
//! }
//! ```
//!
//! `bool`, `u8` and `String` can all be parsed by default. To parse
//! `ColorSpace`, we have to implement the [`FromInputValue`] trait. This
//! easiest by using the derive macro:
//!
//! ```
//! # use parkour::prelude::*;
//! #[derive(FromInputValue)]
//! enum ColorSpace {
//! Rgb,
//! Cmy,
//! Cmyk,
//! Hsv,
//! Hsl,
//! CieLab,
//! }
//! ```
//!
//! This parses the names of the enum variants case-insensitively. When an
//! invalid value is provided, the error message will say something like:
//!
//! ```text
//! unexpected value, got `foo`, expected rgb, cmy, cmyk, hsv, hsl or cielab
//! ```
//!
//! Now let's implement `Show` as a subcommand. Unfortunately, there's no
//! convenient derive macro (yet):
//!
//! ```
//! # use parkour::prelude::*;
//! # #[derive(FromInputValue)]
//! # enum ColorSpace { Rgb, Cmy, Cmyk, Hsv, Hsl, CieLab }
//! #
//! struct Show {
//! pos1: String,
//! color_space: ColorSpace,
//! size: u8,
//! }
//!
//! impl FromInput<'static> for Show {
//! type Context = ();
//!
//! fn from_input(input: &mut ArgsInput, _: &()) -> parkour::Result<Self> {
//! if input.parse_command("show") {
//! let mut pos1 = None;
//! let mut color_space = None;
//! let mut size = None;
//!
//! while !input.is_empty() {
//! if SetOnce(&mut color_space)
//! .apply(input, &Flag::LongShort("color-space", "c").into())? {
//! continue;
//! }
//!
//! if SetOnce(&mut size)
//! .apply(input, &Flag::LongShort("size", "s").into())? {
//! continue;
//! }
//!
//! if pos1.is_none()
//! && SetPositional(&mut pos1).apply(input, &"pos1".into())? {
//! continue;
//! }
//!
//! input.expect_empty()?;
//! }
//!
//! Ok(Show {
//! pos1: pos1.ok_or_else(|| parkour::Error::missing_argument("pos1"))?,
//! color_space: color_space
//! .ok_or_else(|| parkour::Error::missing_argument("--color-space"))?,
//! size: size.unwrap_or(4),
//! })
//! } else {
//! Err(parkour::Error::no_value())
//! }
//! }
//! }
//! ```
//!
//! To parse a subcommand, we implement the [`FromInput`] trait. We first check
//! if the next argument is the word `show`. If that's the case, we iterate over
//! the remaining input, until it is empty.
//!
//! In the subcommand, we expect two named arguments (`--color-space` and
//! `--size`) and a positional argument (`pos`). Therefore, in each iteration,
//! we first check if we can parse the named arguments, and then the positional
//! argument. If none of them succeeds and there is still input left, then
//! `input.expect_empty()?` throws an error.
//!
//! Producing the `Show` struct is rather straightforward (`pos` and
//! `--color-space` are required, `--size` defaults to `4`). However, parsing
//! the values involves some type system magic. `SetOnce` and `SetPositional`
//! are [actions], they check if the referenced types can be parsed, and if so,
//! assign the parsed value to the variable automatically. They also ensure that
//! each argument is parsed at most once.
//!
//! Whenever something is parsed, a _context_ is provided that can contain
//! information about _how_ the value should be parsed. In the above example,
//! `Flag::LongShort("color-space", "c").into()` is a context that instructs the
//! parser to parse the color space after the `--color-space` or the `-c` flag.
//!
//! The main command can be implemented similarly:
//!
//! ```
//! # use parkour::prelude::*;
//! # enum ColorSpace { Rgb, Cmy, Cmyk, Hsv, Hsl, CieLab }
//! # struct Show {
//! # pos1: String,
//! # color_space: ColorSpace,
//! # size: u8,
//! # }
//! # impl FromInput<'static> for Show {
//! # type Context = ();
//! # fn from_input(input: &mut ArgsInput, _: &()) -> parkour::Result<Self> {
//! # todo!()
//! # }
//! # }
//! #
//! struct Command {
//! color: Option<bool>,
//! show: Option<Show>,
//! }
//!
//! impl FromInput<'static> for Command {
//! type Context = ();
//!
//! fn from_input(input: &mut ArgsInput, _: &()) -> parkour::Result<Self> {
//! // discard the first argument, which is the path to the executable
//! input.bump_argument().unwrap();
//!
//! let mut show = None;
//! let mut color = None;
//!
//! while !input.is_empty() {
//! if SetOnce(&mut color).apply(input, &Flag::LongShort("color", "c").into())? {
//! continue;
//! }
//!
//! if SetSubcommand(&mut show).apply(input, &())? {
//! continue;
//! }
//!
//! input.expect_empty()?;
//! }
//! Ok(Command { show, color })
//! }
//! }
//! ```
//!
//! This is pretty self-explanatory. Now let's proceed to the main function:
//!
//! ```
//! # use parkour::prelude::*;
//! # struct Command {
//! # color: Option<bool>,
//! # show: Option<()>,
//! # }
//! # impl FromInput<'static> for Command {
//! # type Context = ();
//! # fn from_input(input: &mut ArgsInput, _: &()) -> parkour::Result<Self> {
//! # Ok(Command { color: None, show: None })
//! # }
//! # }
//! #
//! use std::error::Error;
//!
//! fn main() {
//! match Command::from_input(&mut parkour::parser(), &()) {
//! Ok(command) => {
//! println!("parsed successfully");
//! }
//! Err(e) if e.is_early_exit() => {}
//! Err(e) => {
//! eprint!("{}", e);
//! let mut source = e.source();
//! while let Some(s) = source {
//! eprint!(": {}", s);
//! source = s.source();
//! }
//! eprintln!();
//! }
//! }
//! }
//! ```
//!
//! The [`parser`] function creates a new parser instance, which
//! implements [`Parse`]. This is used to parse the `Command`. If it fails, we
//! print the error with its sources. I will implement a more convenient method
//! for this, I just haven't gotten around to it yet. I also plan to implement
//! ANSI color support.
//!
//! What's with the `e.is_early_exit()`, you might wonder? This error is
//! returned when parsing was aborted and can be ignored. This error can be used
//! e.g. when the `--help` flag is encountered:
//!
//! ```no_run
//! # use parkour::prelude::*;
//! # struct Command {
//! # color: Option<bool>,
//! # show: Option<()>,
//! # }
//! impl FromInput<'static> for Command {
//! type Context = ();
//!
//! fn from_input(input: &mut ArgsInput, _: &()) -> Result<Self, parkour::Error> {
//! # let color = None;
//! # let show = None;
//! // <snip>
//! while !input.is_empty() {
//! if input.parse_long_flag("help") || input.parse_short_flag("h") {
//! println!("Usage:\n\
//! my-program [-h,--help]\n\
//! my-program show POS1 -c,--color-space VALUE [-s,--size N]");
//!
//! return Err(parkour::Error::early_exit());
//! }
//!
//! // <snip>
//! }
//! Ok(Command { show, color })
//! }
//! }
//! ```
//!
//! There is one special case that isn't handled yet: The argument `--` usually
//! causes the remaining tokens to be treated as positional arguments, even if
//! they start with a dash. This is easily implemented:
//!
//! ```no_run
//! # use parkour::prelude::*;
//! # struct Command {
//! # color: Option<bool>,
//! # show: Option<()>,
//! # }
//! impl FromInput<'static> for Command {
//! type Context = ();
//!
//! fn from_input(input: &mut ArgsInput, _: &()) -> Result<Self, parkour::Error> {
//! # let color = None;
//! # let show = None;
//! // <snip>
//! while !input.is_empty() {
//! if input.parse_long_flag("") {
//! input.set_ignore_dashes(true);
//! continue;
//! }
//!
//! // <snip>
//! }
//! Ok(Command { show, color })
//! }
//! }
//! ```
//!
//! Unfortunately, this must be repeated in every subcommand.
#![forbid(unsafe_code)]
#![warn(missing_docs)]
pub use error::{Error, ErrorInner};
pub use from_input::{FromInput, FromInputValue};
pub use parse::Parse;
pub use palex::ArgsInput;
#[cfg(feature = "derive")]
pub use parkour_derive::{FromInput, FromInputValue};
pub mod actions;
mod error;
mod from_input;
pub mod help;
pub mod impls;
mod parse;
pub mod util;
/// A parkour result.
pub type Result<T> = std::result::Result<T, Error>;
/// Create a new parser, which can be used to parse the
/// command-line arguments of the program.
pub fn | () -> ArgsInput {
ArgsInput::from_args()
}
/// A prelude to make it easier to import all the needed types and traits. Use
/// it like this:
///
/// ```
/// use parkour::prelude::*;
/// ```
pub mod prelude {
pub use crate::actions::{
Action, Append, Dec, Inc, Reset, Set, SetOnce, SetPositional, SetSubcommand,
Unset,
};
pub use crate::impls::{ListCtx, NumberCtx, StringCtx};
pub use crate::util::{ArgCtx, Flag, PosCtx};
pub use crate::{ArgsInput, FromInput, FromInputValue, Parse};
}
| parser | identifier_name |
lib.rs | //! A fast, extensible, command-line arguments parser.
//!
//! This library is very new, so expect regular breaking changes. If you find a
//! bug or lacking documentation, don't hesitate to open an
//! [issue](https://github.com/Aloso/parkour/issues) or a pull request.
//!
//! This crate started as an experiment, so I'm not sure yet if I want to
//! maintain it long-term. See [here](https://github.com/Aloso/parkour/issues/1)
//! for more.
//!
//! ## Getting started
//!
//! Parkour requires const generics. The first rust version that supports them
//! is Rust 1.51 (`rustc 1.51.0-beta.2`). You can install it with `rustup
//! default beta`.
//!
//! It's recommended to import the [prelude](./prelude/index.html):
//!
//! ```
//! use parkour::prelude::*;
//! ```
//!
//! First, create a struct containing all the data you want to parse. For
//! example:
//!
//! ```
//! struct Command {
//! color: Option<bool>,
//! show: Option<Show>,
//! }
//!
//! struct Show {
//! pos1: String,
//! out: ColorSpace,
//! size: u8,
//! }
//!
//! enum ColorSpace {
//! Rgb,
//! Cmy,
//! Cmyk,
//! Hsv,
//! Hsl,
//! CieLab,
//! }
//! ```
//!
//! `bool`, `u8` and `String` can all be parsed by default. To parse
//! `ColorSpace`, we have to implement the [`FromInputValue`] trait. This
//! easiest by using the derive macro:
//!
//! ```
//! # use parkour::prelude::*;
//! #[derive(FromInputValue)]
//! enum ColorSpace {
//! Rgb,
//! Cmy,
//! Cmyk,
//! Hsv,
//! Hsl,
//! CieLab,
//! }
//! ```
//!
//! This parses the names of the enum variants case-insensitively. When an
//! invalid value is provided, the error message will say something like:
//!
//! ```text
//! unexpected value, got `foo`, expected rgb, cmy, cmyk, hsv, hsl or cielab
//! ```
//!
//! Now let's implement `Show` as a subcommand. Unfortunately, there's no
//! convenient derive macro (yet):
//!
//! ```
//! # use parkour::prelude::*;
//! # #[derive(FromInputValue)]
//! # enum ColorSpace { Rgb, Cmy, Cmyk, Hsv, Hsl, CieLab }
//! #
//! struct Show {
//! pos1: String,
//! color_space: ColorSpace,
//! size: u8,
//! }
//!
//! impl FromInput<'static> for Show {
//! type Context = ();
//!
//! fn from_input(input: &mut ArgsInput, _: &()) -> parkour::Result<Self> {
//! if input.parse_command("show") {
//! let mut pos1 = None;
//! let mut color_space = None;
//! let mut size = None;
//!
//! while !input.is_empty() {
//! if SetOnce(&mut color_space)
//! .apply(input, &Flag::LongShort("color-space", "c").into())? {
//! continue;
//! }
//!
//! if SetOnce(&mut size)
//! .apply(input, &Flag::LongShort("size", "s").into())? {
//! continue;
//! }
//!
//! if pos1.is_none()
//! && SetPositional(&mut pos1).apply(input, &"pos1".into())? {
//! continue;
//! }
//!
//! input.expect_empty()?;
//! }
//!
//! Ok(Show {
//! pos1: pos1.ok_or_else(|| parkour::Error::missing_argument("pos1"))?,
//! color_space: color_space
//! .ok_or_else(|| parkour::Error::missing_argument("--color-space"))?,
//! size: size.unwrap_or(4),
//! })
//! } else {
//! Err(parkour::Error::no_value())
//! }
//! }
//! }
//! ```
//!
//! To parse a subcommand, we implement the [`FromInput`] trait. We first check
//! if the next argument is the word `show`. If that's the case, we iterate over
//! the remaining input, until it is empty.
//!
//! In the subcommand, we expect two named arguments (`--color-space` and
//! `--size`) and a positional argument (`pos`). Therefore, in each iteration,
//! we first check if we can parse the named arguments, and then the positional
//! argument. If none of them succeeds and there is still input left, then
//! `input.expect_empty()?` throws an error.
//!
//! Producing the `Show` struct is rather straightforward (`pos` and
//! `--color-space` are required, `--size` defaults to `4`). However, parsing
//! the values involves some type system magic. `SetOnce` and `SetPositional`
//! are [actions], they check if the referenced types can be parsed, and if so,
//! assign the parsed value to the variable automatically. They also ensure that
//! each argument is parsed at most once.
//!
//! Whenever something is parsed, a _context_ is provided that can contain
//! information about _how_ the value should be parsed. In the above example,
//! `Flag::LongShort("color-space", "c").into()` is a context that instructs the
//! parser to parse the color space after the `--color-space` or the `-c` flag.
//!
//! The main command can be implemented similarly:
//!
//! ```
//! # use parkour::prelude::*;
//! # enum ColorSpace { Rgb, Cmy, Cmyk, Hsv, Hsl, CieLab }
//! # struct Show {
//! # pos1: String,
//! # color_space: ColorSpace,
//! # size: u8,
//! # }
//! # impl FromInput<'static> for Show {
//! # type Context = ();
//! # fn from_input(input: &mut ArgsInput, _: &()) -> parkour::Result<Self> {
//! # todo!()
//! # }
//! # }
//! #
//! struct Command {
//! color: Option<bool>,
//! show: Option<Show>,
//! }
//!
//! impl FromInput<'static> for Command {
//! type Context = ();
//!
//! fn from_input(input: &mut ArgsInput, _: &()) -> parkour::Result<Self> {
//! // discard the first argument, which is the path to the executable
//! input.bump_argument().unwrap();
//!
//! let mut show = None;
//! let mut color = None;
//!
//! while !input.is_empty() {
//! if SetOnce(&mut color).apply(input, &Flag::LongShort("color", "c").into())? {
//! continue;
//! }
//!
//! if SetSubcommand(&mut show).apply(input, &())? {
//! continue;
//! }
//!
//! input.expect_empty()?;
//! }
//! Ok(Command { show, color })
//! }
//! }
//! ```
//!
//! This is pretty self-explanatory. Now let's proceed to the main function:
//!
//! ```
//! # use parkour::prelude::*;
//! # struct Command {
//! # color: Option<bool>,
//! # show: Option<()>,
//! # }
//! # impl FromInput<'static> for Command {
//! # type Context = ();
//! # fn from_input(input: &mut ArgsInput, _: &()) -> parkour::Result<Self> {
//! # Ok(Command { color: None, show: None })
//! # }
//! # }
//! #
//! use std::error::Error;
//!
//! fn main() {
//! match Command::from_input(&mut parkour::parser(), &()) {
//! Ok(command) => {
//! println!("parsed successfully");
//! }
//! Err(e) if e.is_early_exit() => {}
//! Err(e) => {
//! eprint!("{}", e);
//! let mut source = e.source();
//! while let Some(s) = source {
//! eprint!(": {}", s);
//! source = s.source();
//! }
//! eprintln!();
//! }
//! }
//! }
//! ```
//!
//! The [`parser`] function creates a new parser instance, which
//! implements [`Parse`]. This is used to parse the `Command`. If it fails, we
//! print the error with its sources. I will implement a more convenient method
//! for this, I just haven't gotten around to it yet. I also plan to implement
//! ANSI color support.
//!
//! What's with the `e.is_early_exit()`, you might wonder? This error is
//! returned when parsing was aborted and can be ignored. This error can be used
//! e.g. when the `--help` flag is encountered:
//!
//! ```no_run
//! # use parkour::prelude::*;
//! # struct Command {
//! # color: Option<bool>,
//! # show: Option<()>,
//! # }
//! impl FromInput<'static> for Command {
//! type Context = ();
//!
//! fn from_input(input: &mut ArgsInput, _: &()) -> Result<Self, parkour::Error> {
//! # let color = None;
//! # let show = None;
//! // <snip>
//! while !input.is_empty() {
//! if input.parse_long_flag("help") || input.parse_short_flag("h") {
//! println!("Usage:\n\
//! my-program [-h,--help]\n\
//! my-program show POS1 -c,--color-space VALUE [-s,--size N]");
//!
//! return Err(parkour::Error::early_exit());
//! }
//!
//! // <snip>
//! }
//! Ok(Command { show, color })
//! }
//! }
//! ```
//!
//! There is one special case that isn't handled yet: The argument `--` usually
//! causes the remaining tokens to be treated as positional arguments, even if
//! they start with a dash. This is easily implemented:
//!
//! ```no_run
//! # use parkour::prelude::*;
//! # struct Command {
//! # color: Option<bool>,
//! # show: Option<()>,
//! # }
//! impl FromInput<'static> for Command {
//! type Context = ();
//!
//! fn from_input(input: &mut ArgsInput, _: &()) -> Result<Self, parkour::Error> {
//! # let color = None;
//! # let show = None;
//! // <snip>
//! while !input.is_empty() {
//! if input.parse_long_flag("") {
//! input.set_ignore_dashes(true);
//! continue;
//! }
//!
//! // <snip>
//! }
//! Ok(Command { show, color })
//! }
//! }
//! ```
//!
//! Unfortunately, this must be repeated in every subcommand.
#![forbid(unsafe_code)]
#![warn(missing_docs)]
pub use error::{Error, ErrorInner};
pub use from_input::{FromInput, FromInputValue};
pub use parse::Parse;
pub use palex::ArgsInput;
#[cfg(feature = "derive")]
pub use parkour_derive::{FromInput, FromInputValue};
pub mod actions;
mod error;
mod from_input;
pub mod help;
pub mod impls;
mod parse;
pub mod util;
/// A parkour result.
pub type Result<T> = std::result::Result<T, Error>;
/// Create a new parser, which can be used to parse the
/// command-line arguments of the program.
pub fn parser() -> ArgsInput |
/// A prelude to make it easier to import all the needed types and traits. Use
/// it like this:
///
/// ```
/// use parkour::prelude::*;
/// ```
pub mod prelude {
pub use crate::actions::{
Action, Append, Dec, Inc, Reset, Set, SetOnce, SetPositional, SetSubcommand,
Unset,
};
pub use crate::impls::{ListCtx, NumberCtx, StringCtx};
pub use crate::util::{ArgCtx, Flag, PosCtx};
pub use crate::{ArgsInput, FromInput, FromInputValue, Parse};
}
| {
ArgsInput::from_args()
} | identifier_body |
wikipedia.py | # -*- coding: utf-8 -*-
# This file is part of beets.
# Copyright 2017, Gokturk Gok & Nurefsan Sarikaya.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Adds Wikipedia search support to the auto-tagger. Requires the
BeautifulSoup library.
"""
from __future__ import division, absolute_import, print_function
from beets.autotag.hooks import Distance
from beets.plugins import BeetsPlugin
from requests.exceptions import ConnectionError
import time
import urllib.request
from bs4 import BeautifulSoup
info_boxList = ['Released', 'Genre', 'Length', 'Label']
# -----------------------------------------------------------------------
# is Info Box check
def is_in_info(liste):
for i in info_boxList:
if(len(liste) != 0 and liste[0] == i):
return True
return False
def get_track_length(duration):
"""
Returns the track length in seconds for a wiki duration.
"""
try:
length = time.strptime(duration, '%M:%S')
except ValueError:
return None
return length.tm_min * 60 + length.tm_sec
# ----------------------------------------------------------------------
"""
WikiAlbum class is being served like AlbumInfo object which keeps all album
meta data in itself.
"""
class WikiAlbum(object):
def __init__(self, artist, album_name):
self.album = album_name
self.artist = artist
self.tracks = []
self.album_length = ""
self.label = None
self.year = None
self.data_source = "Wikipedia"
self.data_url = ""
self.album_id = 1
self.va = False
self.artist_id = 1
self.asin = None
self.albumtype = None
self.year = None
self.month = None
self.day = None
self.mediums = 1
self.artist_sort = None
self.releasegroup_id = None
self.catalognum = None
self.script = None
self.language = None
self.country = None
self.albumstatus = None
self.media = None
self.albumdisambig = None
self.artist_credit = None
self.original_year = None
self.original_month = None
self.original_day = None
try:
url = 'https://en.wikipedia.org/wiki/' + album_name + \
'_(' + artist + '_album)'
html = urllib.request.urlopen(url).read()
except urllib.error.HTTPError:
try:
url = 'https://en.wikipedia.org/wiki/' + album_name + \
'_(album)'
html = urllib.request.urlopen(url).read()
except urllib.error.HTTPError:
try:
url = 'https://en.wikipedia.org/wiki/' + album_name
html = urllib.request.urlopen(url).read()
except urllib.error.HTTPError:
try:
# in case of album name has (Deluxe) extension
url = 'https://en.wikipedia.org/wiki/' +\
album_name[:-9] + '_(' + artist + '_album)'
html = urllib.request.urlopen(url).read()
except urllib.error.HTTPError:
raise urllib.error.HTTPError
except ConnectionError:
raise ConnectionError
self.data_url = url
soup = BeautifulSoup(html, "lxml")
# ------------------ INFOBOX PARSING ----------------------#
info_box = soup.findAll("table", {"class": "infobox"})
info_counter = 1
for info in info_box:
for row in info.findAll("tr"):
if (self.artist == "" and info_counter == 3):
self.artist = row.getText().split()[-1]
data = (row.getText()).split('\n')
data = list(filter(None, data))
if (is_in_info(data)):
if(data[0] == 'Label'):
self.label = str(data[1:])
elif(data[0] == 'Released'):
if (data[1][-1] == ")"):
self.year = int(data[1][-11:-7])
self.month = int(data[1][-6:-4])
self.day = int(data[1][-3:-1])
else:
self.year = int(data[1][-4:])
# Album length which is converted into beets length format
elif(data[0] == "Length"):
self.album_length = get_track_length(data[1])
# getting Genre
elif(data[0] == "Genre"):
fixed_genre = ""
for character in data[1]:
if (character != ("[" or "{" or "(")):
fixed_genre += character
else:
break
self.genre = fixed_genre
info_counter += 1
track_tables = soup.findAll("table", {"class": "tracklist"})
# set the MediumTotal,total number of tracks in an album is required
track_counter = 0
for table in track_tables:
for row in table.findAll("tr"):
row_data = (row.getText()).split('\n')
row_data = list(filter(None, row_data))
# pick only the tracks not irrelevant parts of the tables.
# len(row_data) check is used for getting the correct
# table data & checks track numbers whether it is exist or not
if (row_data[0][:-1].isdigit() and len(row_data) > 3):
track_counter += 1
for table in track_tables:
for row in table.findAll("tr"):
row_data = (row.getText()).split('\n')
row_data = list(filter(None, row_data))
# pick only the tracks not irrelevant parts of the tables.
# len(row_data) check is used for getting the correct table
# data and checking track numbers whether it is exist or not
if (row_data[0][:-1].isdigit() and len(row_data) > 3):
one_track = Track(row_data)
one_track.set_data_url(self.data_url)
one_track.set_medium_total(track_counter)
self.tracks.append(one_track)
def get_album_len(self):
return self.album_length
def get_tracks(self):
return self.tracks
# keeps the metadata of tracks which are gathered from wikipedia
# like TrackInfo object in beets
class Track(object):
def __init__(self, row):
#####
self.medium = 1
self.disctitle = "CD"
#####
self.medium_index = int(row[0][:-1])
self.track_id = int(row[0][:-1])
self.index = int(row[0][:-1])
# wiping out the character (") from track name
temp_name = ""
for i in row[1]:
|
self.title = str(temp_name)
self.writer = list(row[2].split(','))
self.producers = row[3:-1]
self.length = get_track_length(row[-1])
self.artist = None
self.artist_id = None
self.media = None
self.medium_total = None
self.artist_sort = None
self.artist_credit = None
self.data_source = "Wikipedia"
self.data_url = None
self.lyricist = None
self.composer = None
self.composer_sort = None
self.arranger = None
self.track_alt = None
self.track = self.index
self.disc = self.medium
self.disctotal = 2
self.mb_trackid = self.track_id
self.mb_albumid = None
self.mb_album_artistid = None
self.mb_artist_id = None
self.mb_releasegroupid = None
self.comp = 0
self.tracktotal = None
self.albumartist_sort = None
self.albumartist_credit = None
def set_medium_total(self, num):
self.medium_total = num
self.track_total = num
def set_data_url(self, url):
self.data_url = url
def get_name(self):
return self.title
def get_writer(self):
return self.writer
def get_producers(self):
return self.producers
def get_length(self):
return self.length
class Wikipedia(BeetsPlugin):
def __init__(self):
super(Wikipedia, self).__init__()
self.config.add({
'source_weight': 0.50
})
# ----------------------------------------------
""" Track_distance
item --> track to be matched(Item Object)
info is the TrackInfo object that proposed as a match
should return a (dist,dist_max) pair of floats indicating the distance
"""
def track_distance(self, item, info):
dist = Distance()
return dist
# ----------------------------------------------
"""
album_info --> AlbumInfo Object reflecting the album to be compared.
items --> sequence of all Item objects that will be matched
mapping --> dictionary mapping Items to TrackInfo objects
"""
def album_distance(self, items, album_info, mapping):
"""
Returns the album distance.
"""
dist = Distance()
if (album_info.data_source == 'Wikipedia'):
dist.add('source', self.config['source_weight'].as_number())
return dist
# ----------------------------------------------
def candidates(self, items, artist, album, va_likely):
"""Returns a list of AlbumInfo objects for Wikipedia search results
matching an album and artist (if not various).
"""
candidate_list = []
candidate_list.append(WikiAlbum(artist, album))
return candidate_list
| if(i != '"'):
temp_name += i | conditional_block |
wikipedia.py | # -*- coding: utf-8 -*-
# This file is part of beets.
# Copyright 2017, Gokturk Gok & Nurefsan Sarikaya.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Adds Wikipedia search support to the auto-tagger. Requires the
BeautifulSoup library.
"""
from __future__ import division, absolute_import, print_function
from beets.autotag.hooks import Distance
from beets.plugins import BeetsPlugin
from requests.exceptions import ConnectionError
import time
import urllib.request
from bs4 import BeautifulSoup
info_boxList = ['Released', 'Genre', 'Length', 'Label']
# -----------------------------------------------------------------------
# is Info Box check
def is_in_info(liste):
for i in info_boxList:
if(len(liste) != 0 and liste[0] == i):
return True
return False
def get_track_length(duration):
"""
Returns the track length in seconds for a wiki duration.
"""
try:
length = time.strptime(duration, '%M:%S')
except ValueError:
return None
return length.tm_min * 60 + length.tm_sec
# ----------------------------------------------------------------------
"""
WikiAlbum class is being served like AlbumInfo object which keeps all album
meta data in itself.
"""
class WikiAlbum(object):
def __init__(self, artist, album_name):
self.album = album_name
self.artist = artist
self.tracks = []
self.album_length = ""
self.label = None
self.year = None
self.data_source = "Wikipedia"
self.data_url = ""
self.album_id = 1
self.va = False
self.artist_id = 1
self.asin = None
self.albumtype = None
self.year = None
self.month = None
self.day = None
self.mediums = 1
self.artist_sort = None
self.releasegroup_id = None
self.catalognum = None
self.script = None
self.language = None
self.country = None
self.albumstatus = None
self.media = None
self.albumdisambig = None
self.artist_credit = None
self.original_year = None
self.original_month = None
self.original_day = None
try:
url = 'https://en.wikipedia.org/wiki/' + album_name + \
'_(' + artist + '_album)'
html = urllib.request.urlopen(url).read()
except urllib.error.HTTPError:
try:
url = 'https://en.wikipedia.org/wiki/' + album_name + \
'_(album)'
html = urllib.request.urlopen(url).read()
except urllib.error.HTTPError:
try:
url = 'https://en.wikipedia.org/wiki/' + album_name
html = urllib.request.urlopen(url).read()
except urllib.error.HTTPError:
try:
# in case of album name has (Deluxe) extension
url = 'https://en.wikipedia.org/wiki/' +\
album_name[:-9] + '_(' + artist + '_album)'
html = urllib.request.urlopen(url).read()
except urllib.error.HTTPError:
raise urllib.error.HTTPError
except ConnectionError:
raise ConnectionError
self.data_url = url
soup = BeautifulSoup(html, "lxml")
# ------------------ INFOBOX PARSING ----------------------#
info_box = soup.findAll("table", {"class": "infobox"})
info_counter = 1
for info in info_box:
for row in info.findAll("tr"):
if (self.artist == "" and info_counter == 3):
self.artist = row.getText().split()[-1]
data = (row.getText()).split('\n')
data = list(filter(None, data))
if (is_in_info(data)):
if(data[0] == 'Label'):
self.label = str(data[1:])
elif(data[0] == 'Released'):
if (data[1][-1] == ")"):
self.year = int(data[1][-11:-7])
self.month = int(data[1][-6:-4])
self.day = int(data[1][-3:-1])
else:
self.year = int(data[1][-4:])
# Album length which is converted into beets length format
elif(data[0] == "Length"):
self.album_length = get_track_length(data[1])
# getting Genre
elif(data[0] == "Genre"):
fixed_genre = ""
for character in data[1]:
if (character != ("[" or "{" or "(")):
fixed_genre += character
else:
break
self.genre = fixed_genre
info_counter += 1
track_tables = soup.findAll("table", {"class": "tracklist"})
# set the MediumTotal,total number of tracks in an album is required
track_counter = 0
for table in track_tables:
for row in table.findAll("tr"):
row_data = (row.getText()).split('\n')
row_data = list(filter(None, row_data))
# pick only the tracks not irrelevant parts of the tables.
# len(row_data) check is used for getting the correct
# table data & checks track numbers whether it is exist or not
if (row_data[0][:-1].isdigit() and len(row_data) > 3):
track_counter += 1
for table in track_tables:
for row in table.findAll("tr"):
row_data = (row.getText()).split('\n')
row_data = list(filter(None, row_data))
# pick only the tracks not irrelevant parts of the tables.
# len(row_data) check is used for getting the correct table
# data and checking track numbers whether it is exist or not
if (row_data[0][:-1].isdigit() and len(row_data) > 3):
one_track = Track(row_data)
one_track.set_data_url(self.data_url)
one_track.set_medium_total(track_counter)
self.tracks.append(one_track)
def get_album_len(self):
return self.album_length
def get_tracks(self):
return self.tracks
# keeps the metadata of tracks which are gathered from wikipedia
# like TrackInfo object in beets
class Track(object):
def __init__(self, row):
#####
self.medium = 1
self.disctitle = "CD"
#####
self.medium_index = int(row[0][:-1])
self.track_id = int(row[0][:-1])
self.index = int(row[0][:-1])
# wiping out the character (") from track name
temp_name = ""
for i in row[1]:
if(i != '"'):
temp_name += i
self.title = str(temp_name)
self.writer = list(row[2].split(','))
self.producers = row[3:-1]
self.length = get_track_length(row[-1])
self.artist = None
self.artist_id = None
self.media = None
self.medium_total = None
self.artist_sort = None
self.artist_credit = None
self.data_source = "Wikipedia"
self.data_url = None
self.lyricist = None
self.composer = None
self.composer_sort = None
self.arranger = None
self.track_alt = None
self.track = self.index
self.disc = self.medium
self.disctotal = 2
self.mb_trackid = self.track_id
self.mb_albumid = None
self.mb_album_artistid = None
self.mb_artist_id = None
self.mb_releasegroupid = None
self.comp = 0
self.tracktotal = None
self.albumartist_sort = None
self.albumartist_credit = None
def set_medium_total(self, num):
self.medium_total = num
self.track_total = num
def set_data_url(self, url):
self.data_url = url
def get_name(self):
return self.title
def get_writer(self):
return self.writer
def get_producers(self):
return self.producers
def get_length(self):
return self.length
class Wikipedia(BeetsPlugin):
def | (self):
super(Wikipedia, self).__init__()
self.config.add({
'source_weight': 0.50
})
# ----------------------------------------------
""" Track_distance
item --> track to be matched(Item Object)
info is the TrackInfo object that proposed as a match
should return a (dist,dist_max) pair of floats indicating the distance
"""
def track_distance(self, item, info):
dist = Distance()
return dist
# ----------------------------------------------
"""
album_info --> AlbumInfo Object reflecting the album to be compared.
items --> sequence of all Item objects that will be matched
mapping --> dictionary mapping Items to TrackInfo objects
"""
def album_distance(self, items, album_info, mapping):
"""
Returns the album distance.
"""
dist = Distance()
if (album_info.data_source == 'Wikipedia'):
dist.add('source', self.config['source_weight'].as_number())
return dist
# ----------------------------------------------
def candidates(self, items, artist, album, va_likely):
"""Returns a list of AlbumInfo objects for Wikipedia search results
matching an album and artist (if not various).
"""
candidate_list = []
candidate_list.append(WikiAlbum(artist, album))
return candidate_list
| __init__ | identifier_name |
wikipedia.py | # -*- coding: utf-8 -*-
# This file is part of beets.
# Copyright 2017, Gokturk Gok & Nurefsan Sarikaya.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Adds Wikipedia search support to the auto-tagger. Requires the
BeautifulSoup library.
"""
from __future__ import division, absolute_import, print_function
from beets.autotag.hooks import Distance
from beets.plugins import BeetsPlugin
from requests.exceptions import ConnectionError
import time
import urllib.request
from bs4 import BeautifulSoup
info_boxList = ['Released', 'Genre', 'Length', 'Label']
# -----------------------------------------------------------------------
# is Info Box check
def is_in_info(liste):
for i in info_boxList:
if(len(liste) != 0 and liste[0] == i):
return True
return False
def get_track_length(duration):
"""
Returns the track length in seconds for a wiki duration.
"""
try:
length = time.strptime(duration, '%M:%S')
except ValueError:
return None
return length.tm_min * 60 + length.tm_sec
# ----------------------------------------------------------------------
"""
WikiAlbum class is being served like AlbumInfo object which keeps all album
meta data in itself.
"""
class WikiAlbum(object):
def __init__(self, artist, album_name):
self.album = album_name
self.artist = artist
self.tracks = []
self.album_length = ""
self.label = None
self.year = None
self.data_source = "Wikipedia"
self.data_url = ""
self.album_id = 1
self.va = False
self.artist_id = 1
self.asin = None
self.albumtype = None
self.year = None
self.month = None
self.day = None
self.mediums = 1
self.artist_sort = None
self.releasegroup_id = None
self.catalognum = None
self.script = None
self.language = None
self.country = None
self.albumstatus = None
self.media = None
self.albumdisambig = None
self.artist_credit = None
self.original_year = None
self.original_month = None
self.original_day = None
try:
url = 'https://en.wikipedia.org/wiki/' + album_name + \
'_(' + artist + '_album)'
html = urllib.request.urlopen(url).read()
except urllib.error.HTTPError:
try:
url = 'https://en.wikipedia.org/wiki/' + album_name + \
'_(album)'
html = urllib.request.urlopen(url).read()
except urllib.error.HTTPError:
try:
url = 'https://en.wikipedia.org/wiki/' + album_name
html = urllib.request.urlopen(url).read()
except urllib.error.HTTPError:
try:
# in case of album name has (Deluxe) extension
url = 'https://en.wikipedia.org/wiki/' +\
album_name[:-9] + '_(' + artist + '_album)'
html = urllib.request.urlopen(url).read()
except urllib.error.HTTPError:
raise urllib.error.HTTPError
except ConnectionError:
raise ConnectionError
self.data_url = url
soup = BeautifulSoup(html, "lxml")
# ------------------ INFOBOX PARSING ----------------------#
info_box = soup.findAll("table", {"class": "infobox"})
info_counter = 1
for info in info_box:
for row in info.findAll("tr"):
if (self.artist == "" and info_counter == 3):
self.artist = row.getText().split()[-1]
data = (row.getText()).split('\n')
data = list(filter(None, data))
if (is_in_info(data)):
if(data[0] == 'Label'):
self.label = str(data[1:])
elif(data[0] == 'Released'):
if (data[1][-1] == ")"):
self.year = int(data[1][-11:-7])
self.month = int(data[1][-6:-4])
self.day = int(data[1][-3:-1])
else:
self.year = int(data[1][-4:])
# Album length which is converted into beets length format
elif(data[0] == "Length"):
self.album_length = get_track_length(data[1])
# getting Genre
elif(data[0] == "Genre"):
fixed_genre = ""
for character in data[1]:
if (character != ("[" or "{" or "(")):
fixed_genre += character
else:
break
self.genre = fixed_genre
info_counter += 1
track_tables = soup.findAll("table", {"class": "tracklist"})
# set the MediumTotal,total number of tracks in an album is required
track_counter = 0
for table in track_tables:
for row in table.findAll("tr"):
row_data = (row.getText()).split('\n')
row_data = list(filter(None, row_data))
# pick only the tracks not irrelevant parts of the tables.
# len(row_data) check is used for getting the correct
# table data & checks track numbers whether it is exist or not
if (row_data[0][:-1].isdigit() and len(row_data) > 3):
track_counter += 1
for table in track_tables:
for row in table.findAll("tr"):
row_data = (row.getText()).split('\n')
row_data = list(filter(None, row_data))
# pick only the tracks not irrelevant parts of the tables.
# len(row_data) check is used for getting the correct table
# data and checking track numbers whether it is exist or not
if (row_data[0][:-1].isdigit() and len(row_data) > 3):
one_track = Track(row_data)
one_track.set_data_url(self.data_url)
one_track.set_medium_total(track_counter)
self.tracks.append(one_track)
def get_album_len(self):
return self.album_length
def get_tracks(self):
return self.tracks
# keeps the metadata of tracks which are gathered from wikipedia
# like TrackInfo object in beets
class Track(object):
def __init__(self, row):
#####
self.medium = 1
self.disctitle = "CD"
#####
self.medium_index = int(row[0][:-1])
self.track_id = int(row[0][:-1])
self.index = int(row[0][:-1])
# wiping out the character (") from track name
temp_name = ""
for i in row[1]:
if(i != '"'):
temp_name += i
self.title = str(temp_name)
self.writer = list(row[2].split(','))
self.producers = row[3:-1]
self.length = get_track_length(row[-1])
self.artist = None
self.artist_id = None
self.media = None
self.medium_total = None
self.artist_sort = None
self.artist_credit = None
self.data_source = "Wikipedia"
self.data_url = None
self.lyricist = None
self.composer = None
self.composer_sort = None
self.arranger = None
self.track_alt = None
self.track = self.index
self.disc = self.medium
self.disctotal = 2
self.mb_trackid = self.track_id
self.mb_albumid = None
self.mb_album_artistid = None
self.mb_artist_id = None
self.mb_releasegroupid = None
self.comp = 0
self.tracktotal = None
self.albumartist_sort = None
self.albumartist_credit = None
def set_medium_total(self, num):
self.medium_total = num
self.track_total = num
def set_data_url(self, url):
self.data_url = url
def get_name(self):
return self.title
def get_writer(self):
return self.writer
def get_producers(self):
return self.producers
def get_length(self):
return self.length
class Wikipedia(BeetsPlugin):
def __init__(self):
super(Wikipedia, self).__init__()
self.config.add({
'source_weight': 0.50
})
# ----------------------------------------------
""" Track_distance
item --> track to be matched(Item Object)
info is the TrackInfo object that proposed as a match
should return a (dist,dist_max) pair of floats indicating the distance
"""
def track_distance(self, item, info):
dist = Distance()
return dist
# ----------------------------------------------
"""
album_info --> AlbumInfo Object reflecting the album to be compared.
items --> sequence of all Item objects that will be matched
mapping --> dictionary mapping Items to TrackInfo objects
"""
def album_distance(self, items, album_info, mapping): | Returns the album distance.
"""
dist = Distance()
if (album_info.data_source == 'Wikipedia'):
dist.add('source', self.config['source_weight'].as_number())
return dist
# ----------------------------------------------
def candidates(self, items, artist, album, va_likely):
"""Returns a list of AlbumInfo objects for Wikipedia search results
matching an album and artist (if not various).
"""
candidate_list = []
candidate_list.append(WikiAlbum(artist, album))
return candidate_list | """ | random_line_split |
wikipedia.py | # -*- coding: utf-8 -*-
# This file is part of beets.
# Copyright 2017, Gokturk Gok & Nurefsan Sarikaya.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Adds Wikipedia search support to the auto-tagger. Requires the
BeautifulSoup library.
"""
from __future__ import division, absolute_import, print_function
from beets.autotag.hooks import Distance
from beets.plugins import BeetsPlugin
from requests.exceptions import ConnectionError
import time
import urllib.request
from bs4 import BeautifulSoup
info_boxList = ['Released', 'Genre', 'Length', 'Label']
# -----------------------------------------------------------------------
# is Info Box check
def is_in_info(liste):
for i in info_boxList:
if(len(liste) != 0 and liste[0] == i):
return True
return False
def get_track_length(duration):
"""
Returns the track length in seconds for a wiki duration.
"""
try:
length = time.strptime(duration, '%M:%S')
except ValueError:
return None
return length.tm_min * 60 + length.tm_sec
# ----------------------------------------------------------------------
"""
WikiAlbum class is being served like AlbumInfo object which keeps all album
meta data in itself.
"""
class WikiAlbum(object):
def __init__(self, artist, album_name):
self.album = album_name
self.artist = artist
self.tracks = []
self.album_length = ""
self.label = None
self.year = None
self.data_source = "Wikipedia"
self.data_url = ""
self.album_id = 1
self.va = False
self.artist_id = 1
self.asin = None
self.albumtype = None
self.year = None
self.month = None
self.day = None
self.mediums = 1
self.artist_sort = None
self.releasegroup_id = None
self.catalognum = None
self.script = None
self.language = None
self.country = None
self.albumstatus = None
self.media = None
self.albumdisambig = None
self.artist_credit = None
self.original_year = None
self.original_month = None
self.original_day = None
try:
url = 'https://en.wikipedia.org/wiki/' + album_name + \
'_(' + artist + '_album)'
html = urllib.request.urlopen(url).read()
except urllib.error.HTTPError:
try:
url = 'https://en.wikipedia.org/wiki/' + album_name + \
'_(album)'
html = urllib.request.urlopen(url).read()
except urllib.error.HTTPError:
try:
url = 'https://en.wikipedia.org/wiki/' + album_name
html = urllib.request.urlopen(url).read()
except urllib.error.HTTPError:
try:
# in case of album name has (Deluxe) extension
url = 'https://en.wikipedia.org/wiki/' +\
album_name[:-9] + '_(' + artist + '_album)'
html = urllib.request.urlopen(url).read()
except urllib.error.HTTPError:
raise urllib.error.HTTPError
except ConnectionError:
raise ConnectionError
self.data_url = url
soup = BeautifulSoup(html, "lxml")
# ------------------ INFOBOX PARSING ----------------------#
info_box = soup.findAll("table", {"class": "infobox"})
info_counter = 1
for info in info_box:
for row in info.findAll("tr"):
if (self.artist == "" and info_counter == 3):
self.artist = row.getText().split()[-1]
data = (row.getText()).split('\n')
data = list(filter(None, data))
if (is_in_info(data)):
if(data[0] == 'Label'):
self.label = str(data[1:])
elif(data[0] == 'Released'):
if (data[1][-1] == ")"):
self.year = int(data[1][-11:-7])
self.month = int(data[1][-6:-4])
self.day = int(data[1][-3:-1])
else:
self.year = int(data[1][-4:])
# Album length which is converted into beets length format
elif(data[0] == "Length"):
self.album_length = get_track_length(data[1])
# getting Genre
elif(data[0] == "Genre"):
fixed_genre = ""
for character in data[1]:
if (character != ("[" or "{" or "(")):
fixed_genre += character
else:
break
self.genre = fixed_genre
info_counter += 1
track_tables = soup.findAll("table", {"class": "tracklist"})
# set the MediumTotal,total number of tracks in an album is required
track_counter = 0
for table in track_tables:
for row in table.findAll("tr"):
row_data = (row.getText()).split('\n')
row_data = list(filter(None, row_data))
# pick only the tracks not irrelevant parts of the tables.
# len(row_data) check is used for getting the correct
# table data & checks track numbers whether it is exist or not
if (row_data[0][:-1].isdigit() and len(row_data) > 3):
track_counter += 1
for table in track_tables:
for row in table.findAll("tr"):
row_data = (row.getText()).split('\n')
row_data = list(filter(None, row_data))
# pick only the tracks not irrelevant parts of the tables.
# len(row_data) check is used for getting the correct table
# data and checking track numbers whether it is exist or not
if (row_data[0][:-1].isdigit() and len(row_data) > 3):
one_track = Track(row_data)
one_track.set_data_url(self.data_url)
one_track.set_medium_total(track_counter)
self.tracks.append(one_track)
def get_album_len(self):
return self.album_length
def get_tracks(self):
return self.tracks
# keeps the metadata of tracks which are gathered from wikipedia
# like TrackInfo object in beets
class Track(object):
def __init__(self, row):
#####
self.medium = 1
self.disctitle = "CD"
#####
self.medium_index = int(row[0][:-1])
self.track_id = int(row[0][:-1])
self.index = int(row[0][:-1])
# wiping out the character (") from track name
temp_name = ""
for i in row[1]:
if(i != '"'):
temp_name += i
self.title = str(temp_name)
self.writer = list(row[2].split(','))
self.producers = row[3:-1]
self.length = get_track_length(row[-1])
self.artist = None
self.artist_id = None
self.media = None
self.medium_total = None
self.artist_sort = None
self.artist_credit = None
self.data_source = "Wikipedia"
self.data_url = None
self.lyricist = None
self.composer = None
self.composer_sort = None
self.arranger = None
self.track_alt = None
self.track = self.index
self.disc = self.medium
self.disctotal = 2
self.mb_trackid = self.track_id
self.mb_albumid = None
self.mb_album_artistid = None
self.mb_artist_id = None
self.mb_releasegroupid = None
self.comp = 0
self.tracktotal = None
self.albumartist_sort = None
self.albumartist_credit = None
def set_medium_total(self, num):
self.medium_total = num
self.track_total = num
def set_data_url(self, url):
self.data_url = url
def get_name(self):
return self.title
def get_writer(self):
|
def get_producers(self):
return self.producers
def get_length(self):
return self.length
class Wikipedia(BeetsPlugin):
def __init__(self):
super(Wikipedia, self).__init__()
self.config.add({
'source_weight': 0.50
})
# ----------------------------------------------
""" Track_distance
item --> track to be matched(Item Object)
info is the TrackInfo object that proposed as a match
should return a (dist,dist_max) pair of floats indicating the distance
"""
def track_distance(self, item, info):
dist = Distance()
return dist
# ----------------------------------------------
"""
album_info --> AlbumInfo Object reflecting the album to be compared.
items --> sequence of all Item objects that will be matched
mapping --> dictionary mapping Items to TrackInfo objects
"""
def album_distance(self, items, album_info, mapping):
"""
Returns the album distance.
"""
dist = Distance()
if (album_info.data_source == 'Wikipedia'):
dist.add('source', self.config['source_weight'].as_number())
return dist
# ----------------------------------------------
def candidates(self, items, artist, album, va_likely):
"""Returns a list of AlbumInfo objects for Wikipedia search results
matching an album and artist (if not various).
"""
candidate_list = []
candidate_list.append(WikiAlbum(artist, album))
return candidate_list
| return self.writer | identifier_body |
import-docs.py | #!/usr/bin/python
import sys
import re
import json
from operator import itemgetter
from datetime import datetime, date, timedelta
import calendar
from random import *
import csv
in_file = "/data/mid/2011_gs_supplemented.txt"
#in_file = "/data/mid/2011_sorted_pos4.txt"
#gs_file = "/data/mid/last9months2011.xlsx"
gs_file = "/data/mid/gs_master_plus_corrected.xlsx"
action_file = "/data/mid/actions.csv"
nation_file = "/data/mid/nations.csv"
iso_file = "/data/mid/ISO2.csv"
#in_file = "2011_sorted_pos4.txt"
fileencoding = "windows-1252"
mid_action_names = [
'No militarized action',
'Threat to use force',
'Threat to use force',
'Show of force',
'Alert',
'Mobilization',
'Border fortification',
'Border violation',
'Blockade',
'Occupation of territory',
'Seizure of material or personnel',
'Attack',
'Clash',
]
mid_action_types = [
'NO_MILITARIZED_ACTION',
'THREAT_TO_USE_FORCE',
'THREAT_TO_DECLARE_WAR',
'SHOW_OF_FORCE',
'ALERT',
'MOBILIZATION',
'FORTIFY_BORDER',
'BORDER_VIOLATION',
'BLOCKADE',
'OCCUPATION_OF_TERRITORY',
'SEIZURE',
'ATTACK',
'CLASH',
]
mid_action_gold = [
'no militarized action (0)',
'threat to use force (1)',
'threat to declare war (4)',
'show of force (7)',
'alert (8)',
'mobilization (10)',
'fortify border (11)',
'border violation (12)',
'blockade (13)',
'occupation of territory (14)',
'seizure (15)',
'attack (16)',
'clash (17)',
]
# combine the lists to create dictionaries
mid_dict = dict(zip(mid_action_types, mid_action_names))
gold_dict = dict(zip(mid_action_gold, mid_action_names))
# minimum threshold of classification confidence
# to classify as MID
MID_Threshold = 0.5
# read ISO codes for each nation
isoDict = {}
nationDict = {}
with open(iso_file) as csvfile:
isoreader = csv.reader(csvfile,delimiter=',')
for row in isoreader:
key = row[0].lower().strip()
isoDict[key] = row[1]
nationDict[key] = row[2]
# read action similarities from csv
actDict = {}
with open(action_file) as csvfile:
actreader = csv.reader(csvfile,delimiter=',')
for row in actreader:
key = row[0].lower()
actDict[key] = []
for item in row[1:]:
actDict[key] = actDict[key] + [item.lower()]
# read nation similarities from csv
natDict = {}
with open(nation_file) as csvfile:
natreader = csv.reader(csvfile,delimiter=',')
for row in natreader:
try:
rowNme,rowCOW = row[0].split('(')
except:
rowNme = row[0]
key = rowNme.strip()
#key = rowTrm.upper()
natDict[key] = []
for item in row[1:]:
try:
rowNme,rowCOW = item.split('(')
except:
rowNme = item
rowTrm = rowNme.strip()
#rowUpr = rowTrm.upper()
natDict[key] = natDict[key] + [rowTrm]
# read gold standard classification from Excel
import xlrd
with xlrd.open_workbook(gs_file) as book:
# 0 corresponds for 1st worksheet, usually named 'Book1'
sheet = book.sheet_by_index(0)
# gets col A values, the gold standard keys
A = [ A for A in sheet.col_values(0) ]
# gets col B values, the golden standard classifications
B = [ B for B in sheet.col_values(1) ]
# get initiator
C = [ C for C in sheet.col_values(2) ]
# get target
D = [ D for D in sheet.col_values(3) ]
# get start month
E = [ E for E in sheet.col_values(4) ]
# get start day
F = [ F for F in sheet.col_values(5) ]
# get start year
G = [ G for G in sheet.col_values(6) ]
# FIX DICTIONARIES: Use the highest MID classification
# strip whitespace from the start/end of A and B elements
# convert B elements to lowercase
gold_keys = []
for row in A[1:]:
rowStr = row.encode('utf-8')
rowTrm = rowStr.strip()
rowUni = rowTrm.decode('utf-8')
gold_keys.append(rowUni)
gold_actions = []
for row in B[1:]:
rowStr = row.encode('utf-8')
rowTrm = rowStr.strip()
rowLwr = rowTrm.lower()
gold_actions.append(rowLwr)
gold_initiators = []
for row in C[1:]:
rowStr = row.encode('utf-8')
try:
rowNme,rowCOW = rowStr.split('(')
except:
rowNme = rowStr
rowTrm = rowNme.strip()
#rowUpr = rowTrm.upper()
if len(rowTrm) < 1:
rowTrm = 'None'
gold_initiators.append(rowTrm)
gold_targets = []
for row in D[1:]:
rowStr = row.encode('utf-8')
try:
rowNme,rowCOW = rowStr.split('(')
except:
rowNme = rowStr
rowTrm = rowNme.strip()
#rowUpr = rowTrm.upper()
if len(rowTrm) < 1:
rowTrm = 'None'
gold_targets.append(rowTrm)
gold_months = []
for row in E[1:]:
try:
gold_months.append(int(row))
except:
gold_months.append(1) # default to January
gold_days = []
for row in F[1:]:
try:
gold_days.append(int(row))
except:
gold_days.append(1) # default to first of month
gold_years = []
for row in G[1:]:
try:
gold_years.append(int(row))
except:
gold_years.append(None)
# gs_mids is a dictionary of document keys and their integer MID levels
gs_mids = {}
gs_actions = {}
gs_initiators = {}
gs_targets = {}
gs_stdates = {}
# loop through the spreadsheet columns, selecting the highest MID for each doc
for i in range(0,len(gold_actions)):
thisAction = gold_actions[i]
if (gold_actions[i] is not None) and (gold_actions[i] is not ""):
# convert string to int (e.g., 'Clash (17)' to 17)
junk,mid_level = thisAction.split('(')
mid_level,junk = mid_level.split(')')
mid_level = int(mid_level)
# use key to check if this document already has an MID level
if gold_keys[i] in gs_mids:
if mid_level > gold_keys[i]:
update_dict = True
else:
update_dict = False
else:
update_dict = True
# if the key is undefined or the new mid_level is higher
# then update the dictionary
if update_dict:
gs_mids[gold_keys[i]] = mid_level
gs_actions[gold_keys[i]] = gold_actions[i]
gs_initiators[gold_keys[i]] = gold_initiators[i]
gs_targets[gold_keys[i]] = gold_targets[i]
if gold_years[i] is not None:
gs_stdates[gold_keys[i]] = date(gold_years[i],gold_months[i],gold_days[i])
else:
gs_stdates[gold_keys[i]] = None
if len(sys.argv)>1:
in_file = sys.argv[1]
# db structure
# Docs table (ID, filename, title, timestamp, source, svm(float), text)
# Hits table (primary key: ID, char: AMT_HitID, foreignkey: doc)
from models import *
assignments.drop(engine, checkfirst=True)
docs.drop(engine, checkfirst=True)
metadata.create_all(engine) # create if it doesn't exist yet
def | (file):
raw = file.readline()
if not raw:
raise EOFError()
if raw.find('-------------------------------------------------')>=0:
raise ValueError()
return raw.decode(fileencoding)
file = open(in_file, 'r')
readdocs = []
count = 0
while file:
meta = {}
try:
while True:
# read meta-data
dataline = nextline(file).rstrip()
if '>>>>>>>' in dataline:
break
elif ':' in dataline:
# split at the first colon
tag,value = dataline.split(':',1)
tag = tag.lower()
tag = tag.strip()
tag = tag.replace(' ','_')
# the Countries meta-data is represented as a list of
# ('Country', freq) pairs, where 'Country' is the name
# of the country and freq is the frequency it occurs in
# the document. We need to parse this and select the 5
# most frequent countries for inclusion in the database
if tag == 'countries':
valueList = value.strip().split(')')
countryList = []
for countryFreq in valueList:
countryTuple = tuple(countryFreq.strip()[1:].split(','))
if countryTuple[0] != '':
country = countryTuple[0]
freq = int(countryTuple[1])
countryList += [[country,freq]]
# sort countries in descending order by frequency
countryList.sort(key=itemgetter(1),reverse=True)
# get the 5 most frequent countries
# or all countries if there are less than five
for index in range(1, min(5, len(countryList)) + 1):
(country, freq) = countryList[index - 1]
meta['country'+str(index)] = country.strip("'")
else:
# add tag and value to the dictionary
meta[tag] = value.strip()
# get text of document
text = u""
while True:
t = nextline(file)
if t.find('<<<<<<<<<<<<<<<')>=0:
break
t = t.replace('\r\n','\n').replace('\r','\n').rstrip()
text += t + u"\n"
meta['text'] = text
# get NER and NELL-CAT from pipeline data
# open pipeline data file (HTML version)
# store the frequency of each NELL-CAT / NER location in document
#try:
locFreq = {}
counted = [False]*len(text)
with open('/data/mid/docs/' + meta['key'].strip()) as nell_file:
for line in nell_file:
if line.startswith('Type: ner'):
if line.find('Value: LOCATION') > -1:
if not(counted[spanStart]):
counted[spanStart] = True
location = text[spanStart:spanEnd]
if location in locFreq:
locFreq[location] = locFreq[location] + 1
else:
locFreq[location] = 1
elif line.startswith('Type: mid-attr'):
# parse out list of MID actions
# and the pipeline's confidence in each
# from the HTML output of the pipeline
preamble,values = line.split('Value: ',1)
valueList = values.split(',')
valueDict = {}
for value in valueList:
key,num = value.split(':',1)
if num.find('<') > -1:
num,html = num.split('<',1)
valueDict[key] = float(num)
# filter out actions other than those on
# our list of actions we care about
actionDict = {}
for action in mid_action_types:
# if there is a defined strength for this action
if action in valueDict:
actionDict[action] = valueDict[action]
else:
actionDict[action] = 0
# Set 'NO_MILITARIZED_ACTION' to at least threshold
if actionDict['NO_MILITARIZED_ACTION'] < MID_Threshold:
actionDict['NO_MILITARIZED_ACTION'] = MID_Threshold
elif line.startswith('Type: nell-cat'):
isLoc = False
if line.find('Value: location') > -1:
isLoc = True
if line.find('Value: country') > -1:
isLoc = True
if line.find('Value: geo') > -1:
isLoc = True
if isLoc:
if not(counted[spanStart]):
counted[spanStart] = True
location = text[spanStart:spanEnd]
if location in locFreq:
locFreq[location] = locFreq[location] + 1
else:
locFreq[location] = 1
elif line.startswith('<li><div class="annotation"'):
spanList = line.split(' ')
for span in spanList:
if span.startswith('spanStart='):
itemList = span.split('"')
spanStart = int(itemList[1])
elif span.startswith('spanEnd='):
itemList = span.split('"')
spanEnd = int(itemList[1])
# add mid action classification to document data
# pick the top 4 most confident classifications
# if the 4th classification is below threshold,
# suggest "No militarized action" instead
bestAction = sorted(actionDict, key=actionDict.get, reverse=True)
for index in range(1,5):
meta['mid_attr'+str(index)] = mid_dict[bestAction[index - 1]]
# pick the top 5 most frequent locations
bestLoc = sorted(locFreq, key=locFreq.get, reverse=True)
for index in range(1,min(5, len(bestLoc)) + 1):
meta['nelloc'+str(index)] = bestLoc[index - 1]
# add gold standard MID classification to database
gold_key = meta['gskey']
if gold_key in gs_actions:
gold_action = gs_actions[gold_key]
if gold_action in gold_dict:
gold_db = gold_dict[gold_action]
meta['gsAction'] = gold_db
else:
meta['gsAction'] = 'Unknown'
if (gs_mids[gold_key] > 6) or (gs_mids[gold_key] == 0):
meta['gsActionNum'] = gs_mids[gold_key]
else: # don't discriminate between actions 1 thru 6
meta['gsActionNum'] = 6
# convert nations into Qualtrics format
ini = gs_initiators[gold_key].lower().strip()
tar = gs_targets[gold_key].lower().strip()
if ini in nationDict:
meta['gsInitiator'] = nationDict[ini]
else:
meta['gsInitiator'] = gs_initiators[gold_key]
if tar in nationDict:
meta['gsTargets'] = nationDict[tar]
else:
meta['gsTargets'] = gs_targets[gold_key]
meta['gsDate'] = gs_stdates[gold_key]
# get ISO codes for gold standard nations
if ini in isoDict:
meta['gsInitiatorISO'] = isoDict[ini]
else:
print "ISO for " + ini + " not found."
if tar in isoDict:
meta['gsTargetISO'] = isoDict[tar]
else:
print "ISO for " + tar + " not found."
# make several incorrect suggestions for the action
similar_actions = actDict[gold_action]
# suggest that there's no MID
# with proportion of non-MID documents in gold standard
suggest_no_mid = (gold_action != mid_action_gold[0]) and (randint(0,1) <= 0.2108)
if suggest_no_mid:
# randomly add "no MID action" as a suggestion
similar_actions = [mid_action_gold[0]] + similar_actions
# assign actions
meta['r1Action'] = gold_dict[similar_actions[0]]
meta['r2Action'] = gold_dict[similar_actions[1]]
meta['r3Action'] = gold_dict[similar_actions[2]]
meta['r4Action'] = gold_dict[similar_actions[3]]
# now let's make incorrect suggestions for the date
dateList = meta['date'].split(' ')
dateStr = ' '.join(dateList[0:3])
try:
pubDate = datetime.strptime(dateStr,'%B %d, %Y')
except:
try:
pubDate = datetime.strptime(dateStr,'%B %d, %Y,')
except:
pubDate = datetime.strptime(dateStr,'%B %d %Y')
# define a range of dates: 10 days before and 1 day after publication
pick_a_day = range(-10,1)
if meta['gsDate'] is not None:
pub_gold_diff = meta['gsDate'].day - pubDate.day
if pub_gold_diff in pick_a_day:
pick_a_day.pop(pick_a_day.index(pub_gold_diff)) # cannot pick gold/correct
# now select random dates within that range
suggested_dates = []
if suggest_no_mid and (meta['gsDate'] is not None):
suggested_dates = [None]
while len(suggested_dates) < 4:
j = randint(0,len(pick_a_day) - 1)
suggested_dates = suggested_dates + [pubDate + timedelta(days=pick_a_day[j])]
pick_a_day.pop(j)
# assign dates
meta['r1Date'] = suggested_dates[0]
meta['r2Date'] = suggested_dates[1]
meta['r3Date'] = suggested_dates[2]
meta['r4Date'] = suggested_dates[3]
# suggest some incorrect initiators
suggested_nations = []
if suggest_no_mid:
suggested_nations = suggested_nations + ['None']
if ('country1' in meta) and (meta['country1'] != gs_initiators[gold_key]):
suggested_nations = suggested_nations + [meta['country1']]
if ('country2' in meta) and (meta['country2'] != gs_initiators[gold_key]):
suggested_nations = suggested_nations + [meta['country2']]
if ('country3' in meta) and (meta['country3'] != gs_initiators[gold_key]):
suggested_nations = suggested_nations + [meta['country3']]
if ('country4' in meta) and (meta['country4'] != gs_initiators[gold_key]):
suggested_nations = suggested_nations + [meta['country4']]
if len(suggested_nations) < 4:
if (gs_initiators[gold_key] is not None) and (gs_initiators[gold_key] != '') and (gs_initiators[gold_key] is not 'None'):
similar_nations = natDict[gs_initiators[gold_key]]
elif (len(suggested_nations) > 0) and (suggested_nations[0] in natDict):
similar_nations = natDict[suggested_nations[0]]
else: # get some random countries
similar_nations = natDict['NATO']
shuffle(similar_nations)
for nation in similar_nations:
if not(nation in suggested_nations):
suggested_nations = suggested_nations + [nation]
if len(suggested_nations) > 4:
break
# assign suggestions
meta['r1Initiator'] = suggested_nations[0]
meta['r2Initiator'] = suggested_nations[1]
meta['r3Initiator'] = suggested_nations[2]
meta['r4Initiator'] = suggested_nations[3]
# suggest some incorrect targets
suggested_nations = []
if suggest_no_mid:
suggested_nations = suggested_nations + ['None']
if ('country1' in meta) and (meta['country1'] != gs_targets[gold_key]):
suggested_nations = suggested_nations + [meta['country1']]
if ('country2' in meta) and (meta['country2'] != gs_targets[gold_key]):
suggested_nations = suggested_nations + [meta['country2']]
if ('country3' in meta) and (meta['country3'] != gs_targets[gold_key]):
suggested_nations = suggested_nations + [meta['country3']]
if ('country4' in meta) and (meta['country4'] != gs_targets[gold_key]):
suggested_nations = suggested_nations + [meta['country4']]
if len(suggested_nations) < 4:
if (gs_targets[gold_key] is not None) and (gs_targets[gold_key] != '') and (gs_targets[gold_key] is not 'None'):
similar_nations = natDict[gs_targets[gold_key]]
elif (len(suggested_nations) > 0) and (suggested_nations[0] in natDict):
similar_nations = natDict[suggested_nations[0]]
else: # get some random countries
similar_nations = natDict['NATO']
shuffle(similar_nations)
for nation in similar_nations:
if not(nation in suggested_nations):
suggested_nations = suggested_nations + [nation]
if len(suggested_nations) > 4:
break
# assign suggestions
meta['r1Targets'] = suggested_nations[0]
meta['r2Targets'] = suggested_nations[1]
meta['r3Targets'] = suggested_nations[2]
meta['r4Targets'] = suggested_nations[3]
# check to see if gold standard classification
# is correctly guessed by NELL
if meta['gsAction'] == meta['mid_attr1']:
meta['correct'] = 1
elif meta['gsAction'] == meta['mid_attr2']:
meta['correct'] = 1
elif meta['gsAction'] == meta['mid_attr3']:
meta['correct'] = 1
elif meta['gsAction'] == meta['mid_attr4']:
meta['correct'] = 1
else:
meta['correct'] = 0
#except:
# print "Could not find pipeline data for document "+meta['key']
# add data from this document to the document list
if (meta['gsAction'].lower() == 'unknown') or (meta['gsAction'] == ''):
pass # if the action is unknown, do not include document
elif meta['gsActionNum'] == 0:
readdocs += [meta] # if the action is 0 / no MID, include
elif (meta['gsInitiator'] == '') or (meta['gsInitiator'].lower() == 'none'):
pass # if the action is not zero, and no initiator, do not include
elif (meta['gsTargets'] == '') or (meta['gsTargets'].lower() == 'none'):
pass # if the action is not zero, and no target, do not include
else: # otherwise, include
readdocs += [meta]
try:
nextline(file)
except ValueError:
pass
except EOFError:
break
except ValueError:
print meta
print "document could not be read - encountered ----- separator. skipping doc."
# shuffle readdocs
from random import shuffle
shuffle(readdocs)
for data in readdocs:
# sqlalchemy.sql.expression.insert(table, values=None, inline=False, **kwargs)
from sqlalchemy.exc import IntegrityError
try:
# chk = docs.select(docs.c.filename==fname)
# result = engine.execute(chk.exists())
existing = connection.execute(docs.select(docs.c.key==data['key']))
if existing.first():
ins = docs.update().where(docs.c.key==data['key']).values(data)
else:
ins = docs.insert().values(data)
result = engine.execute(ins)
except IntegrityError as x:
print x
count = count+1
if count%100 == 0:
print count
print "%s imported."%count
# 20100913--0161-Sep13_2010_LN_NP1.txt-files.list
# Australian troops to encounter more violence in Afghanistan: Defense Force AFG-AUL
# September 13, 2010 Monday 1:25 AM EST
# News source: (c) Xinhua General News Service
# SVM score: 1.757
# >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
# Australian troops in Afghanistan can expect to encounter increased violence as
# they push deeper into Taliban sanctuaries, Australian Defense Force warned on
# Monday.
# Air Chief Marshal Angus Houston told reporters during a briefing in Canberra on
# Monday that troops of the Afghan National Army 4th Brigade, accompanied by
# their Australian mentors, were heading deeper into the Baluchi and Chora
# Valleys and Deh Rawood area of Oruzgan province in Afghanistan.
# "These are partnered patrols and it is dangerous work," said Houston, adding
# the fight was becoming more intense.
# "We can expect violence levels to increase as we contest the insurgency in
# greater numbers and across a wider area in the south. "
# Afghanistan is now in the midst of its fighting season with 10 Australian
# soldiers killed so far this year.
# The most recent fatality was Lance Corporal Jared MacKinney who was killed
# while accompanying Afghan troops in the Tangi Valley near Deh Rawood.
# The past few months had been tough for Australian troops, Houston said.
# "But it is important that we maintain our resolve, push forward with the
# strategy and keep the pressure on the Taliban," he said.
# He said last Tuesday that Australian troops had achieved significant success in
# training the Afghan security forces and pressuring insurgents.
# <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
| nextline | identifier_name |
import-docs.py | #!/usr/bin/python
import sys
import re
import json
from operator import itemgetter
from datetime import datetime, date, timedelta
import calendar
from random import *
import csv
in_file = "/data/mid/2011_gs_supplemented.txt"
#in_file = "/data/mid/2011_sorted_pos4.txt"
#gs_file = "/data/mid/last9months2011.xlsx"
gs_file = "/data/mid/gs_master_plus_corrected.xlsx"
action_file = "/data/mid/actions.csv"
nation_file = "/data/mid/nations.csv"
iso_file = "/data/mid/ISO2.csv"
#in_file = "2011_sorted_pos4.txt"
fileencoding = "windows-1252"
mid_action_names = [
'No militarized action',
'Threat to use force',
'Threat to use force',
'Show of force',
'Alert',
'Mobilization',
'Border fortification',
'Border violation',
'Blockade',
'Occupation of territory',
'Seizure of material or personnel',
'Attack',
'Clash',
]
mid_action_types = [
'NO_MILITARIZED_ACTION',
'THREAT_TO_USE_FORCE',
'THREAT_TO_DECLARE_WAR',
'SHOW_OF_FORCE',
'ALERT',
'MOBILIZATION',
'FORTIFY_BORDER',
'BORDER_VIOLATION',
'BLOCKADE',
'OCCUPATION_OF_TERRITORY',
'SEIZURE',
'ATTACK',
'CLASH',
]
mid_action_gold = [
'no militarized action (0)',
'threat to use force (1)',
'threat to declare war (4)',
'show of force (7)',
'alert (8)',
'mobilization (10)',
'fortify border (11)',
'border violation (12)',
'blockade (13)',
'occupation of territory (14)',
'seizure (15)',
'attack (16)',
'clash (17)',
]
# combine the lists to create dictionaries
mid_dict = dict(zip(mid_action_types, mid_action_names))
gold_dict = dict(zip(mid_action_gold, mid_action_names))
# minimum threshold of classification confidence
# to classify as MID
MID_Threshold = 0.5
# read ISO codes for each nation
isoDict = {}
nationDict = {}
with open(iso_file) as csvfile:
isoreader = csv.reader(csvfile,delimiter=',')
for row in isoreader:
key = row[0].lower().strip()
isoDict[key] = row[1]
nationDict[key] = row[2]
# read action similarities from csv
actDict = {}
with open(action_file) as csvfile:
actreader = csv.reader(csvfile,delimiter=',')
for row in actreader:
key = row[0].lower()
actDict[key] = []
for item in row[1:]:
actDict[key] = actDict[key] + [item.lower()]
# read nation similarities from csv
natDict = {}
with open(nation_file) as csvfile:
natreader = csv.reader(csvfile,delimiter=',')
for row in natreader:
try:
rowNme,rowCOW = row[0].split('(')
except:
rowNme = row[0]
key = rowNme.strip()
#key = rowTrm.upper()
natDict[key] = []
for item in row[1:]:
try:
rowNme,rowCOW = item.split('(')
except:
rowNme = item
rowTrm = rowNme.strip()
#rowUpr = rowTrm.upper()
natDict[key] = natDict[key] + [rowTrm]
# read gold standard classification from Excel
import xlrd
with xlrd.open_workbook(gs_file) as book:
# 0 corresponds for 1st worksheet, usually named 'Book1'
sheet = book.sheet_by_index(0)
# gets col A values, the gold standard keys
A = [ A for A in sheet.col_values(0) ]
# gets col B values, the golden standard classifications
B = [ B for B in sheet.col_values(1) ]
# get initiator
C = [ C for C in sheet.col_values(2) ]
# get target
D = [ D for D in sheet.col_values(3) ]
# get start month
E = [ E for E in sheet.col_values(4) ]
# get start day
F = [ F for F in sheet.col_values(5) ]
# get start year
G = [ G for G in sheet.col_values(6) ]
# FIX DICTIONARIES: Use the highest MID classification
# strip whitespace from the start/end of A and B elements
# convert B elements to lowercase
gold_keys = []
for row in A[1:]:
rowStr = row.encode('utf-8')
rowTrm = rowStr.strip()
rowUni = rowTrm.decode('utf-8')
gold_keys.append(rowUni)
gold_actions = []
for row in B[1:]:
rowStr = row.encode('utf-8')
rowTrm = rowStr.strip()
rowLwr = rowTrm.lower()
gold_actions.append(rowLwr)
gold_initiators = []
for row in C[1:]:
rowStr = row.encode('utf-8')
try:
rowNme,rowCOW = rowStr.split('(')
except:
rowNme = rowStr
rowTrm = rowNme.strip()
#rowUpr = rowTrm.upper()
if len(rowTrm) < 1:
rowTrm = 'None'
gold_initiators.append(rowTrm)
gold_targets = []
for row in D[1:]:
rowStr = row.encode('utf-8')
try:
rowNme,rowCOW = rowStr.split('(')
except:
rowNme = rowStr
rowTrm = rowNme.strip()
#rowUpr = rowTrm.upper()
if len(rowTrm) < 1:
rowTrm = 'None'
gold_targets.append(rowTrm)
gold_months = []
for row in E[1:]:
try:
gold_months.append(int(row))
except:
gold_months.append(1) # default to January
gold_days = []
for row in F[1:]:
try:
gold_days.append(int(row))
except:
gold_days.append(1) # default to first of month
gold_years = []
for row in G[1:]:
try:
gold_years.append(int(row))
except:
gold_years.append(None)
# gs_mids is a dictionary of document keys and their integer MID levels
gs_mids = {}
gs_actions = {}
gs_initiators = {}
gs_targets = {}
gs_stdates = {}
# loop through the spreadsheet columns, selecting the highest MID for each doc
for i in range(0,len(gold_actions)):
thisAction = gold_actions[i]
if (gold_actions[i] is not None) and (gold_actions[i] is not ""):
# convert string to int (e.g., 'Clash (17)' to 17)
junk,mid_level = thisAction.split('(')
mid_level,junk = mid_level.split(')')
mid_level = int(mid_level)
# use key to check if this document already has an MID level
if gold_keys[i] in gs_mids:
if mid_level > gold_keys[i]:
update_dict = True
else:
update_dict = False
else:
update_dict = True
# if the key is undefined or the new mid_level is higher
# then update the dictionary
if update_dict:
gs_mids[gold_keys[i]] = mid_level
gs_actions[gold_keys[i]] = gold_actions[i]
gs_initiators[gold_keys[i]] = gold_initiators[i]
gs_targets[gold_keys[i]] = gold_targets[i]
if gold_years[i] is not None:
gs_stdates[gold_keys[i]] = date(gold_years[i],gold_months[i],gold_days[i])
else:
gs_stdates[gold_keys[i]] = None
if len(sys.argv)>1:
in_file = sys.argv[1]
# db structure
# Docs table (ID, filename, title, timestamp, source, svm(float), text)
# Hits table (primary key: ID, char: AMT_HitID, foreignkey: doc)
from models import *
assignments.drop(engine, checkfirst=True)
docs.drop(engine, checkfirst=True)
metadata.create_all(engine) # create if it doesn't exist yet
def nextline (file):
raw = file.readline()
if not raw:
raise EOFError()
if raw.find('-------------------------------------------------')>=0:
raise ValueError()
return raw.decode(fileencoding)
file = open(in_file, 'r')
readdocs = []
count = 0
while file:
meta = {}
try:
while True:
# read meta-data
dataline = nextline(file).rstrip()
if '>>>>>>>' in dataline:
break
elif ':' in dataline:
# split at the first colon
tag,value = dataline.split(':',1)
tag = tag.lower()
tag = tag.strip()
tag = tag.replace(' ','_')
# the Countries meta-data is represented as a list of
# ('Country', freq) pairs, where 'Country' is the name
# of the country and freq is the frequency it occurs in
# the document. We need to parse this and select the 5
# most frequent countries for inclusion in the database
if tag == 'countries':
valueList = value.strip().split(')')
countryList = []
for countryFreq in valueList:
countryTuple = tuple(countryFreq.strip()[1:].split(','))
if countryTuple[0] != '':
country = countryTuple[0]
freq = int(countryTuple[1])
countryList += [[country,freq]]
# sort countries in descending order by frequency
countryList.sort(key=itemgetter(1),reverse=True)
# get the 5 most frequent countries
# or all countries if there are less than five
for index in range(1, min(5, len(countryList)) + 1):
(country, freq) = countryList[index - 1]
meta['country'+str(index)] = country.strip("'")
else:
# add tag and value to the dictionary
meta[tag] = value.strip()
# get text of document
text = u""
while True:
|
meta['text'] = text
# get NER and NELL-CAT from pipeline data
# open pipeline data file (HTML version)
# store the frequency of each NELL-CAT / NER location in document
#try:
locFreq = {}
counted = [False]*len(text)
with open('/data/mid/docs/' + meta['key'].strip()) as nell_file:
for line in nell_file:
if line.startswith('Type: ner'):
if line.find('Value: LOCATION') > -1:
if not(counted[spanStart]):
counted[spanStart] = True
location = text[spanStart:spanEnd]
if location in locFreq:
locFreq[location] = locFreq[location] + 1
else:
locFreq[location] = 1
elif line.startswith('Type: mid-attr'):
# parse out list of MID actions
# and the pipeline's confidence in each
# from the HTML output of the pipeline
preamble,values = line.split('Value: ',1)
valueList = values.split(',')
valueDict = {}
for value in valueList:
key,num = value.split(':',1)
if num.find('<') > -1:
num,html = num.split('<',1)
valueDict[key] = float(num)
# filter out actions other than those on
# our list of actions we care about
actionDict = {}
for action in mid_action_types:
# if there is a defined strength for this action
if action in valueDict:
actionDict[action] = valueDict[action]
else:
actionDict[action] = 0
# Set 'NO_MILITARIZED_ACTION' to at least threshold
if actionDict['NO_MILITARIZED_ACTION'] < MID_Threshold:
actionDict['NO_MILITARIZED_ACTION'] = MID_Threshold
elif line.startswith('Type: nell-cat'):
isLoc = False
if line.find('Value: location') > -1:
isLoc = True
if line.find('Value: country') > -1:
isLoc = True
if line.find('Value: geo') > -1:
isLoc = True
if isLoc:
if not(counted[spanStart]):
counted[spanStart] = True
location = text[spanStart:spanEnd]
if location in locFreq:
locFreq[location] = locFreq[location] + 1
else:
locFreq[location] = 1
elif line.startswith('<li><div class="annotation"'):
spanList = line.split(' ')
for span in spanList:
if span.startswith('spanStart='):
itemList = span.split('"')
spanStart = int(itemList[1])
elif span.startswith('spanEnd='):
itemList = span.split('"')
spanEnd = int(itemList[1])
# add mid action classification to document data
# pick the top 4 most confident classifications
# if the 4th classification is below threshold,
# suggest "No militarized action" instead
bestAction = sorted(actionDict, key=actionDict.get, reverse=True)
for index in range(1,5):
meta['mid_attr'+str(index)] = mid_dict[bestAction[index - 1]]
# pick the top 5 most frequent locations
bestLoc = sorted(locFreq, key=locFreq.get, reverse=True)
for index in range(1,min(5, len(bestLoc)) + 1):
meta['nelloc'+str(index)] = bestLoc[index - 1]
# add gold standard MID classification to database
gold_key = meta['gskey']
if gold_key in gs_actions:
gold_action = gs_actions[gold_key]
if gold_action in gold_dict:
gold_db = gold_dict[gold_action]
meta['gsAction'] = gold_db
else:
meta['gsAction'] = 'Unknown'
if (gs_mids[gold_key] > 6) or (gs_mids[gold_key] == 0):
meta['gsActionNum'] = gs_mids[gold_key]
else: # don't discriminate between actions 1 thru 6
meta['gsActionNum'] = 6
# convert nations into Qualtrics format
ini = gs_initiators[gold_key].lower().strip()
tar = gs_targets[gold_key].lower().strip()
if ini in nationDict:
meta['gsInitiator'] = nationDict[ini]
else:
meta['gsInitiator'] = gs_initiators[gold_key]
if tar in nationDict:
meta['gsTargets'] = nationDict[tar]
else:
meta['gsTargets'] = gs_targets[gold_key]
meta['gsDate'] = gs_stdates[gold_key]
# get ISO codes for gold standard nations
if ini in isoDict:
meta['gsInitiatorISO'] = isoDict[ini]
else:
print "ISO for " + ini + " not found."
if tar in isoDict:
meta['gsTargetISO'] = isoDict[tar]
else:
print "ISO for " + tar + " not found."
# make several incorrect suggestions for the action
similar_actions = actDict[gold_action]
# suggest that there's no MID
# with proportion of non-MID documents in gold standard
suggest_no_mid = (gold_action != mid_action_gold[0]) and (randint(0,1) <= 0.2108)
if suggest_no_mid:
# randomly add "no MID action" as a suggestion
similar_actions = [mid_action_gold[0]] + similar_actions
# assign actions
meta['r1Action'] = gold_dict[similar_actions[0]]
meta['r2Action'] = gold_dict[similar_actions[1]]
meta['r3Action'] = gold_dict[similar_actions[2]]
meta['r4Action'] = gold_dict[similar_actions[3]]
# now let's make incorrect suggestions for the date
dateList = meta['date'].split(' ')
dateStr = ' '.join(dateList[0:3])
try:
pubDate = datetime.strptime(dateStr,'%B %d, %Y')
except:
try:
pubDate = datetime.strptime(dateStr,'%B %d, %Y,')
except:
pubDate = datetime.strptime(dateStr,'%B %d %Y')
# define a range of dates: 10 days before and 1 day after publication
pick_a_day = range(-10,1)
if meta['gsDate'] is not None:
pub_gold_diff = meta['gsDate'].day - pubDate.day
if pub_gold_diff in pick_a_day:
pick_a_day.pop(pick_a_day.index(pub_gold_diff)) # cannot pick gold/correct
# now select random dates within that range
suggested_dates = []
if suggest_no_mid and (meta['gsDate'] is not None):
suggested_dates = [None]
while len(suggested_dates) < 4:
j = randint(0,len(pick_a_day) - 1)
suggested_dates = suggested_dates + [pubDate + timedelta(days=pick_a_day[j])]
pick_a_day.pop(j)
# assign dates
meta['r1Date'] = suggested_dates[0]
meta['r2Date'] = suggested_dates[1]
meta['r3Date'] = suggested_dates[2]
meta['r4Date'] = suggested_dates[3]
# suggest some incorrect initiators
suggested_nations = []
if suggest_no_mid:
suggested_nations = suggested_nations + ['None']
if ('country1' in meta) and (meta['country1'] != gs_initiators[gold_key]):
suggested_nations = suggested_nations + [meta['country1']]
if ('country2' in meta) and (meta['country2'] != gs_initiators[gold_key]):
suggested_nations = suggested_nations + [meta['country2']]
if ('country3' in meta) and (meta['country3'] != gs_initiators[gold_key]):
suggested_nations = suggested_nations + [meta['country3']]
if ('country4' in meta) and (meta['country4'] != gs_initiators[gold_key]):
suggested_nations = suggested_nations + [meta['country4']]
if len(suggested_nations) < 4:
if (gs_initiators[gold_key] is not None) and (gs_initiators[gold_key] != '') and (gs_initiators[gold_key] is not 'None'):
similar_nations = natDict[gs_initiators[gold_key]]
elif (len(suggested_nations) > 0) and (suggested_nations[0] in natDict):
similar_nations = natDict[suggested_nations[0]]
else: # get some random countries
similar_nations = natDict['NATO']
shuffle(similar_nations)
for nation in similar_nations:
if not(nation in suggested_nations):
suggested_nations = suggested_nations + [nation]
if len(suggested_nations) > 4:
break
# assign suggestions
meta['r1Initiator'] = suggested_nations[0]
meta['r2Initiator'] = suggested_nations[1]
meta['r3Initiator'] = suggested_nations[2]
meta['r4Initiator'] = suggested_nations[3]
# suggest some incorrect targets
suggested_nations = []
if suggest_no_mid:
suggested_nations = suggested_nations + ['None']
if ('country1' in meta) and (meta['country1'] != gs_targets[gold_key]):
suggested_nations = suggested_nations + [meta['country1']]
if ('country2' in meta) and (meta['country2'] != gs_targets[gold_key]):
suggested_nations = suggested_nations + [meta['country2']]
if ('country3' in meta) and (meta['country3'] != gs_targets[gold_key]):
suggested_nations = suggested_nations + [meta['country3']]
if ('country4' in meta) and (meta['country4'] != gs_targets[gold_key]):
suggested_nations = suggested_nations + [meta['country4']]
if len(suggested_nations) < 4:
if (gs_targets[gold_key] is not None) and (gs_targets[gold_key] != '') and (gs_targets[gold_key] is not 'None'):
similar_nations = natDict[gs_targets[gold_key]]
elif (len(suggested_nations) > 0) and (suggested_nations[0] in natDict):
similar_nations = natDict[suggested_nations[0]]
else: # get some random countries
similar_nations = natDict['NATO']
shuffle(similar_nations)
for nation in similar_nations:
if not(nation in suggested_nations):
suggested_nations = suggested_nations + [nation]
if len(suggested_nations) > 4:
break
# assign suggestions
meta['r1Targets'] = suggested_nations[0]
meta['r2Targets'] = suggested_nations[1]
meta['r3Targets'] = suggested_nations[2]
meta['r4Targets'] = suggested_nations[3]
# check to see if gold standard classification
# is correctly guessed by NELL
if meta['gsAction'] == meta['mid_attr1']:
meta['correct'] = 1
elif meta['gsAction'] == meta['mid_attr2']:
meta['correct'] = 1
elif meta['gsAction'] == meta['mid_attr3']:
meta['correct'] = 1
elif meta['gsAction'] == meta['mid_attr4']:
meta['correct'] = 1
else:
meta['correct'] = 0
#except:
# print "Could not find pipeline data for document "+meta['key']
# add data from this document to the document list
if (meta['gsAction'].lower() == 'unknown') or (meta['gsAction'] == ''):
pass # if the action is unknown, do not include document
elif meta['gsActionNum'] == 0:
readdocs += [meta] # if the action is 0 / no MID, include
elif (meta['gsInitiator'] == '') or (meta['gsInitiator'].lower() == 'none'):
pass # if the action is not zero, and no initiator, do not include
elif (meta['gsTargets'] == '') or (meta['gsTargets'].lower() == 'none'):
pass # if the action is not zero, and no target, do not include
else: # otherwise, include
readdocs += [meta]
try:
nextline(file)
except ValueError:
pass
except EOFError:
break
except ValueError:
print meta
print "document could not be read - encountered ----- separator. skipping doc."
# shuffle readdocs
from random import shuffle
shuffle(readdocs)
for data in readdocs:
# sqlalchemy.sql.expression.insert(table, values=None, inline=False, **kwargs)
from sqlalchemy.exc import IntegrityError
try:
# chk = docs.select(docs.c.filename==fname)
# result = engine.execute(chk.exists())
existing = connection.execute(docs.select(docs.c.key==data['key']))
if existing.first():
ins = docs.update().where(docs.c.key==data['key']).values(data)
else:
ins = docs.insert().values(data)
result = engine.execute(ins)
except IntegrityError as x:
print x
count = count+1
if count%100 == 0:
print count
print "%s imported."%count
# 20100913--0161-Sep13_2010_LN_NP1.txt-files.list
# Australian troops to encounter more violence in Afghanistan: Defense Force AFG-AUL
# September 13, 2010 Monday 1:25 AM EST
# News source: (c) Xinhua General News Service
# SVM score: 1.757
# >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
# Australian troops in Afghanistan can expect to encounter increased violence as
# they push deeper into Taliban sanctuaries, Australian Defense Force warned on
# Monday.
# Air Chief Marshal Angus Houston told reporters during a briefing in Canberra on
# Monday that troops of the Afghan National Army 4th Brigade, accompanied by
# their Australian mentors, were heading deeper into the Baluchi and Chora
# Valleys and Deh Rawood area of Oruzgan province in Afghanistan.
# "These are partnered patrols and it is dangerous work," said Houston, adding
# the fight was becoming more intense.
# "We can expect violence levels to increase as we contest the insurgency in
# greater numbers and across a wider area in the south. "
# Afghanistan is now in the midst of its fighting season with 10 Australian
# soldiers killed so far this year.
# The most recent fatality was Lance Corporal Jared MacKinney who was killed
# while accompanying Afghan troops in the Tangi Valley near Deh Rawood.
# The past few months had been tough for Australian troops, Houston said.
# "But it is important that we maintain our resolve, push forward with the
# strategy and keep the pressure on the Taliban," he said.
# He said last Tuesday that Australian troops had achieved significant success in
# training the Afghan security forces and pressuring insurgents.
# <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
| t = nextline(file)
if t.find('<<<<<<<<<<<<<<<')>=0:
break
t = t.replace('\r\n','\n').replace('\r','\n').rstrip()
text += t + u"\n" | conditional_block |
import-docs.py | #!/usr/bin/python
import sys
import re
import json
from operator import itemgetter
from datetime import datetime, date, timedelta
import calendar
from random import *
import csv
in_file = "/data/mid/2011_gs_supplemented.txt"
#in_file = "/data/mid/2011_sorted_pos4.txt"
#gs_file = "/data/mid/last9months2011.xlsx"
gs_file = "/data/mid/gs_master_plus_corrected.xlsx"
action_file = "/data/mid/actions.csv"
nation_file = "/data/mid/nations.csv"
iso_file = "/data/mid/ISO2.csv"
#in_file = "2011_sorted_pos4.txt"
fileencoding = "windows-1252"
mid_action_names = [
'No militarized action',
'Threat to use force',
'Threat to use force',
'Show of force',
'Alert',
'Mobilization',
'Border fortification',
'Border violation',
'Blockade',
'Occupation of territory',
'Seizure of material or personnel',
'Attack',
'Clash',
]
mid_action_types = [
'NO_MILITARIZED_ACTION',
'THREAT_TO_USE_FORCE',
'THREAT_TO_DECLARE_WAR',
'SHOW_OF_FORCE',
'ALERT',
'MOBILIZATION',
'FORTIFY_BORDER',
'BORDER_VIOLATION',
'BLOCKADE',
'OCCUPATION_OF_TERRITORY',
'SEIZURE',
'ATTACK',
'CLASH',
]
mid_action_gold = [
'no militarized action (0)',
'threat to use force (1)',
'threat to declare war (4)',
'show of force (7)',
'alert (8)',
'mobilization (10)',
'fortify border (11)',
'border violation (12)',
'blockade (13)',
'occupation of territory (14)',
'seizure (15)',
'attack (16)',
'clash (17)',
]
# combine the lists to create dictionaries
mid_dict = dict(zip(mid_action_types, mid_action_names))
gold_dict = dict(zip(mid_action_gold, mid_action_names))
# minimum threshold of classification confidence
# to classify as MID
MID_Threshold = 0.5
# read ISO codes for each nation
isoDict = {}
nationDict = {}
with open(iso_file) as csvfile:
isoreader = csv.reader(csvfile,delimiter=',')
for row in isoreader:
key = row[0].lower().strip()
isoDict[key] = row[1]
nationDict[key] = row[2]
# read action similarities from csv
actDict = {}
with open(action_file) as csvfile:
actreader = csv.reader(csvfile,delimiter=',')
for row in actreader:
key = row[0].lower()
actDict[key] = []
for item in row[1:]:
actDict[key] = actDict[key] + [item.lower()]
# read nation similarities from csv
natDict = {}
with open(nation_file) as csvfile:
natreader = csv.reader(csvfile,delimiter=',')
for row in natreader:
try:
rowNme,rowCOW = row[0].split('(')
except:
rowNme = row[0]
key = rowNme.strip()
#key = rowTrm.upper()
natDict[key] = []
for item in row[1:]:
try:
rowNme,rowCOW = item.split('(')
except:
rowNme = item
rowTrm = rowNme.strip()
#rowUpr = rowTrm.upper()
natDict[key] = natDict[key] + [rowTrm]
# read gold standard classification from Excel
import xlrd
with xlrd.open_workbook(gs_file) as book:
# 0 corresponds for 1st worksheet, usually named 'Book1'
sheet = book.sheet_by_index(0)
# gets col A values, the gold standard keys
A = [ A for A in sheet.col_values(0) ]
# gets col B values, the golden standard classifications
B = [ B for B in sheet.col_values(1) ]
# get initiator
C = [ C for C in sheet.col_values(2) ]
# get target
D = [ D for D in sheet.col_values(3) ]
# get start month
E = [ E for E in sheet.col_values(4) ]
# get start day
F = [ F for F in sheet.col_values(5) ]
# get start year
G = [ G for G in sheet.col_values(6) ]
# FIX DICTIONARIES: Use the highest MID classification
# strip whitespace from the start/end of A and B elements
# convert B elements to lowercase
gold_keys = []
for row in A[1:]:
rowStr = row.encode('utf-8')
rowTrm = rowStr.strip()
rowUni = rowTrm.decode('utf-8')
gold_keys.append(rowUni)
gold_actions = []
for row in B[1:]:
rowStr = row.encode('utf-8')
rowTrm = rowStr.strip()
rowLwr = rowTrm.lower()
gold_actions.append(rowLwr)
gold_initiators = []
for row in C[1:]:
rowStr = row.encode('utf-8')
try:
rowNme,rowCOW = rowStr.split('(')
except:
rowNme = rowStr
rowTrm = rowNme.strip()
#rowUpr = rowTrm.upper()
if len(rowTrm) < 1:
rowTrm = 'None'
gold_initiators.append(rowTrm)
gold_targets = []
for row in D[1:]:
rowStr = row.encode('utf-8')
try:
rowNme,rowCOW = rowStr.split('(')
except:
rowNme = rowStr
rowTrm = rowNme.strip()
#rowUpr = rowTrm.upper()
if len(rowTrm) < 1:
rowTrm = 'None'
gold_targets.append(rowTrm)
gold_months = []
for row in E[1:]:
try:
gold_months.append(int(row))
except:
gold_months.append(1) # default to January
gold_days = []
for row in F[1:]:
try:
gold_days.append(int(row))
except:
gold_days.append(1) # default to first of month
gold_years = []
for row in G[1:]:
try:
gold_years.append(int(row))
except:
gold_years.append(None)
# gs_mids is a dictionary of document keys and their integer MID levels
gs_mids = {}
gs_actions = {}
gs_initiators = {}
gs_targets = {}
gs_stdates = {}
# loop through the spreadsheet columns, selecting the highest MID for each doc
for i in range(0,len(gold_actions)):
thisAction = gold_actions[i]
if (gold_actions[i] is not None) and (gold_actions[i] is not ""):
# convert string to int (e.g., 'Clash (17)' to 17)
junk,mid_level = thisAction.split('(')
mid_level,junk = mid_level.split(')')
mid_level = int(mid_level)
# use key to check if this document already has an MID level
if gold_keys[i] in gs_mids:
if mid_level > gold_keys[i]:
update_dict = True
else:
update_dict = False
else:
update_dict = True
# if the key is undefined or the new mid_level is higher
# then update the dictionary
if update_dict:
gs_mids[gold_keys[i]] = mid_level
gs_actions[gold_keys[i]] = gold_actions[i]
gs_initiators[gold_keys[i]] = gold_initiators[i]
gs_targets[gold_keys[i]] = gold_targets[i]
if gold_years[i] is not None:
gs_stdates[gold_keys[i]] = date(gold_years[i],gold_months[i],gold_days[i])
else:
gs_stdates[gold_keys[i]] = None
if len(sys.argv)>1:
in_file = sys.argv[1]
# db structure
# Docs table (ID, filename, title, timestamp, source, svm(float), text)
# Hits table (primary key: ID, char: AMT_HitID, foreignkey: doc)
from models import *
assignments.drop(engine, checkfirst=True)
docs.drop(engine, checkfirst=True)
metadata.create_all(engine) # create if it doesn't exist yet
def nextline (file):
|
file = open(in_file, 'r')
readdocs = []
count = 0
while file:
meta = {}
try:
while True:
# read meta-data
dataline = nextline(file).rstrip()
if '>>>>>>>' in dataline:
break
elif ':' in dataline:
# split at the first colon
tag,value = dataline.split(':',1)
tag = tag.lower()
tag = tag.strip()
tag = tag.replace(' ','_')
# the Countries meta-data is represented as a list of
# ('Country', freq) pairs, where 'Country' is the name
# of the country and freq is the frequency it occurs in
# the document. We need to parse this and select the 5
# most frequent countries for inclusion in the database
if tag == 'countries':
valueList = value.strip().split(')')
countryList = []
for countryFreq in valueList:
countryTuple = tuple(countryFreq.strip()[1:].split(','))
if countryTuple[0] != '':
country = countryTuple[0]
freq = int(countryTuple[1])
countryList += [[country,freq]]
# sort countries in descending order by frequency
countryList.sort(key=itemgetter(1),reverse=True)
# get the 5 most frequent countries
# or all countries if there are less than five
for index in range(1, min(5, len(countryList)) + 1):
(country, freq) = countryList[index - 1]
meta['country'+str(index)] = country.strip("'")
else:
# add tag and value to the dictionary
meta[tag] = value.strip()
# get text of document
text = u""
while True:
t = nextline(file)
if t.find('<<<<<<<<<<<<<<<')>=0:
break
t = t.replace('\r\n','\n').replace('\r','\n').rstrip()
text += t + u"\n"
meta['text'] = text
# get NER and NELL-CAT from pipeline data
# open pipeline data file (HTML version)
# store the frequency of each NELL-CAT / NER location in document
#try:
locFreq = {}
counted = [False]*len(text)
with open('/data/mid/docs/' + meta['key'].strip()) as nell_file:
for line in nell_file:
if line.startswith('Type: ner'):
if line.find('Value: LOCATION') > -1:
if not(counted[spanStart]):
counted[spanStart] = True
location = text[spanStart:spanEnd]
if location in locFreq:
locFreq[location] = locFreq[location] + 1
else:
locFreq[location] = 1
elif line.startswith('Type: mid-attr'):
# parse out list of MID actions
# and the pipeline's confidence in each
# from the HTML output of the pipeline
preamble,values = line.split('Value: ',1)
valueList = values.split(',')
valueDict = {}
for value in valueList:
key,num = value.split(':',1)
if num.find('<') > -1:
num,html = num.split('<',1)
valueDict[key] = float(num)
# filter out actions other than those on
# our list of actions we care about
actionDict = {}
for action in mid_action_types:
# if there is a defined strength for this action
if action in valueDict:
actionDict[action] = valueDict[action]
else:
actionDict[action] = 0
# Set 'NO_MILITARIZED_ACTION' to at least threshold
if actionDict['NO_MILITARIZED_ACTION'] < MID_Threshold:
actionDict['NO_MILITARIZED_ACTION'] = MID_Threshold
elif line.startswith('Type: nell-cat'):
isLoc = False
if line.find('Value: location') > -1:
isLoc = True
if line.find('Value: country') > -1:
isLoc = True
if line.find('Value: geo') > -1:
isLoc = True
if isLoc:
if not(counted[spanStart]):
counted[spanStart] = True
location = text[spanStart:spanEnd]
if location in locFreq:
locFreq[location] = locFreq[location] + 1
else:
locFreq[location] = 1
elif line.startswith('<li><div class="annotation"'):
spanList = line.split(' ')
for span in spanList:
if span.startswith('spanStart='):
itemList = span.split('"')
spanStart = int(itemList[1])
elif span.startswith('spanEnd='):
itemList = span.split('"')
spanEnd = int(itemList[1])
# add mid action classification to document data
# pick the top 4 most confident classifications
# if the 4th classification is below threshold,
# suggest "No militarized action" instead
bestAction = sorted(actionDict, key=actionDict.get, reverse=True)
for index in range(1,5):
meta['mid_attr'+str(index)] = mid_dict[bestAction[index - 1]]
# pick the top 5 most frequent locations
bestLoc = sorted(locFreq, key=locFreq.get, reverse=True)
for index in range(1,min(5, len(bestLoc)) + 1):
meta['nelloc'+str(index)] = bestLoc[index - 1]
# add gold standard MID classification to database
gold_key = meta['gskey']
if gold_key in gs_actions:
gold_action = gs_actions[gold_key]
if gold_action in gold_dict:
gold_db = gold_dict[gold_action]
meta['gsAction'] = gold_db
else:
meta['gsAction'] = 'Unknown'
if (gs_mids[gold_key] > 6) or (gs_mids[gold_key] == 0):
meta['gsActionNum'] = gs_mids[gold_key]
else: # don't discriminate between actions 1 thru 6
meta['gsActionNum'] = 6
# convert nations into Qualtrics format
ini = gs_initiators[gold_key].lower().strip()
tar = gs_targets[gold_key].lower().strip()
if ini in nationDict:
meta['gsInitiator'] = nationDict[ini]
else:
meta['gsInitiator'] = gs_initiators[gold_key]
if tar in nationDict:
meta['gsTargets'] = nationDict[tar]
else:
meta['gsTargets'] = gs_targets[gold_key]
meta['gsDate'] = gs_stdates[gold_key]
# get ISO codes for gold standard nations
if ini in isoDict:
meta['gsInitiatorISO'] = isoDict[ini]
else:
print "ISO for " + ini + " not found."
if tar in isoDict:
meta['gsTargetISO'] = isoDict[tar]
else:
print "ISO for " + tar + " not found."
# make several incorrect suggestions for the action
similar_actions = actDict[gold_action]
# suggest that there's no MID
# with proportion of non-MID documents in gold standard
suggest_no_mid = (gold_action != mid_action_gold[0]) and (randint(0,1) <= 0.2108)
if suggest_no_mid:
# randomly add "no MID action" as a suggestion
similar_actions = [mid_action_gold[0]] + similar_actions
# assign actions
meta['r1Action'] = gold_dict[similar_actions[0]]
meta['r2Action'] = gold_dict[similar_actions[1]]
meta['r3Action'] = gold_dict[similar_actions[2]]
meta['r4Action'] = gold_dict[similar_actions[3]]
# now let's make incorrect suggestions for the date
dateList = meta['date'].split(' ')
dateStr = ' '.join(dateList[0:3])
try:
pubDate = datetime.strptime(dateStr,'%B %d, %Y')
except:
try:
pubDate = datetime.strptime(dateStr,'%B %d, %Y,')
except:
pubDate = datetime.strptime(dateStr,'%B %d %Y')
# define a range of dates: 10 days before and 1 day after publication
pick_a_day = range(-10,1)
if meta['gsDate'] is not None:
pub_gold_diff = meta['gsDate'].day - pubDate.day
if pub_gold_diff in pick_a_day:
pick_a_day.pop(pick_a_day.index(pub_gold_diff)) # cannot pick gold/correct
# now select random dates within that range
suggested_dates = []
if suggest_no_mid and (meta['gsDate'] is not None):
suggested_dates = [None]
while len(suggested_dates) < 4:
j = randint(0,len(pick_a_day) - 1)
suggested_dates = suggested_dates + [pubDate + timedelta(days=pick_a_day[j])]
pick_a_day.pop(j)
# assign dates
meta['r1Date'] = suggested_dates[0]
meta['r2Date'] = suggested_dates[1]
meta['r3Date'] = suggested_dates[2]
meta['r4Date'] = suggested_dates[3]
# suggest some incorrect initiators
suggested_nations = []
if suggest_no_mid:
suggested_nations = suggested_nations + ['None']
if ('country1' in meta) and (meta['country1'] != gs_initiators[gold_key]):
suggested_nations = suggested_nations + [meta['country1']]
if ('country2' in meta) and (meta['country2'] != gs_initiators[gold_key]):
suggested_nations = suggested_nations + [meta['country2']]
if ('country3' in meta) and (meta['country3'] != gs_initiators[gold_key]):
suggested_nations = suggested_nations + [meta['country3']]
if ('country4' in meta) and (meta['country4'] != gs_initiators[gold_key]):
suggested_nations = suggested_nations + [meta['country4']]
if len(suggested_nations) < 4:
if (gs_initiators[gold_key] is not None) and (gs_initiators[gold_key] != '') and (gs_initiators[gold_key] is not 'None'):
similar_nations = natDict[gs_initiators[gold_key]]
elif (len(suggested_nations) > 0) and (suggested_nations[0] in natDict):
similar_nations = natDict[suggested_nations[0]]
else: # get some random countries
similar_nations = natDict['NATO']
shuffle(similar_nations)
for nation in similar_nations:
if not(nation in suggested_nations):
suggested_nations = suggested_nations + [nation]
if len(suggested_nations) > 4:
break
# assign suggestions
meta['r1Initiator'] = suggested_nations[0]
meta['r2Initiator'] = suggested_nations[1]
meta['r3Initiator'] = suggested_nations[2]
meta['r4Initiator'] = suggested_nations[3]
# suggest some incorrect targets
suggested_nations = []
if suggest_no_mid:
suggested_nations = suggested_nations + ['None']
if ('country1' in meta) and (meta['country1'] != gs_targets[gold_key]):
suggested_nations = suggested_nations + [meta['country1']]
if ('country2' in meta) and (meta['country2'] != gs_targets[gold_key]):
suggested_nations = suggested_nations + [meta['country2']]
if ('country3' in meta) and (meta['country3'] != gs_targets[gold_key]):
suggested_nations = suggested_nations + [meta['country3']]
if ('country4' in meta) and (meta['country4'] != gs_targets[gold_key]):
suggested_nations = suggested_nations + [meta['country4']]
if len(suggested_nations) < 4:
if (gs_targets[gold_key] is not None) and (gs_targets[gold_key] != '') and (gs_targets[gold_key] is not 'None'):
similar_nations = natDict[gs_targets[gold_key]]
elif (len(suggested_nations) > 0) and (suggested_nations[0] in natDict):
similar_nations = natDict[suggested_nations[0]]
else: # get some random countries
similar_nations = natDict['NATO']
shuffle(similar_nations)
for nation in similar_nations:
if not(nation in suggested_nations):
suggested_nations = suggested_nations + [nation]
if len(suggested_nations) > 4:
break
# assign suggestions
meta['r1Targets'] = suggested_nations[0]
meta['r2Targets'] = suggested_nations[1]
meta['r3Targets'] = suggested_nations[2]
meta['r4Targets'] = suggested_nations[3]
# check to see if gold standard classification
# is correctly guessed by NELL
if meta['gsAction'] == meta['mid_attr1']:
meta['correct'] = 1
elif meta['gsAction'] == meta['mid_attr2']:
meta['correct'] = 1
elif meta['gsAction'] == meta['mid_attr3']:
meta['correct'] = 1
elif meta['gsAction'] == meta['mid_attr4']:
meta['correct'] = 1
else:
meta['correct'] = 0
#except:
# print "Could not find pipeline data for document "+meta['key']
# add data from this document to the document list
if (meta['gsAction'].lower() == 'unknown') or (meta['gsAction'] == ''):
pass # if the action is unknown, do not include document
elif meta['gsActionNum'] == 0:
readdocs += [meta] # if the action is 0 / no MID, include
elif (meta['gsInitiator'] == '') or (meta['gsInitiator'].lower() == 'none'):
pass # if the action is not zero, and no initiator, do not include
elif (meta['gsTargets'] == '') or (meta['gsTargets'].lower() == 'none'):
pass # if the action is not zero, and no target, do not include
else: # otherwise, include
readdocs += [meta]
try:
nextline(file)
except ValueError:
pass
except EOFError:
break
except ValueError:
print meta
print "document could not be read - encountered ----- separator. skipping doc."
# shuffle readdocs
from random import shuffle
shuffle(readdocs)
for data in readdocs:
# sqlalchemy.sql.expression.insert(table, values=None, inline=False, **kwargs)
from sqlalchemy.exc import IntegrityError
try:
# chk = docs.select(docs.c.filename==fname)
# result = engine.execute(chk.exists())
existing = connection.execute(docs.select(docs.c.key==data['key']))
if existing.first():
ins = docs.update().where(docs.c.key==data['key']).values(data)
else:
ins = docs.insert().values(data)
result = engine.execute(ins)
except IntegrityError as x:
print x
count = count+1
if count%100 == 0:
print count
print "%s imported."%count
# 20100913--0161-Sep13_2010_LN_NP1.txt-files.list
# Australian troops to encounter more violence in Afghanistan: Defense Force AFG-AUL
# September 13, 2010 Monday 1:25 AM EST
# News source: (c) Xinhua General News Service
# SVM score: 1.757
# >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
# Australian troops in Afghanistan can expect to encounter increased violence as
# they push deeper into Taliban sanctuaries, Australian Defense Force warned on
# Monday.
# Air Chief Marshal Angus Houston told reporters during a briefing in Canberra on
# Monday that troops of the Afghan National Army 4th Brigade, accompanied by
# their Australian mentors, were heading deeper into the Baluchi and Chora
# Valleys and Deh Rawood area of Oruzgan province in Afghanistan.
# "These are partnered patrols and it is dangerous work," said Houston, adding
# the fight was becoming more intense.
# "We can expect violence levels to increase as we contest the insurgency in
# greater numbers and across a wider area in the south. "
# Afghanistan is now in the midst of its fighting season with 10 Australian
# soldiers killed so far this year.
# The most recent fatality was Lance Corporal Jared MacKinney who was killed
# while accompanying Afghan troops in the Tangi Valley near Deh Rawood.
# The past few months had been tough for Australian troops, Houston said.
# "But it is important that we maintain our resolve, push forward with the
# strategy and keep the pressure on the Taliban," he said.
# He said last Tuesday that Australian troops had achieved significant success in
# training the Afghan security forces and pressuring insurgents.
# <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
| raw = file.readline()
if not raw:
raise EOFError()
if raw.find('-------------------------------------------------')>=0:
raise ValueError()
return raw.decode(fileencoding) | identifier_body |
import-docs.py | #!/usr/bin/python
import sys
import re
import json
from operator import itemgetter
from datetime import datetime, date, timedelta
import calendar
from random import *
import csv
in_file = "/data/mid/2011_gs_supplemented.txt"
#in_file = "/data/mid/2011_sorted_pos4.txt"
#gs_file = "/data/mid/last9months2011.xlsx"
gs_file = "/data/mid/gs_master_plus_corrected.xlsx"
action_file = "/data/mid/actions.csv"
nation_file = "/data/mid/nations.csv"
iso_file = "/data/mid/ISO2.csv"
#in_file = "2011_sorted_pos4.txt"
fileencoding = "windows-1252"
mid_action_names = [
'No militarized action',
'Threat to use force',
'Threat to use force',
'Show of force',
'Alert',
'Mobilization',
'Border fortification',
'Border violation',
'Blockade',
'Occupation of territory',
'Seizure of material or personnel',
'Attack',
'Clash',
]
mid_action_types = [
'NO_MILITARIZED_ACTION',
'THREAT_TO_USE_FORCE',
'THREAT_TO_DECLARE_WAR',
'SHOW_OF_FORCE',
'ALERT',
'MOBILIZATION',
'FORTIFY_BORDER',
'BORDER_VIOLATION',
'BLOCKADE',
'OCCUPATION_OF_TERRITORY',
'SEIZURE',
'ATTACK',
'CLASH',
]
mid_action_gold = [
'no militarized action (0)',
'threat to use force (1)',
'threat to declare war (4)',
'show of force (7)',
'alert (8)',
'mobilization (10)',
'fortify border (11)',
'border violation (12)',
'blockade (13)',
'occupation of territory (14)',
'seizure (15)',
'attack (16)',
'clash (17)',
]
# combine the lists to create dictionaries
mid_dict = dict(zip(mid_action_types, mid_action_names))
gold_dict = dict(zip(mid_action_gold, mid_action_names))
# minimum threshold of classification confidence
# to classify as MID
MID_Threshold = 0.5
# read ISO codes for each nation
isoDict = {}
nationDict = {}
with open(iso_file) as csvfile:
isoreader = csv.reader(csvfile,delimiter=',')
for row in isoreader:
key = row[0].lower().strip()
isoDict[key] = row[1]
nationDict[key] = row[2]
# read action similarities from csv
actDict = {}
with open(action_file) as csvfile:
actreader = csv.reader(csvfile,delimiter=',')
for row in actreader:
key = row[0].lower()
actDict[key] = []
for item in row[1:]:
actDict[key] = actDict[key] + [item.lower()]
# read nation similarities from csv
natDict = {}
with open(nation_file) as csvfile:
natreader = csv.reader(csvfile,delimiter=',')
for row in natreader:
try:
rowNme,rowCOW = row[0].split('(')
except:
rowNme = row[0]
key = rowNme.strip()
#key = rowTrm.upper()
natDict[key] = []
for item in row[1:]:
try:
rowNme,rowCOW = item.split('(')
except:
rowNme = item
rowTrm = rowNme.strip()
#rowUpr = rowTrm.upper()
natDict[key] = natDict[key] + [rowTrm]
# read gold standard classification from Excel
import xlrd
with xlrd.open_workbook(gs_file) as book:
# 0 corresponds for 1st worksheet, usually named 'Book1'
sheet = book.sheet_by_index(0)
# gets col A values, the gold standard keys
A = [ A for A in sheet.col_values(0) ]
# gets col B values, the golden standard classifications
B = [ B for B in sheet.col_values(1) ]
# get initiator
C = [ C for C in sheet.col_values(2) ]
# get target
D = [ D for D in sheet.col_values(3) ]
# get start month
E = [ E for E in sheet.col_values(4) ]
# get start day
F = [ F for F in sheet.col_values(5) ]
# get start year
G = [ G for G in sheet.col_values(6) ]
# FIX DICTIONARIES: Use the highest MID classification
# strip whitespace from the start/end of A and B elements
# convert B elements to lowercase
gold_keys = []
for row in A[1:]:
rowStr = row.encode('utf-8')
rowTrm = rowStr.strip()
rowUni = rowTrm.decode('utf-8')
gold_keys.append(rowUni)
gold_actions = []
for row in B[1:]:
rowStr = row.encode('utf-8')
rowTrm = rowStr.strip()
rowLwr = rowTrm.lower()
gold_actions.append(rowLwr)
gold_initiators = []
for row in C[1:]:
rowStr = row.encode('utf-8')
try:
rowNme,rowCOW = rowStr.split('(')
except:
rowNme = rowStr
rowTrm = rowNme.strip()
#rowUpr = rowTrm.upper()
if len(rowTrm) < 1:
rowTrm = 'None'
gold_initiators.append(rowTrm)
gold_targets = []
for row in D[1:]:
rowStr = row.encode('utf-8')
try:
rowNme,rowCOW = rowStr.split('(')
except:
rowNme = rowStr
rowTrm = rowNme.strip()
#rowUpr = rowTrm.upper()
if len(rowTrm) < 1:
rowTrm = 'None'
gold_targets.append(rowTrm)
gold_months = []
for row in E[1:]:
try:
gold_months.append(int(row))
except:
gold_months.append(1) # default to January
gold_days = []
for row in F[1:]:
try:
gold_days.append(int(row))
except:
gold_days.append(1) # default to first of month
gold_years = []
for row in G[1:]:
try:
gold_years.append(int(row))
except:
gold_years.append(None)
# gs_mids is a dictionary of document keys and their integer MID levels
gs_mids = {}
gs_actions = {}
gs_initiators = {}
gs_targets = {}
gs_stdates = {}
# loop through the spreadsheet columns, selecting the highest MID for each doc
for i in range(0,len(gold_actions)):
thisAction = gold_actions[i]
if (gold_actions[i] is not None) and (gold_actions[i] is not ""):
# convert string to int (e.g., 'Clash (17)' to 17)
junk,mid_level = thisAction.split('(')
mid_level,junk = mid_level.split(')')
mid_level = int(mid_level)
# use key to check if this document already has an MID level
if gold_keys[i] in gs_mids:
if mid_level > gold_keys[i]:
update_dict = True
else:
update_dict = False
else:
update_dict = True
# if the key is undefined or the new mid_level is higher
# then update the dictionary
if update_dict:
gs_mids[gold_keys[i]] = mid_level
gs_actions[gold_keys[i]] = gold_actions[i]
gs_initiators[gold_keys[i]] = gold_initiators[i]
gs_targets[gold_keys[i]] = gold_targets[i]
if gold_years[i] is not None:
gs_stdates[gold_keys[i]] = date(gold_years[i],gold_months[i],gold_days[i])
else:
gs_stdates[gold_keys[i]] = None
if len(sys.argv)>1:
in_file = sys.argv[1]
# db structure
# Docs table (ID, filename, title, timestamp, source, svm(float), text)
# Hits table (primary key: ID, char: AMT_HitID, foreignkey: doc)
from models import *
assignments.drop(engine, checkfirst=True)
docs.drop(engine, checkfirst=True)
metadata.create_all(engine) # create if it doesn't exist yet
def nextline (file):
raw = file.readline()
if not raw:
raise EOFError()
if raw.find('-------------------------------------------------')>=0:
raise ValueError()
return raw.decode(fileencoding)
file = open(in_file, 'r')
readdocs = []
count = 0
while file:
meta = {}
try:
while True:
# read meta-data
dataline = nextline(file).rstrip()
if '>>>>>>>' in dataline:
break
elif ':' in dataline:
# split at the first colon
tag,value = dataline.split(':',1)
tag = tag.lower()
tag = tag.strip()
tag = tag.replace(' ','_')
# the Countries meta-data is represented as a list of
# ('Country', freq) pairs, where 'Country' is the name
# of the country and freq is the frequency it occurs in
# the document. We need to parse this and select the 5
# most frequent countries for inclusion in the database
if tag == 'countries':
valueList = value.strip().split(')')
countryList = []
for countryFreq in valueList:
countryTuple = tuple(countryFreq.strip()[1:].split(','))
if countryTuple[0] != '':
country = countryTuple[0]
freq = int(countryTuple[1])
countryList += [[country,freq]]
# sort countries in descending order by frequency
countryList.sort(key=itemgetter(1),reverse=True)
# get the 5 most frequent countries
# or all countries if there are less than five
for index in range(1, min(5, len(countryList)) + 1):
(country, freq) = countryList[index - 1]
meta['country'+str(index)] = country.strip("'")
else:
# add tag and value to the dictionary
meta[tag] = value.strip()
# get text of document
text = u""
while True:
t = nextline(file)
if t.find('<<<<<<<<<<<<<<<')>=0:
break
t = t.replace('\r\n','\n').replace('\r','\n').rstrip()
text += t + u"\n"
meta['text'] = text
# get NER and NELL-CAT from pipeline data
# open pipeline data file (HTML version)
# store the frequency of each NELL-CAT / NER location in document
#try:
locFreq = {}
counted = [False]*len(text)
with open('/data/mid/docs/' + meta['key'].strip()) as nell_file:
for line in nell_file:
if line.startswith('Type: ner'):
if line.find('Value: LOCATION') > -1:
if not(counted[spanStart]):
counted[spanStart] = True
location = text[spanStart:spanEnd]
if location in locFreq:
locFreq[location] = locFreq[location] + 1
else:
locFreq[location] = 1
elif line.startswith('Type: mid-attr'):
# parse out list of MID actions
# and the pipeline's confidence in each
# from the HTML output of the pipeline
preamble,values = line.split('Value: ',1)
valueList = values.split(',')
valueDict = {}
for value in valueList:
key,num = value.split(':',1)
if num.find('<') > -1:
num,html = num.split('<',1)
valueDict[key] = float(num)
# filter out actions other than those on
# our list of actions we care about
actionDict = {}
for action in mid_action_types:
# if there is a defined strength for this action
if action in valueDict:
actionDict[action] = valueDict[action]
else:
actionDict[action] = 0
# Set 'NO_MILITARIZED_ACTION' to at least threshold
if actionDict['NO_MILITARIZED_ACTION'] < MID_Threshold:
actionDict['NO_MILITARIZED_ACTION'] = MID_Threshold
elif line.startswith('Type: nell-cat'):
isLoc = False
if line.find('Value: location') > -1:
isLoc = True
if line.find('Value: country') > -1:
isLoc = True
if line.find('Value: geo') > -1:
isLoc = True
if isLoc:
if not(counted[spanStart]):
counted[spanStart] = True
location = text[spanStart:spanEnd]
if location in locFreq:
locFreq[location] = locFreq[location] + 1
else:
locFreq[location] = 1
elif line.startswith('<li><div class="annotation"'):
spanList = line.split(' ')
for span in spanList:
if span.startswith('spanStart='):
itemList = span.split('"')
spanStart = int(itemList[1])
elif span.startswith('spanEnd='):
itemList = span.split('"')
spanEnd = int(itemList[1])
# add mid action classification to document data
# pick the top 4 most confident classifications
# if the 4th classification is below threshold,
# suggest "No militarized action" instead
bestAction = sorted(actionDict, key=actionDict.get, reverse=True)
for index in range(1,5):
meta['mid_attr'+str(index)] = mid_dict[bestAction[index - 1]]
# pick the top 5 most frequent locations
bestLoc = sorted(locFreq, key=locFreq.get, reverse=True)
for index in range(1,min(5, len(bestLoc)) + 1):
meta['nelloc'+str(index)] = bestLoc[index - 1]
# add gold standard MID classification to database
gold_key = meta['gskey']
if gold_key in gs_actions:
gold_action = gs_actions[gold_key]
if gold_action in gold_dict:
gold_db = gold_dict[gold_action]
meta['gsAction'] = gold_db
else:
meta['gsAction'] = 'Unknown'
if (gs_mids[gold_key] > 6) or (gs_mids[gold_key] == 0):
meta['gsActionNum'] = gs_mids[gold_key]
else: # don't discriminate between actions 1 thru 6
meta['gsActionNum'] = 6
# convert nations into Qualtrics format
ini = gs_initiators[gold_key].lower().strip()
tar = gs_targets[gold_key].lower().strip()
if ini in nationDict:
meta['gsInitiator'] = nationDict[ini]
else:
meta['gsInitiator'] = gs_initiators[gold_key]
if tar in nationDict:
meta['gsTargets'] = nationDict[tar]
else:
meta['gsTargets'] = gs_targets[gold_key]
meta['gsDate'] = gs_stdates[gold_key]
# get ISO codes for gold standard nations
if ini in isoDict: | if tar in isoDict:
meta['gsTargetISO'] = isoDict[tar]
else:
print "ISO for " + tar + " not found."
# make several incorrect suggestions for the action
similar_actions = actDict[gold_action]
# suggest that there's no MID
# with proportion of non-MID documents in gold standard
suggest_no_mid = (gold_action != mid_action_gold[0]) and (randint(0,1) <= 0.2108)
if suggest_no_mid:
# randomly add "no MID action" as a suggestion
similar_actions = [mid_action_gold[0]] + similar_actions
# assign actions
meta['r1Action'] = gold_dict[similar_actions[0]]
meta['r2Action'] = gold_dict[similar_actions[1]]
meta['r3Action'] = gold_dict[similar_actions[2]]
meta['r4Action'] = gold_dict[similar_actions[3]]
# now let's make incorrect suggestions for the date
dateList = meta['date'].split(' ')
dateStr = ' '.join(dateList[0:3])
try:
pubDate = datetime.strptime(dateStr,'%B %d, %Y')
except:
try:
pubDate = datetime.strptime(dateStr,'%B %d, %Y,')
except:
pubDate = datetime.strptime(dateStr,'%B %d %Y')
# define a range of dates: 10 days before and 1 day after publication
pick_a_day = range(-10,1)
if meta['gsDate'] is not None:
pub_gold_diff = meta['gsDate'].day - pubDate.day
if pub_gold_diff in pick_a_day:
pick_a_day.pop(pick_a_day.index(pub_gold_diff)) # cannot pick gold/correct
# now select random dates within that range
suggested_dates = []
if suggest_no_mid and (meta['gsDate'] is not None):
suggested_dates = [None]
while len(suggested_dates) < 4:
j = randint(0,len(pick_a_day) - 1)
suggested_dates = suggested_dates + [pubDate + timedelta(days=pick_a_day[j])]
pick_a_day.pop(j)
# assign dates
meta['r1Date'] = suggested_dates[0]
meta['r2Date'] = suggested_dates[1]
meta['r3Date'] = suggested_dates[2]
meta['r4Date'] = suggested_dates[3]
# suggest some incorrect initiators
suggested_nations = []
if suggest_no_mid:
suggested_nations = suggested_nations + ['None']
if ('country1' in meta) and (meta['country1'] != gs_initiators[gold_key]):
suggested_nations = suggested_nations + [meta['country1']]
if ('country2' in meta) and (meta['country2'] != gs_initiators[gold_key]):
suggested_nations = suggested_nations + [meta['country2']]
if ('country3' in meta) and (meta['country3'] != gs_initiators[gold_key]):
suggested_nations = suggested_nations + [meta['country3']]
if ('country4' in meta) and (meta['country4'] != gs_initiators[gold_key]):
suggested_nations = suggested_nations + [meta['country4']]
if len(suggested_nations) < 4:
if (gs_initiators[gold_key] is not None) and (gs_initiators[gold_key] != '') and (gs_initiators[gold_key] is not 'None'):
similar_nations = natDict[gs_initiators[gold_key]]
elif (len(suggested_nations) > 0) and (suggested_nations[0] in natDict):
similar_nations = natDict[suggested_nations[0]]
else: # get some random countries
similar_nations = natDict['NATO']
shuffle(similar_nations)
for nation in similar_nations:
if not(nation in suggested_nations):
suggested_nations = suggested_nations + [nation]
if len(suggested_nations) > 4:
break
# assign suggestions
meta['r1Initiator'] = suggested_nations[0]
meta['r2Initiator'] = suggested_nations[1]
meta['r3Initiator'] = suggested_nations[2]
meta['r4Initiator'] = suggested_nations[3]
# suggest some incorrect targets
suggested_nations = []
if suggest_no_mid:
suggested_nations = suggested_nations + ['None']
if ('country1' in meta) and (meta['country1'] != gs_targets[gold_key]):
suggested_nations = suggested_nations + [meta['country1']]
if ('country2' in meta) and (meta['country2'] != gs_targets[gold_key]):
suggested_nations = suggested_nations + [meta['country2']]
if ('country3' in meta) and (meta['country3'] != gs_targets[gold_key]):
suggested_nations = suggested_nations + [meta['country3']]
if ('country4' in meta) and (meta['country4'] != gs_targets[gold_key]):
suggested_nations = suggested_nations + [meta['country4']]
if len(suggested_nations) < 4:
if (gs_targets[gold_key] is not None) and (gs_targets[gold_key] != '') and (gs_targets[gold_key] is not 'None'):
similar_nations = natDict[gs_targets[gold_key]]
elif (len(suggested_nations) > 0) and (suggested_nations[0] in natDict):
similar_nations = natDict[suggested_nations[0]]
else: # get some random countries
similar_nations = natDict['NATO']
shuffle(similar_nations)
for nation in similar_nations:
if not(nation in suggested_nations):
suggested_nations = suggested_nations + [nation]
if len(suggested_nations) > 4:
break
# assign suggestions
meta['r1Targets'] = suggested_nations[0]
meta['r2Targets'] = suggested_nations[1]
meta['r3Targets'] = suggested_nations[2]
meta['r4Targets'] = suggested_nations[3]
# check to see if gold standard classification
# is correctly guessed by NELL
if meta['gsAction'] == meta['mid_attr1']:
meta['correct'] = 1
elif meta['gsAction'] == meta['mid_attr2']:
meta['correct'] = 1
elif meta['gsAction'] == meta['mid_attr3']:
meta['correct'] = 1
elif meta['gsAction'] == meta['mid_attr4']:
meta['correct'] = 1
else:
meta['correct'] = 0
#except:
# print "Could not find pipeline data for document "+meta['key']
# add data from this document to the document list
if (meta['gsAction'].lower() == 'unknown') or (meta['gsAction'] == ''):
pass # if the action is unknown, do not include document
elif meta['gsActionNum'] == 0:
readdocs += [meta] # if the action is 0 / no MID, include
elif (meta['gsInitiator'] == '') or (meta['gsInitiator'].lower() == 'none'):
pass # if the action is not zero, and no initiator, do not include
elif (meta['gsTargets'] == '') or (meta['gsTargets'].lower() == 'none'):
pass # if the action is not zero, and no target, do not include
else: # otherwise, include
readdocs += [meta]
try:
nextline(file)
except ValueError:
pass
except EOFError:
break
except ValueError:
print meta
print "document could not be read - encountered ----- separator. skipping doc."
# shuffle readdocs
from random import shuffle
shuffle(readdocs)
for data in readdocs:
# sqlalchemy.sql.expression.insert(table, values=None, inline=False, **kwargs)
from sqlalchemy.exc import IntegrityError
try:
# chk = docs.select(docs.c.filename==fname)
# result = engine.execute(chk.exists())
existing = connection.execute(docs.select(docs.c.key==data['key']))
if existing.first():
ins = docs.update().where(docs.c.key==data['key']).values(data)
else:
ins = docs.insert().values(data)
result = engine.execute(ins)
except IntegrityError as x:
print x
count = count+1
if count%100 == 0:
print count
print "%s imported."%count
# 20100913--0161-Sep13_2010_LN_NP1.txt-files.list
# Australian troops to encounter more violence in Afghanistan: Defense Force AFG-AUL
# September 13, 2010 Monday 1:25 AM EST
# News source: (c) Xinhua General News Service
# SVM score: 1.757
# >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
# Australian troops in Afghanistan can expect to encounter increased violence as
# they push deeper into Taliban sanctuaries, Australian Defense Force warned on
# Monday.
# Air Chief Marshal Angus Houston told reporters during a briefing in Canberra on
# Monday that troops of the Afghan National Army 4th Brigade, accompanied by
# their Australian mentors, were heading deeper into the Baluchi and Chora
# Valleys and Deh Rawood area of Oruzgan province in Afghanistan.
# "These are partnered patrols and it is dangerous work," said Houston, adding
# the fight was becoming more intense.
# "We can expect violence levels to increase as we contest the insurgency in
# greater numbers and across a wider area in the south. "
# Afghanistan is now in the midst of its fighting season with 10 Australian
# soldiers killed so far this year.
# The most recent fatality was Lance Corporal Jared MacKinney who was killed
# while accompanying Afghan troops in the Tangi Valley near Deh Rawood.
# The past few months had been tough for Australian troops, Houston said.
# "But it is important that we maintain our resolve, push forward with the
# strategy and keep the pressure on the Taliban," he said.
# He said last Tuesday that Australian troops had achieved significant success in
# training the Afghan security forces and pressuring insurgents.
# <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< | meta['gsInitiatorISO'] = isoDict[ini]
else:
print "ISO for " + ini + " not found." | random_line_split |
auth.controllers.js | /**
* Admin login/signup/update/delete actions which execute the queries defined in auth.models.js.
*
* @version 1.0.0
* @author [Yayen Lin](https://github.com/yayen-lin)
* src: https://github.com/Scavenge-UW/Scavenge
*/
// TODO: remove console log debugging output
// TODO: return json auth needs to be re-determined
// our db for auth action
const authDB = require("../models/auth.models.js");
// helper functions
const Utils = require("../helpers/utils");
const Response = require("../helpers/response");
const Validation = require("../helpers/validation");
const datetimeConverter = require("../helpers/datetimeConverter");
// tools
const moment = require("moment");
const session = require("express-session");
/* -------------------------------------- cookie setting -------------------------------------- */
let refreshTokensByID = {}; // { manager_id: refreshToken }
// cookie setting w/ access token
const accessCookieOptions = {
// cookie expires after 90 mins from the time it is set.
expires: new Date(
Date.now() + process.env.JWT_COOKIE_ACCESS_EXPIRES * 60 * 1000
),
httpOnly: true, // for security reason it's recommended to set httpOnly to true
sameSite: true,
};
// cookie setting w/ refresh token
const refreshCookieOptions = {
// cookie expires after 3 days from the time it is set.
expires: new Date(
Date.now() + process.env.JWT_COOKIE_REFRESH_EXPIRES * 60 * 60 * 1000
),
httpOnly: true, // for security reason it's recommended to set httpOnly to true
sameSite: true,
};
/**
* Admin sign up action
*
* @param {*} req
* @param {*} res
* @returns
*/
exports.adminSignupAction = (req, res) => {
const { username, firstname, lastname, password } = req.body;
const privilegeDefault = "3";
const activeDefault = "1";
const createdOn = datetimeConverter.toMySqlDateTime(moment(new Date()));
// check username
if (!Validation.validateUsername(username)) {
return Response.sendErrorResponse({
res,
message: "Please provide a valid username",
statusCode: 400,
});
}
// check password
if (!Validation.validatePassword(password)) {
return Response.sendErrorResponse({
res,
message: "Please provide a valid password",
statusCode: 400,
});
}
// hash + salt password
const hash = Utils.hashPassword(password);
// new user info
const vals = [
username,
firstname || null,
lastname || null,
hash,
privilegeDefault,
activeDefault,
createdOn,
];
authDB
.adminSignup(req, res, vals)
.then(async (rows) => {
console.log("auth.controllers - signup - rows = ", rows);
// TODO: check and remove password if exists in response
// dbResponse = rows[0];
// delete dbResponse.password;
return Response.sendResponse({
res,
responseBody: { user: rows /*dbResponse*/ },
statusCode: 201,
message: "User successfully created",
});
})
.catch((err) => {
console.log(err, "error");
return Response.sendErrorResponse({
res,
message: error,
statusCode: 500,
});
});
};
/**
* Admin login action
*
* @param {*} req
* @param {*} res
* @returns
*/
exports.adminLoginAction = (req, res) => {
const { username, password } = req.body;
if (Validation.isEmpty(username) || !Validation.validateUsername(username)) {
return Response.sendErrorResponse({
res,
message: "Username is missing.",
statusCode: 400,
});
}
if (Validation.isEmpty(password) || !Validation.validatePassword(password)) {
return Response.sendErrorResponse({
res,
message: "Password is missing.",
statusCode: 400,
});
}
authDB
.adminLogin(req, res, username)
.then(async (results) => {
console.log("auth.controllers - login - results = ", results);
// results = [
// RowDataPacket {
// manager_id: 6,
// username: 'andy-01',
// firstname: 'Yayen',
// lastname: 'Lin',
// password: '$2a$10$LSBVvNc8IU.mA9lKHHKka.vmv./MpDVax.5XZRJoxqvqqPzFZJny6',
// privilege: '3',
// active: '1',
// createdOn: 2021-07-11T09:48:55.000Z
// }
// ]
const dbResponse = results[0];
if (!dbResponse)
return Response.sendErrorResponse({
res,
message: "Username does not exist.",
statusCode: 400,
});
if (!Validation.comparePassword(dbResponse.password, password))
return Response.sendErrorResponse({
res,
message: "The password you provided is incorrect",
statusCode: 400,
});
// login successfully
console.log("Logged in successfully!");
console.log("dbResponse", dbResponse);
// access token - give users access to protected resources
const token = Utils.generateJWT(dbResponse); // passing payload to jwt
// refresh token - allow users request new tokens
const refreshExpiry = moment()
.utc()
.add(3, "days")
.endOf("day")
.format("X");
const refreshToken = Utils.generateJWT({
exp: parseInt(refreshExpiry),
data: dbResponse.manager_id,
});
// add cookies to the response
res.cookie(process.env.JWT_ACCESS, token, accessCookieOptions);
res.cookie(process.env.JWT_REFRESH, refreshToken, refreshCookieOptions);
// add refreshToken to our refreshToken obj
refreshTokensByID[dbResponse.manager_id] = refreshToken;
console.log("added to refershTokensByID: ", refreshTokensByID);
// FIXME: create session for logged in user.
let sess = req.session;
sess.user_id = dbResponse.manager_id;
console.log("session", sess);
delete dbResponse.password; // removed password before return
return Response.sendResponse({
res,
responseBody: {
user: dbResponse,
token,
refresh: refreshToken,
},
message: "Login successful.",
});
})
.catch((err) => {
console.log(err);
return Response.sendErrorResponse({
res,
message: err,
statusCode: 500,
});
});
};
/**
* fetch logged in user info
*
* @param {*} req
* @param {*} res
* @returns current logged in user info
*/
exports.me = async (req, res) => {
const { user, token, tokenExp, toRefresh } = res;
console.log(res.user);
delete res.user.iat;
delete res.user.exp;
delete res.user.aud;
delete res.user.iss;
delete res.user.sub;
delete res.user.password;
try {
return Response.sendResponse({
res,
message: "User details successfully fetched",
responseBody: {
user: user,
token: token,
refresh: refreshTokensByID[user.manager_id],
tokenExp: tokenExp,
toRefresh: toRefresh,
},
});
} catch (error) {
console.log(error);
return Response.sendErrorResponse({
res,
message: "Unable to fetch currently logged in user information.",
statusCode: 400,
});
}
};
/**
* if access token has expired, renew the access token and call next();
* if not, call next(); directly.
*
* @param {*} req
* @param {*} res
* @param {*} next
*/
exports.refreshTokenAction = async (req, res) => {
// console.log("----------------------------------------- req");
// console.log(req.token);
// console.log(req.user);
// console.log(req.sessionID);
// console.log("----------------------------------------- res");
// console.log(res.token);
// console.log(res.user);
const { user } = req.body;
// console.log("res", res.user.manager_id);
const refresh = refreshTokensByID[user.manager_id];
// FIXME: refresh turns undefined sometimes
console.log("refresh", refresh);
// if refresh token missing
if (!refresh)
return Response.sendErrorResponse({
res,
message: "No refresh token provided.", | statusCode: 403,
});
// if refresh token expires
if (refresh) {
try {
const decoded = Utils.verifyJWT(refresh);
// {
// exp: 1626825599,
// data: 6,
// iat: 1626500973,
// aud: 'jwt-node',
// iss: 'jwt-node',
// sub: 'jwt-node'
// }
const exp = decoded.exp || null;
const now = new Date(Date.now()).getTime() / 1000;
// if no exp in decoded or id doesn't match
if (!exp || decoded.data !== user.manager_id)
return Response.sendErrorResponse({
res,
message: "Invalid refresh token.",
statusCode: 403,
});
console.log("Got here - 1");
// if refresh token expires
if (now > exp)
return Response.sendErrorResponse({
res,
message: "Refresh token expired, please log back in again.",
statusCode: 403,
});
console.log("Got here - 2");
// generate new access token using logged in user's info
const newToken = Utils.generateJWT(user);
// clear the old cookie
res.clearCookie(process.env.JWT_ACCESS);
// add the new cookie to response
res.cookie(process.env.JWT_ACCESS, newToken, accessCookieOptions);
console.log("Got here - 3");
console.log(user); // FIXME: undefined?
return Response.sendResponse({
res,
message: "Token renewed.",
responseBody: {
user: user,
token: newToken,
refresh: refresh,
},
statusCode: 200,
});
} catch (err) {
console.log(err);
return Response.sendErrorResponse({
res,
message: err,
statusCode: 500,
});
}
}
};
/**
* Admin logout action
*
* @param {*} req
* @param {*} res
* @returns
*/
exports.adminLogoutAction = (req, res) => {
console.log("LOGGIN OUT!!");
// replace cookie with logout cookie
res.cookie(process.env.JWT_ACCESS, "logout", {
// cookie expires after 2 sec from the time it is set.
expires: new Date(Date.now() + 2 * 1000),
httpOnly: true,
sameSite: true,
});
// replace refresh cookie with logout cookie
res.cookie(process.env.JWT_REFRESH, "logout", {
// cookie expires after 2 sec from the time it is set.
expires: new Date(Date.now() + 2 * 1000),
httpOnly: true,
sameSite: true,
});
// destroy session
req.session.destroy((err) => {
if (err)
return Response.sendErrorResponse({
res,
message: "Something happened while destroying the session.",
statusCode: 400,
});
});
try {
return Response.sendResponse({
res,
message: "Successfully logged out.",
statusCode: 200,
});
} catch (error) {
console.log(errer);
return Response.sendErrorResponse({
res,
message: "Unable to log out.",
statusCode: 400,
});
}
}; | random_line_split | |
auth.controllers.js | /**
* Admin login/signup/update/delete actions which execute the queries defined in auth.models.js.
*
* @version 1.0.0
* @author [Yayen Lin](https://github.com/yayen-lin)
* src: https://github.com/Scavenge-UW/Scavenge
*/
// TODO: remove console log debugging output
// TODO: return json auth needs to be re-determined
// our db for auth action
const authDB = require("../models/auth.models.js");
// helper functions
const Utils = require("../helpers/utils");
const Response = require("../helpers/response");
const Validation = require("../helpers/validation");
const datetimeConverter = require("../helpers/datetimeConverter");
// tools
const moment = require("moment");
const session = require("express-session");
/* -------------------------------------- cookie setting -------------------------------------- */
let refreshTokensByID = {}; // { manager_id: refreshToken }
// cookie setting w/ access token
const accessCookieOptions = {
// cookie expires after 90 mins from the time it is set.
expires: new Date(
Date.now() + process.env.JWT_COOKIE_ACCESS_EXPIRES * 60 * 1000
),
httpOnly: true, // for security reason it's recommended to set httpOnly to true
sameSite: true,
};
// cookie setting w/ refresh token
const refreshCookieOptions = {
// cookie expires after 3 days from the time it is set.
expires: new Date(
Date.now() + process.env.JWT_COOKIE_REFRESH_EXPIRES * 60 * 60 * 1000
),
httpOnly: true, // for security reason it's recommended to set httpOnly to true
sameSite: true,
};
/**
* Admin sign up action
*
* @param {*} req
* @param {*} res
* @returns
*/
exports.adminSignupAction = (req, res) => {
const { username, firstname, lastname, password } = req.body;
const privilegeDefault = "3";
const activeDefault = "1";
const createdOn = datetimeConverter.toMySqlDateTime(moment(new Date()));
// check username
if (!Validation.validateUsername(username)) {
return Response.sendErrorResponse({
res,
message: "Please provide a valid username",
statusCode: 400,
});
}
// check password
if (!Validation.validatePassword(password)) {
return Response.sendErrorResponse({
res,
message: "Please provide a valid password",
statusCode: 400,
});
}
// hash + salt password
const hash = Utils.hashPassword(password);
// new user info
const vals = [
username,
firstname || null,
lastname || null,
hash,
privilegeDefault,
activeDefault,
createdOn,
];
authDB
.adminSignup(req, res, vals)
.then(async (rows) => {
console.log("auth.controllers - signup - rows = ", rows);
// TODO: check and remove password if exists in response
// dbResponse = rows[0];
// delete dbResponse.password;
return Response.sendResponse({
res,
responseBody: { user: rows /*dbResponse*/ },
statusCode: 201,
message: "User successfully created",
});
})
.catch((err) => {
console.log(err, "error");
return Response.sendErrorResponse({
res,
message: error,
statusCode: 500,
});
});
};
/**
* Admin login action
*
* @param {*} req
* @param {*} res
* @returns
*/
exports.adminLoginAction = (req, res) => {
const { username, password } = req.body;
if (Validation.isEmpty(username) || !Validation.validateUsername(username)) |
if (Validation.isEmpty(password) || !Validation.validatePassword(password)) {
return Response.sendErrorResponse({
res,
message: "Password is missing.",
statusCode: 400,
});
}
authDB
.adminLogin(req, res, username)
.then(async (results) => {
console.log("auth.controllers - login - results = ", results);
// results = [
// RowDataPacket {
// manager_id: 6,
// username: 'andy-01',
// firstname: 'Yayen',
// lastname: 'Lin',
// password: '$2a$10$LSBVvNc8IU.mA9lKHHKka.vmv./MpDVax.5XZRJoxqvqqPzFZJny6',
// privilege: '3',
// active: '1',
// createdOn: 2021-07-11T09:48:55.000Z
// }
// ]
const dbResponse = results[0];
if (!dbResponse)
return Response.sendErrorResponse({
res,
message: "Username does not exist.",
statusCode: 400,
});
if (!Validation.comparePassword(dbResponse.password, password))
return Response.sendErrorResponse({
res,
message: "The password you provided is incorrect",
statusCode: 400,
});
// login successfully
console.log("Logged in successfully!");
console.log("dbResponse", dbResponse);
// access token - give users access to protected resources
const token = Utils.generateJWT(dbResponse); // passing payload to jwt
// refresh token - allow users request new tokens
const refreshExpiry = moment()
.utc()
.add(3, "days")
.endOf("day")
.format("X");
const refreshToken = Utils.generateJWT({
exp: parseInt(refreshExpiry),
data: dbResponse.manager_id,
});
// add cookies to the response
res.cookie(process.env.JWT_ACCESS, token, accessCookieOptions);
res.cookie(process.env.JWT_REFRESH, refreshToken, refreshCookieOptions);
// add refreshToken to our refreshToken obj
refreshTokensByID[dbResponse.manager_id] = refreshToken;
console.log("added to refershTokensByID: ", refreshTokensByID);
// FIXME: create session for logged in user.
let sess = req.session;
sess.user_id = dbResponse.manager_id;
console.log("session", sess);
delete dbResponse.password; // removed password before return
return Response.sendResponse({
res,
responseBody: {
user: dbResponse,
token,
refresh: refreshToken,
},
message: "Login successful.",
});
})
.catch((err) => {
console.log(err);
return Response.sendErrorResponse({
res,
message: err,
statusCode: 500,
});
});
};
/**
* fetch logged in user info
*
* @param {*} req
* @param {*} res
* @returns current logged in user info
*/
exports.me = async (req, res) => {
const { user, token, tokenExp, toRefresh } = res;
console.log(res.user);
delete res.user.iat;
delete res.user.exp;
delete res.user.aud;
delete res.user.iss;
delete res.user.sub;
delete res.user.password;
try {
return Response.sendResponse({
res,
message: "User details successfully fetched",
responseBody: {
user: user,
token: token,
refresh: refreshTokensByID[user.manager_id],
tokenExp: tokenExp,
toRefresh: toRefresh,
},
});
} catch (error) {
console.log(error);
return Response.sendErrorResponse({
res,
message: "Unable to fetch currently logged in user information.",
statusCode: 400,
});
}
};
/**
* if access token has expired, renew the access token and call next();
* if not, call next(); directly.
*
* @param {*} req
* @param {*} res
* @param {*} next
*/
exports.refreshTokenAction = async (req, res) => {
// console.log("----------------------------------------- req");
// console.log(req.token);
// console.log(req.user);
// console.log(req.sessionID);
// console.log("----------------------------------------- res");
// console.log(res.token);
// console.log(res.user);
const { user } = req.body;
// console.log("res", res.user.manager_id);
const refresh = refreshTokensByID[user.manager_id];
// FIXME: refresh turns undefined sometimes
console.log("refresh", refresh);
// if refresh token missing
if (!refresh)
return Response.sendErrorResponse({
res,
message: "No refresh token provided.",
statusCode: 403,
});
// if refresh token expires
if (refresh) {
try {
const decoded = Utils.verifyJWT(refresh);
// {
// exp: 1626825599,
// data: 6,
// iat: 1626500973,
// aud: 'jwt-node',
// iss: 'jwt-node',
// sub: 'jwt-node'
// }
const exp = decoded.exp || null;
const now = new Date(Date.now()).getTime() / 1000;
// if no exp in decoded or id doesn't match
if (!exp || decoded.data !== user.manager_id)
return Response.sendErrorResponse({
res,
message: "Invalid refresh token.",
statusCode: 403,
});
console.log("Got here - 1");
// if refresh token expires
if (now > exp)
return Response.sendErrorResponse({
res,
message: "Refresh token expired, please log back in again.",
statusCode: 403,
});
console.log("Got here - 2");
// generate new access token using logged in user's info
const newToken = Utils.generateJWT(user);
// clear the old cookie
res.clearCookie(process.env.JWT_ACCESS);
// add the new cookie to response
res.cookie(process.env.JWT_ACCESS, newToken, accessCookieOptions);
console.log("Got here - 3");
console.log(user); // FIXME: undefined?
return Response.sendResponse({
res,
message: "Token renewed.",
responseBody: {
user: user,
token: newToken,
refresh: refresh,
},
statusCode: 200,
});
} catch (err) {
console.log(err);
return Response.sendErrorResponse({
res,
message: err,
statusCode: 500,
});
}
}
};
/**
* Admin logout action
*
* @param {*} req
* @param {*} res
* @returns
*/
exports.adminLogoutAction = (req, res) => {
console.log("LOGGIN OUT!!");
// replace cookie with logout cookie
res.cookie(process.env.JWT_ACCESS, "logout", {
// cookie expires after 2 sec from the time it is set.
expires: new Date(Date.now() + 2 * 1000),
httpOnly: true,
sameSite: true,
});
// replace refresh cookie with logout cookie
res.cookie(process.env.JWT_REFRESH, "logout", {
// cookie expires after 2 sec from the time it is set.
expires: new Date(Date.now() + 2 * 1000),
httpOnly: true,
sameSite: true,
});
// destroy session
req.session.destroy((err) => {
if (err)
return Response.sendErrorResponse({
res,
message: "Something happened while destroying the session.",
statusCode: 400,
});
});
try {
return Response.sendResponse({
res,
message: "Successfully logged out.",
statusCode: 200,
});
} catch (error) {
console.log(errer);
return Response.sendErrorResponse({
res,
message: "Unable to log out.",
statusCode: 400,
});
}
};
| {
return Response.sendErrorResponse({
res,
message: "Username is missing.",
statusCode: 400,
});
} | conditional_block |
main.py | #coding:utf-8
# Thanks to:Kuldeep Singh, student at LNMIIT,Jaipur,India
# import Statements
import calendar
import time
import datetime
from kivy import resources
from kivy.app import App
from kivy.lang import Builder
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.gridlayout import GridLayout
from kivy.properties import ListProperty
from kivy.properties import ObjectProperty
from kivy.properties import StringProperty
from kivy.clock import Clock
from kivy.uix.popup import Popup
from kivy.uix.button import Button
from kivy.uix.label import Label
from kivy.event import EventDispatcher
from kivy.uix.textinput import TextInput
from sklearn.externals import joblib
from kivy.uix.image import Image
import pandas as pd
import numpy as np
from model import Model
from kivy.garden.graph import Graph, MeshLinePlot
from kivy.uix.scrollview import ScrollView
from datetime import timedelta
# Add source for Chinese characters
resources.resource_add_path("C:\\Windows\\Fonts")
font_name=resources.resource_find('WeiRuanZhengHeiTi-2.ttc')
color_shadow_blue=(.53125,.66796875,.7890625,1)
color_sky_blue=(1/256,158/256,213/256,1)
color_deep_blue=(17/256,64/256,108/256,1)
color_light_blue=(38/256,188/256,213/256,1)
# Builder used to load all the kivy files to be loaded in the main.py file
Builder.load_file('months.kv')
Builder.load_file('dates.kv')
Builder.load_file('select.kv')
Builder.load_file('status.kv')
Builder.load_file('days.kv')
Builder.load_file('calender.kv')
#------Kivy GUI Configuration--
# class for calender.kv file
class Calender(BoxLayout):
def __init__(self,**kwargs):
super().__init__(**kwargs)
# Integrating other classes
self.select_=Select()
self.months_=Months()
self.days_=Days()
self.dates_=Dates()
self.status_=Status()
# Adding layout
self.layout_1=BoxLayout(size_hint=(1,.1))
self.layout_1.add_widget(self.select_)
self.layout_2=BoxLayout()
self.layout_3=BoxLayout(orientation='vertical')
self.layout_3.add_widget(self.days_)
self.layout_3.add_widget(self.dates_)
self.layout_2.add_widget(self.months_)
self.layout_2.add_widget(self.layout_3)
self.layout_4=BoxLayout(size_hint=(1,.1))
self.layout_4.add_widget(self.status_)
self.add_widget(self.layout_1)
self.add_widget(self.layout_2)
self.add_widget(self.layout_4)
# ------------------------------------------------------------------------------------------------#
class Content(BoxLayout):
def __init__(self,**kwargs):
super().__init__(**kwargs)
self.orientation='horizontal'
self.layout_num=BoxLayout(orientation='vertical',size_hint=(1,1))
self.layout_time=BoxLayout(orientation='vertical',size_hint=(1,1))
self.layout_action=BoxLayout(orientation='vertical',size_hint=(1,1))
self.layout_num.add_widget(Label(text='故障电梯编号',font_name=font_name,font_size='20sp'))
self.layout_time.add_widget(Label(text='故障时间',font_name=font_name,font_size='20sp'))
self.layout_action.add_widget(Label(text='采取操作',font_name=font_name,font_size='20sp'))
self.textinput_num=TextInput(multiline=False)
self.textinput_time=TextInput(multiline=False)
self.textinput_time.text=time.strftime('%Y-%m-%d %H:%M:%S',time.localtime(time.time()))
self.textinput_action=TextInput(multiline=False)
self.layout_num.add_widget(self.textinput_num)
self.layout_time.add_widget(self.textinput_time)
self.layout_action.add_widget(self.textinput_action)
self.add_widget(self.layout_num)
self.add_widget(self.layout_time)
self.add_widget(self.layout_action)
self.add_widget(Button(text='确认',font_size='20sp',color=(0,0,0,1),font_name=font_name,on_press=self.ct_on_press))
def ct_on_press(self,event):
self.app_=App.get_running_app()
self.model_=self.app_.calendar_.dates_.model_
self.num=self.textinput_num.text
self.time=self.textinput_time.text
self.action=self.textinput_action.text
elevators=self.model_.samples
if self.num is not '':
idx=np.where(elevators['equip_no']==int(self.num))[0]
body=self.num+'\t'+self.time+'\t'+self.action+'\t'
if len(idx)>0:
infos=elevators[['Business type','Description','City','速度','设备型号','T']].iloc[idx[0]]
for info in infos:
body=body+str(info)+'\t'
body=body+'\n'
file_name='Log-New-Breakdown.tsv'
f=open(file_name,'a',encoding="utf-8")
f.write(body)
f.close()
print('成功输入新数据!')
else:
pass
self.textinput_num.text=''
self.textinput_time.text=time.strftime('%Y-%m-%d %H:%M:%S',time.localtime(time.time()))
self.textinput_action.text=''
# class for status.kv file
class Status(BoxLayout,EventDispatcher):
def __init__(self,**kwargs):
super().__init__(**kwargs)
self.orientation='horizontal'
self.add_widget(Button(on_press=self.on_press,text='输入走修数据',font_name=font_name,background_color=color_deep_blue,background_normal='',font_size='20sp',color=(1,1,1,1)))
def on_dismiss(self, arg):
pass
def on_press(self,event):
self.popup = Popup(title='Input Data',
content = Content(),
size_hint=(0.8,0.8))
self.popup.bind(on_dismiss=self.on_dismiss)
self.popup.open()
# ------------------------------------------------------------------------------------------------#
# class for Days.kv file
class Days(GridLayout):
def __init__(self,**kwargs):
super().__init__(**kwargs)
# ------------------------------------------------------------------------------------------------#
# class for select.kv file
class Select(BoxLayout):
lbl_ = ObjectProperty(None)
def __init__(self,**kwargs):
super().__init__(**kwargs)
self.lbl_.text='通力电梯公司维保可视化产品'
self.lbl_.font_name=font_name
# class for Reminder in Dates
class Reminder(BoxLayout):
def __init__(self,**kwargs):
super().__init__(**kwargs)
self.app_=App.get_running_app()
# Get the dates clicked
self.model_=self.app_.calendar_.dates_.model_
# Information to be added
self.body=self.model_.printSelectedResults()
# Layout arrangementw
self.orientation = 'vertical'
# Elevators information
self.layout_comp=BoxLayout(orientation = 'horizontal' , size_hint = (1,1))
self.layout_map=BoxLayout(orientation = 'horizontal' , size_hint = (1,1))
self.img_comp_1=Image(source='component_1.png',keep_ratio=False,size_hint=(1,1),allow_stretch=False,mipmap=True)
self.img_comp_2=Image(source='component_2.png',keep_ratio=False,size_hint=(1,1),allow_stretch=False,mipmap=True)
self.layout_comp.add_widget(self.img_comp_1)
self.layout_comp.add_widget(self.img_comp_2)
self.img_map_1=Image(source='map_kunshan.png',keep_ratio=False,size_hint=(1,1),allow_stretch=True)
self.img_map_2=Image(source='map_sh.png',keep_ratio=False,size_hint=(1,1),allow_stretch=True)
self.layout_map.add_widget(self.img_map_1)
self.layout_map.add_widget(Label(size_hint=(0.1,1),text='昆\n山\n地\n区\n电\n梯\n分\n布\n图',font_name=font_name,font_size='20sp',color=(0,0,0,1)))
self.layout_map.add_widget(self.img_map_2)
self.layout_map.add_widget(Label(size_hint=(0.1,1),text='上\n海\n地\n区\n电\n梯\n分\n布\n图',font_name=font_name,font_size='20sp',color=(0,0,0,1)))
self.layout_fig=BoxLayout(orientation = 'vertical' , size_hint = (1,0.7))
#self.layout_fig.add_widget(self.layout_comp)
self.layout_fig.add_widget(self.layout_map)
self.add_widget(self.layout_fig)
#self.layout_scroll_lb=Label(text=self.body,size_hint=(1,None))
#self.layout_scroll_lb.height=self.layout_scroll_lb.texture_size[1]
#self.layout_scroll=ScrollableLabel(text=self.body)
#self.layout_scroll.add_widget(self.layout_scroll_lb)
#self.layout1_title=Label(text='以下电梯预测将在30天内发生故障:\n'+self.body,font_name=font_name)
#self.layout1_title.size=self.layout1_title.texture_size
#self.layout1.add_widget(self.layout1_title)
#self.layout1.add_widget(self.layout_scroll)
# Plots
self.graph_theme = {
'label_options': {
'color': (0,0,0,1), # color of tick labels and titles
'bold': True},
'tick_color': (0,0,0,1)} # ticks and grid
self.graph=Graph(xlabel='Current time',ylabel='Maintenance date',
x_ticks_major=5, y_ticks_major=5,
y_grid_label=True, x_grid_label=True, padding=5,
x_grid=True, y_grid=True, xmin=-0, xmax=31, ymin=-0, ymax=31,**self.graph_theme)
self.plot = MeshLinePlot(color=[1, 0, 0, 1])
self.best_maint_dates=joblib.load('mon_best_int_np.asv')
self.best_maint_dates=self.best_maint_dates[self.model_.month-1]
self.plot.points = [(x+1, x+1+self.best_maint_dates[x]) for x in range(len(self.best_maint_dates))]
self.graph.add_plot(self.plot)
self.layout_graph=BoxLayout(orientation = 'vertical' , size_hint = (0.7,1))
self.layout_graph.add_widget(Label(text='本月最优维保日期随时间变化图',font_name=font_name,size_hint=(1,0.1),font_size='16sp',color=(0,0,0,1)))
self.layout_graph.add_widget(self.graph)
# Note for user
self.layout_info=BoxLayout(orientation = 'vertical' , size_hint = (0.3,1))
self.layout_info.add_widget(Label(text='待预防性维护电梯信息:\n设备编号\n设备所在区域类型\n故障信息\n所在城市\n电梯运行速度\n设备型号\n距离上一次维修天数',font_name=font_name,pos_hint={'x': 0.5, 'center_y': .5},font_size='16sp',color=(0,0,0,1)))
self.layout_note=BoxLayout(orientation = 'vertical' , size_hint = (0.5,0.8))
self.layout_note.add_widget(Button(on_press = self.on_press,text='输出\n电梯\n信息',font_name=font_name,pos_hint={'x': .5, 'y': 1},size_hint=(0.4,0.2),font_size='20sp',color=(0,0,0,1),background_color=color_shadow_blue))
self.layout_graph_note=BoxLayout(orientation = 'horizontal' , size_hint = (1,0.5))
self.layout_graph_note.add_widget(self.layout_graph)
self.layout_graph_note.add_widget(self.layout_info)
self.layout_graph_note.add_widget(self.layout_note)
self.add_widget(self.layout_graph_note)
self.layout2 = BoxLayout(orientation = 'horizontal' , size_hint = (1,.15))
self.add_widget(self.layout2)
self.layout2.add_widget(Label(text = "请按 'ESC'键或点击窗外以关闭窗口",font_name=font_name,font_size='20sp',color=(1,0,0,1)))
def on_release(self,event):
print ("Reminder OK Clicked!")
def on_press(self,event):
file_name='Log-{:}-{:}-{:}.tsv'.format(self.model_.year,self.model_.month,self.model_.day)
f=open(file_name,'w',encoding="utf-8")
f.write(self.body)
f.close()
# ------------------------------------------------------------------------------------------------#
# class for dates.kv file
class Dates(GridLayo | self.model_=Model(year=self.month_.year,month=self.month_.month,day=self.month_.day)
# Best maintainance date
self.maintainance_timedelta=datetime.timedelta(days=self.model_.findBestMaintInterval())
self.best_maint_date=datetime.datetime(self.month_.year,self.month_.month,self.month_.day)
self.best_maint_date=self.best_maint_date+self.maintainance_timedelta
print('Best maintenance interval: {:} and best maintenance date: {:}'.format(self.maintainance_timedelta,self.best_maint_date))
# Update dates paddle when choose different months
self.update_dates(self.month_.year,self.month_.month)
def update_dates(self,year,month):
print('Update dates!')
self.clear_widgets()
c = calendar.monthcalendar(year,month)
# Show the best maintenance date if current month is clicked
if self.best_maint_date.month is month:
for i in c:
for j in i:
if j == 0:
self.add_widget(Button(on_press = self.on_press,on_release=self.on_release,text = '{j}'.format(j=''),font_size='20sp',color=(0,0,0,1)))
elif j==self.best_maint_date.day:
self.add_widget(Button(on_press = self.on_press,on_release=self.on_release,text = '{j}'.format(j=j),background_color=(1,0,0,1),font_size='20sp',color=(0,0,0,1)))
else:
self.add_widget(Button(on_press = self.on_press, on_release=self.on_release,text = '{j}'.format(j=j),font_size='20sp',color=(0,0,0,1)))
else:
for i in c:
for j in i:
if j == 0:
self.add_widget(Button(on_press = self.on_press,on_release=self.on_release,text = '{j}'.format(j=''),font_size='20sp',color=(0,0,0,1)))
else:
self.add_widget(Button(on_press = self.on_press, on_release=self.on_release,text = '{j}'.format(j=j),font_size='20sp',color=(0,0,0,1)))
def on_dismiss(self, arg):
# Do something on close of popup
print('Popup dismiss')
pass
def on_release(self,event):
event.background_color = 154/256,226/256,248/256,1
def on_press(self,event):
print ("date clicked :" + event.text)
event.background_color = 1,0,0,1
self.popup = Popup(title='Preventive Maintenance Information',title_color=(0,0,0,1),
content = Reminder(),
size_hint=(0.9,0.9),background='background.png')
self.popup.bind(on_dismiss = self.on_dismiss)
self.popup.open()
# ------------------------------------------------------------------------------------------------#
# class for months.kv file
class Months(BoxLayout):
def __init__(self,**kwargs):
super().__init__(**kwargs)
# Displayed time is defined here
self.now=datetime.datetime.now()
self.year=2014 # Suppose to be self.now.year but change it into year
self.month=8
self.day=1
# An pointer to current month button
self.now_btn=Button()
self.btn_color=(17/256,64/256,108/256,1)
def month_btn_press(self,instance):
# Renew previous button
self.now_btn.background_color=(17/256,64/256,108/256,1)
instance.background_color=1,0,0,1
#Update the month of the button
self.month=self.get_month(instance.text)
self.now_btn=instance
def month_btn_release(self,instance):
#instance.background_color=0.1,.5,.5,1
#instance.bind(on_release= lambda instance : Dates.update_dates())
app_=App.get_running_app()
dates_=app_.calendar_.dates_
dates_.update_dates(self.year,self.month)
pass
def get_month(self,month_name):
month_names=['Null','Jan','Feb','Mar','April','May','June','July','Aug','Sept','Oct','Nov','Dec']
return month_names.index(month_name)
# ------------------------------------------------------------------------------------------------#
# mainApp class
class mainApp(App):
time = StringProperty()
def __init__(self,**kwargs):
super().__init__(**kwargs)
self.calendar_=Calender()
#self.model_=Model() # Place model into Dates()
self.year=self.calendar_.months_.year
self.month=self.calendar_.months_.month
self.day=self.calendar_.months_.day
def update(self,*args):
self.now_real=datetime.datetime.now()
self.t=datetime.datetime(self.year,self.month,self.day,self.now_real.hour,self.now_real.minute,self.now_real.second)
self.time=self.t.strftime('%Y-%m-%d %H:%M:%S')
def build(self):
self.title = "KONE通力电梯可视化产品"
self.font_name=font_name
Clock.schedule_interval(self.update,1)
return self.calendar_
if __name__ =='__main__':
app = mainApp()
app.run()
| ut):
def __init__(self,**kwargs):
super().__init__(**kwargs)
self.cols = 7
self.month_=Months()# In order to get current month and day
| identifier_body |
main.py | #coding:utf-8
# Thanks to:Kuldeep Singh, student at LNMIIT,Jaipur,India
# import Statements
import calendar
import time
import datetime
from kivy import resources
from kivy.app import App
from kivy.lang import Builder
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.gridlayout import GridLayout
from kivy.properties import ListProperty
from kivy.properties import ObjectProperty
from kivy.properties import StringProperty
from kivy.clock import Clock
from kivy.uix.popup import Popup
from kivy.uix.button import Button
from kivy.uix.label import Label
from kivy.event import EventDispatcher
from kivy.uix.textinput import TextInput
from sklearn.externals import joblib
from kivy.uix.image import Image
import pandas as pd
import numpy as np
from model import Model
from kivy.garden.graph import Graph, MeshLinePlot
from kivy.uix.scrollview import ScrollView
from datetime import timedelta
# Add source for Chinese characters
resources.resource_add_path("C:\\Windows\\Fonts")
font_name=resources.resource_find('WeiRuanZhengHeiTi-2.ttc')
color_shadow_blue=(.53125,.66796875,.7890625,1)
color_sky_blue=(1/256,158/256,213/256,1)
color_deep_blue=(17/256,64/256,108/256,1)
color_light_blue=(38/256,188/256,213/256,1)
# Builder used to load all the kivy files to be loaded in the main.py file
Builder.load_file('months.kv')
Builder.load_file('dates.kv')
Builder.load_file('select.kv')
Builder.load_file('status.kv')
Builder.load_file('days.kv')
Builder.load_file('calender.kv')
#------Kivy GUI Configuration--
# class for calender.kv file
class Calender(BoxLayout):
def __init__(self,**kwargs):
super().__init__(**kwargs)
# Integrating other classes
self.select_=Select()
self.months_=Months()
self.days_=Days()
self.dates_=Dates()
self.status_=Status()
# Adding layout
self.layout_1=BoxLayout(size_hint=(1,.1))
self.layout_1.add_widget(self.select_)
self.layout_2=BoxLayout()
self.layout_3=BoxLayout(orientation='vertical')
self.layout_3.add_widget(self.days_)
self.layout_3.add_widget(self.dates_)
self.layout_2.add_widget(self.months_)
self.layout_2.add_widget(self.layout_3)
self.layout_4=BoxLayout(size_hint=(1,.1))
self.layout_4.add_widget(self.status_)
self.add_widget(self.layout_1)
self.add_widget(self.layout_2)
self.add_widget(self.layout_4)
# ------------------------------------------------------------------------------------------------# | def __init__(self,**kwargs):
super().__init__(**kwargs)
self.orientation='horizontal'
self.layout_num=BoxLayout(orientation='vertical',size_hint=(1,1))
self.layout_time=BoxLayout(orientation='vertical',size_hint=(1,1))
self.layout_action=BoxLayout(orientation='vertical',size_hint=(1,1))
self.layout_num.add_widget(Label(text='故障电梯编号',font_name=font_name,font_size='20sp'))
self.layout_time.add_widget(Label(text='故障时间',font_name=font_name,font_size='20sp'))
self.layout_action.add_widget(Label(text='采取操作',font_name=font_name,font_size='20sp'))
self.textinput_num=TextInput(multiline=False)
self.textinput_time=TextInput(multiline=False)
self.textinput_time.text=time.strftime('%Y-%m-%d %H:%M:%S',time.localtime(time.time()))
self.textinput_action=TextInput(multiline=False)
self.layout_num.add_widget(self.textinput_num)
self.layout_time.add_widget(self.textinput_time)
self.layout_action.add_widget(self.textinput_action)
self.add_widget(self.layout_num)
self.add_widget(self.layout_time)
self.add_widget(self.layout_action)
self.add_widget(Button(text='确认',font_size='20sp',color=(0,0,0,1),font_name=font_name,on_press=self.ct_on_press))
def ct_on_press(self,event):
self.app_=App.get_running_app()
self.model_=self.app_.calendar_.dates_.model_
self.num=self.textinput_num.text
self.time=self.textinput_time.text
self.action=self.textinput_action.text
elevators=self.model_.samples
if self.num is not '':
idx=np.where(elevators['equip_no']==int(self.num))[0]
body=self.num+'\t'+self.time+'\t'+self.action+'\t'
if len(idx)>0:
infos=elevators[['Business type','Description','City','速度','设备型号','T']].iloc[idx[0]]
for info in infos:
body=body+str(info)+'\t'
body=body+'\n'
file_name='Log-New-Breakdown.tsv'
f=open(file_name,'a',encoding="utf-8")
f.write(body)
f.close()
print('成功输入新数据!')
else:
pass
self.textinput_num.text=''
self.textinput_time.text=time.strftime('%Y-%m-%d %H:%M:%S',time.localtime(time.time()))
self.textinput_action.text=''
# class for status.kv file
class Status(BoxLayout,EventDispatcher):
def __init__(self,**kwargs):
super().__init__(**kwargs)
self.orientation='horizontal'
self.add_widget(Button(on_press=self.on_press,text='输入走修数据',font_name=font_name,background_color=color_deep_blue,background_normal='',font_size='20sp',color=(1,1,1,1)))
def on_dismiss(self, arg):
pass
def on_press(self,event):
self.popup = Popup(title='Input Data',
content = Content(),
size_hint=(0.8,0.8))
self.popup.bind(on_dismiss=self.on_dismiss)
self.popup.open()
# ------------------------------------------------------------------------------------------------#
# class for Days.kv file
class Days(GridLayout):
def __init__(self,**kwargs):
super().__init__(**kwargs)
# ------------------------------------------------------------------------------------------------#
# class for select.kv file
class Select(BoxLayout):
lbl_ = ObjectProperty(None)
def __init__(self,**kwargs):
super().__init__(**kwargs)
self.lbl_.text='通力电梯公司维保可视化产品'
self.lbl_.font_name=font_name
# class for Reminder in Dates
class Reminder(BoxLayout):
def __init__(self,**kwargs):
super().__init__(**kwargs)
self.app_=App.get_running_app()
# Get the dates clicked
self.model_=self.app_.calendar_.dates_.model_
# Information to be added
self.body=self.model_.printSelectedResults()
# Layout arrangementw
self.orientation = 'vertical'
# Elevators information
self.layout_comp=BoxLayout(orientation = 'horizontal' , size_hint = (1,1))
self.layout_map=BoxLayout(orientation = 'horizontal' , size_hint = (1,1))
self.img_comp_1=Image(source='component_1.png',keep_ratio=False,size_hint=(1,1),allow_stretch=False,mipmap=True)
self.img_comp_2=Image(source='component_2.png',keep_ratio=False,size_hint=(1,1),allow_stretch=False,mipmap=True)
self.layout_comp.add_widget(self.img_comp_1)
self.layout_comp.add_widget(self.img_comp_2)
self.img_map_1=Image(source='map_kunshan.png',keep_ratio=False,size_hint=(1,1),allow_stretch=True)
self.img_map_2=Image(source='map_sh.png',keep_ratio=False,size_hint=(1,1),allow_stretch=True)
self.layout_map.add_widget(self.img_map_1)
self.layout_map.add_widget(Label(size_hint=(0.1,1),text='昆\n山\n地\n区\n电\n梯\n分\n布\n图',font_name=font_name,font_size='20sp',color=(0,0,0,1)))
self.layout_map.add_widget(self.img_map_2)
self.layout_map.add_widget(Label(size_hint=(0.1,1),text='上\n海\n地\n区\n电\n梯\n分\n布\n图',font_name=font_name,font_size='20sp',color=(0,0,0,1)))
self.layout_fig=BoxLayout(orientation = 'vertical' , size_hint = (1,0.7))
#self.layout_fig.add_widget(self.layout_comp)
self.layout_fig.add_widget(self.layout_map)
self.add_widget(self.layout_fig)
#self.layout_scroll_lb=Label(text=self.body,size_hint=(1,None))
#self.layout_scroll_lb.height=self.layout_scroll_lb.texture_size[1]
#self.layout_scroll=ScrollableLabel(text=self.body)
#self.layout_scroll.add_widget(self.layout_scroll_lb)
#self.layout1_title=Label(text='以下电梯预测将在30天内发生故障:\n'+self.body,font_name=font_name)
#self.layout1_title.size=self.layout1_title.texture_size
#self.layout1.add_widget(self.layout1_title)
#self.layout1.add_widget(self.layout_scroll)
# Plots
self.graph_theme = {
'label_options': {
'color': (0,0,0,1), # color of tick labels and titles
'bold': True},
'tick_color': (0,0,0,1)} # ticks and grid
self.graph=Graph(xlabel='Current time',ylabel='Maintenance date',
x_ticks_major=5, y_ticks_major=5,
y_grid_label=True, x_grid_label=True, padding=5,
x_grid=True, y_grid=True, xmin=-0, xmax=31, ymin=-0, ymax=31,**self.graph_theme)
self.plot = MeshLinePlot(color=[1, 0, 0, 1])
self.best_maint_dates=joblib.load('mon_best_int_np.asv')
self.best_maint_dates=self.best_maint_dates[self.model_.month-1]
self.plot.points = [(x+1, x+1+self.best_maint_dates[x]) for x in range(len(self.best_maint_dates))]
self.graph.add_plot(self.plot)
self.layout_graph=BoxLayout(orientation = 'vertical' , size_hint = (0.7,1))
self.layout_graph.add_widget(Label(text='本月最优维保日期随时间变化图',font_name=font_name,size_hint=(1,0.1),font_size='16sp',color=(0,0,0,1)))
self.layout_graph.add_widget(self.graph)
# Note for user
self.layout_info=BoxLayout(orientation = 'vertical' , size_hint = (0.3,1))
self.layout_info.add_widget(Label(text='待预防性维护电梯信息:\n设备编号\n设备所在区域类型\n故障信息\n所在城市\n电梯运行速度\n设备型号\n距离上一次维修天数',font_name=font_name,pos_hint={'x': 0.5, 'center_y': .5},font_size='16sp',color=(0,0,0,1)))
self.layout_note=BoxLayout(orientation = 'vertical' , size_hint = (0.5,0.8))
self.layout_note.add_widget(Button(on_press = self.on_press,text='输出\n电梯\n信息',font_name=font_name,pos_hint={'x': .5, 'y': 1},size_hint=(0.4,0.2),font_size='20sp',color=(0,0,0,1),background_color=color_shadow_blue))
self.layout_graph_note=BoxLayout(orientation = 'horizontal' , size_hint = (1,0.5))
self.layout_graph_note.add_widget(self.layout_graph)
self.layout_graph_note.add_widget(self.layout_info)
self.layout_graph_note.add_widget(self.layout_note)
self.add_widget(self.layout_graph_note)
self.layout2 = BoxLayout(orientation = 'horizontal' , size_hint = (1,.15))
self.add_widget(self.layout2)
self.layout2.add_widget(Label(text = "请按 'ESC'键或点击窗外以关闭窗口",font_name=font_name,font_size='20sp',color=(1,0,0,1)))
def on_release(self,event):
print ("Reminder OK Clicked!")
def on_press(self,event):
file_name='Log-{:}-{:}-{:}.tsv'.format(self.model_.year,self.model_.month,self.model_.day)
f=open(file_name,'w',encoding="utf-8")
f.write(self.body)
f.close()
# ------------------------------------------------------------------------------------------------#
# class for dates.kv file
class Dates(GridLayout):
def __init__(self,**kwargs):
super().__init__(**kwargs)
self.cols = 7
self.month_=Months()# In order to get current month and day
self.model_=Model(year=self.month_.year,month=self.month_.month,day=self.month_.day)
# Best maintainance date
self.maintainance_timedelta=datetime.timedelta(days=self.model_.findBestMaintInterval())
self.best_maint_date=datetime.datetime(self.month_.year,self.month_.month,self.month_.day)
self.best_maint_date=self.best_maint_date+self.maintainance_timedelta
print('Best maintenance interval: {:} and best maintenance date: {:}'.format(self.maintainance_timedelta,self.best_maint_date))
# Update dates paddle when choose different months
self.update_dates(self.month_.year,self.month_.month)
def update_dates(self,year,month):
print('Update dates!')
self.clear_widgets()
c = calendar.monthcalendar(year,month)
# Show the best maintenance date if current month is clicked
if self.best_maint_date.month is month:
for i in c:
for j in i:
if j == 0:
self.add_widget(Button(on_press = self.on_press,on_release=self.on_release,text = '{j}'.format(j=''),font_size='20sp',color=(0,0,0,1)))
elif j==self.best_maint_date.day:
self.add_widget(Button(on_press = self.on_press,on_release=self.on_release,text = '{j}'.format(j=j),background_color=(1,0,0,1),font_size='20sp',color=(0,0,0,1)))
else:
self.add_widget(Button(on_press = self.on_press, on_release=self.on_release,text = '{j}'.format(j=j),font_size='20sp',color=(0,0,0,1)))
else:
for i in c:
for j in i:
if j == 0:
self.add_widget(Button(on_press = self.on_press,on_release=self.on_release,text = '{j}'.format(j=''),font_size='20sp',color=(0,0,0,1)))
else:
self.add_widget(Button(on_press = self.on_press, on_release=self.on_release,text = '{j}'.format(j=j),font_size='20sp',color=(0,0,0,1)))
def on_dismiss(self, arg):
# Do something on close of popup
print('Popup dismiss')
pass
def on_release(self,event):
event.background_color = 154/256,226/256,248/256,1
def on_press(self,event):
print ("date clicked :" + event.text)
event.background_color = 1,0,0,1
self.popup = Popup(title='Preventive Maintenance Information',title_color=(0,0,0,1),
content = Reminder(),
size_hint=(0.9,0.9),background='background.png')
self.popup.bind(on_dismiss = self.on_dismiss)
self.popup.open()
# ------------------------------------------------------------------------------------------------#
# class for months.kv file
class Months(BoxLayout):
def __init__(self,**kwargs):
super().__init__(**kwargs)
# Displayed time is defined here
self.now=datetime.datetime.now()
self.year=2014 # Suppose to be self.now.year but change it into year
self.month=8
self.day=1
# An pointer to current month button
self.now_btn=Button()
self.btn_color=(17/256,64/256,108/256,1)
def month_btn_press(self,instance):
# Renew previous button
self.now_btn.background_color=(17/256,64/256,108/256,1)
instance.background_color=1,0,0,1
#Update the month of the button
self.month=self.get_month(instance.text)
self.now_btn=instance
def month_btn_release(self,instance):
#instance.background_color=0.1,.5,.5,1
#instance.bind(on_release= lambda instance : Dates.update_dates())
app_=App.get_running_app()
dates_=app_.calendar_.dates_
dates_.update_dates(self.year,self.month)
pass
def get_month(self,month_name):
month_names=['Null','Jan','Feb','Mar','April','May','June','July','Aug','Sept','Oct','Nov','Dec']
return month_names.index(month_name)
# ------------------------------------------------------------------------------------------------#
# mainApp class
class mainApp(App):
time = StringProperty()
def __init__(self,**kwargs):
super().__init__(**kwargs)
self.calendar_=Calender()
#self.model_=Model() # Place model into Dates()
self.year=self.calendar_.months_.year
self.month=self.calendar_.months_.month
self.day=self.calendar_.months_.day
def update(self,*args):
self.now_real=datetime.datetime.now()
self.t=datetime.datetime(self.year,self.month,self.day,self.now_real.hour,self.now_real.minute,self.now_real.second)
self.time=self.t.strftime('%Y-%m-%d %H:%M:%S')
def build(self):
self.title = "KONE通力电梯可视化产品"
self.font_name=font_name
Clock.schedule_interval(self.update,1)
return self.calendar_
if __name__ =='__main__':
app = mainApp()
app.run() | class Content(BoxLayout): | random_line_split |
main.py | #coding:utf-8
# Thanks to:Kuldeep Singh, student at LNMIIT,Jaipur,India
# import Statements
import calendar
import time
import datetime
from kivy import resources
from kivy.app import App
from kivy.lang import Builder
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.gridlayout import GridLayout
from kivy.properties import ListProperty
from kivy.properties import ObjectProperty
from kivy.properties import StringProperty
from kivy.clock import Clock
from kivy.uix.popup import Popup
from kivy.uix.button import Button
from kivy.uix.label import Label
from kivy.event import EventDispatcher
from kivy.uix.textinput import TextInput
from sklearn.externals import joblib
from kivy.uix.image import Image
import pandas as pd
import numpy as np
from model import Model
from kivy.garden.graph import Graph, MeshLinePlot
from kivy.uix.scrollview import ScrollView
from datetime import timedelta
# Add source for Chinese characters
resources.resource_add_path("C:\\Windows\\Fonts")
font_name=resources.resource_find('WeiRuanZhengHeiTi-2.ttc')
color_shadow_blue=(.53125,.66796875,.7890625,1)
color_sky_blue=(1/256,158/256,213/256,1)
color_deep_blue=(17/256,64/256,108/256,1)
color_light_blue=(38/256,188/256,213/256,1)
# Builder used to load all the kivy files to be loaded in the main.py file
Builder.load_file('months.kv')
Builder.load_file('dates.kv')
Builder.load_file('select.kv')
Builder.load_file('status.kv')
Builder.load_file('days.kv')
Builder.load_file('calender.kv')
#------Kivy GUI Configuration--
# class for calender.kv file
class Calender(BoxLayout):
def __init__(self,**kwargs):
super().__init__(**kwargs)
# Integrating other classes
self.select_=Select()
self.months_=Months()
self.days_=Days()
self.dates_=Dates()
self.status_=Status()
# Adding layout
self.layout_1=BoxLayout(size_hint=(1,.1))
self.layout_1.add_widget(self.select_)
self.layout_2=BoxLayout()
self.layout_3=BoxLayout(orientation='vertical')
self.layout_3.add_widget(self.days_)
self.layout_3.add_widget(self.dates_)
self.layout_2.add_widget(self.months_)
self.layout_2.add_widget(self.layout_3)
self.layout_4=BoxLayout(size_hint=(1,.1))
self.layout_4.add_widget(self.status_)
self.add_widget(self.layout_1)
self.add_widget(self.layout_2)
self.add_widget(self.layout_4)
# ------------------------------------------------------------------------------------------------#
class Content(BoxLayout):
def __init__(self,**kwargs):
super().__init__(**kwargs)
self.orientation='horizontal'
self.layout_num=BoxLayout(orientation='vertical',size_hint=(1,1))
self.layout_time=BoxLayout(orientation='vertical',size_hint=(1,1))
self.layout_action=BoxLayout(orientation='vertical',size_hint=(1,1))
self.layout_num.add_widget(Label(text='故障电梯编号',font_name=font_name,font_size='20sp'))
self.layout_time.add_widget(Label(text='故障时间',font_name=font_name,font_size='20sp'))
self.layout_action.add_widget(Label(text='采取操作',font_name=font_name,font_size='20sp'))
self.textinput_num=TextInput(multiline=False)
self.textinput_time=TextInput(multiline=False)
self.textinput_time.text=time.strftime('%Y-%m-%d %H:%M:%S',time.localtime(time.time()))
self.textinput_action=TextInput(multiline=False)
self.layout_num.add_widget(self.textinput_num)
self.layout_time.add_widget(self.textinput_time)
self.layout_action.add_widget(self.textinput_action)
self.add_widget(self.layout_num)
self.add_widget(self.layout_time)
self.add_widget(self.layout_action)
self.add_widget(Button(text='确认',font_size='20sp',color=(0,0,0,1),font_name=font_name,on_press=self.ct_on_press))
def ct_on_press(self,event):
self.app_=App.get_running_app()
self.model_=self.app_.calendar_.dates_.model_
self.num=self.textinput_num.text
self.time=self.textinput_time.text
self.action=self.textinput_action.text
elevators=self.model_.samples
if self.num is not '':
idx=np.where(elevators['equip_no']==int(self.num))[0]
body=self.num+'\t'+self.time+'\t'+self.action+'\t'
if len(idx)>0:
infos=elevators[['Business type','Description','City','速度','设备型号','T']].iloc[idx[0]]
for info in infos:
body=body+str(info)+'\t'
body=body+'\n'
file_name='Log-New-Breakdown.tsv'
f=open(file_name,'a',encoding="utf-8")
f.write(body)
f.close()
print('成功输入新数据!')
else:
pass
self.textinput_num.text=''
self.textinput_time.text=time.strftime('%Y-%m-%d %H:%M:%S',time.localtime(time.time()))
self.textinput_action.text=''
# class for status.kv file
class Status(BoxLayout,EventDispatcher):
def __init__(self,**kwargs):
super().__init__(**kwargs)
self.orientation='horizontal'
self.add_widget(Button(on_press=self.on_press,text='输入走修数据',font_name=font_name,background_color=color_deep_blue,background_normal='',font_size='20sp',color=(1,1,1,1)))
def on_dismiss(self, arg):
pass
def on_press(self,event):
self.popup = Popup(title='Input Data',
content = Content(),
size_hint=(0.8,0.8))
self.popup.bind(on_dismiss=self.on_dismiss)
self.popup.open()
# ------------------------------------------------------------------------------------------------#
# class for Days.kv file
class Days(GridLayout):
def __init__(self,**kwargs):
super().__init__(**kwargs)
# ------------------------------------------------------------------------------------------------#
# class for select.kv file
class Select(BoxLayout):
lbl_ = ObjectProperty(None)
def __init__(self,**kwargs):
super().__init__(**kwargs)
self.lbl_.text='通力电梯公司维保可视化产品'
self.lbl_.font_name=font_name
# class for Reminder in Dates
class Reminder(BoxLayout):
def __init__(self,**kwargs):
super().__init__(**kwargs)
self.app_=App.get_running_app()
# Get the dates clicked
self.model_=self.app_.calendar_.dates_.model_
# Information to be added
self.body=self.model_.printSelectedResults()
# Layout arrangementw
self.orientation = 'vertical'
# Elevators information
self.layout_comp=BoxLayout(orientation = 'horizontal' , size_hint = (1,1))
self.layout_map=BoxLayout(orientation = 'horizontal' , size_hint = (1,1))
self.img_comp_1=Image(source='component_1.png',keep_ratio=False,size_hint=(1,1),allow_stretch=False,mipmap=True)
self.img_comp_2=Image(source='component_2.png',keep_ratio=False,size_hint=(1,1),allow_stretch=False,mipmap=True)
self.layout_comp.add_widget(self.img_comp_1)
self.layout_comp.add_widget(self.img_comp_2)
self.img_map_1=Image(source='map_kunshan.png',keep_ratio=False,size_hint=(1,1),allow_stretch=True)
self.img_map_2=Image(source='map_sh.png',keep_ratio=False,size_hint=(1,1),allow_stretch=True)
self.layout_map.add_widget(self.img_map_1)
self.layout_map.add_widget(Label(size_hint=(0.1,1),text='昆\n山\n地\n区\n电\n梯\n分\n布\n图',font_name=font_name,font_size='20sp',color=(0,0,0,1)))
self.layout_map.add_widget(self.img_map_2)
self.layout_map.add_widget(Label(size_hint=(0.1,1),text='上\n海\n地\n区\n电\n梯\n分\n布\n图',font_name=font_name,font_size='20sp',color=(0,0,0,1)))
self.layout_fig=BoxLayout(orientation = 'vertical' , size_hint = (1,0.7))
#self.layout_fig.add_widget(self.layout_comp)
self.layout_fig.add_widget(self.layout_map)
self.add_widget(self.layout_fig)
#self.layout_scroll_lb=Label(text=self.body,size_hint=(1,None))
#self.layout_scroll_lb.height=self.layout_scroll_lb.texture_size[1]
#self.layout_scroll=ScrollableLabel(text=self.body)
#self.layout_scroll.add_widget(self.layout_scroll_lb)
#self.layout1_title=Label(text='以下电梯预测将在30天内发生故障:\n'+self.body,font_name=font_name)
#self.layout1_title.size=self.layout1_title.texture_size
#self.layout1.add_widget(self.layout1_title)
#self.layout1.add_widget(self.layout_scroll)
# Plots
self.graph_theme = {
'label_options': {
'color': (0,0,0,1), # color of tick labels and titles
'bold': True},
'tick_color': (0,0,0,1)} # ticks and grid
self.graph=Graph(xlabel='Current time',ylabel='Maintenance date',
x_ticks_major=5, y_ticks_major=5,
y_grid_label=True, x_grid_label=True, padding=5,
x_grid=True, y_grid=True, xmin=-0, xmax=31, ymin=-0, ymax=31,**self.graph_theme)
self.plot = MeshLinePlot(color=[1, 0, 0, 1])
self.best_maint_dates=joblib.load('mon_best_int_np.asv')
self.best_maint_dates=self.best_maint_dates[self.model_.month-1]
self.plot.points = [(x+1, x+1+self.best_maint_dates[x]) for x in range(len(self.best_maint_dates))]
self.graph.add_plot(self.plot)
self.layout_graph=BoxLayout(orientation = 'vertical' , size_hint = (0.7,1))
self.layout_graph.add_widget(Label(text='本月最优维保日期随时间变化图',font_name=font_name,size_hint=(1,0.1),font_size='16sp',color=(0,0,0,1)))
self.layout_graph.add_widget(self.graph)
# Note for user
self.layout_info=BoxLayout(orientation = 'vertical' , size_hint = (0.3,1))
self.layout_info.add_widget(Label(text='待预防性维护电梯信息:\n设备编号\n设备所在区域类型\n故障信息\n所在城市\n电梯运行速度\n设备型号\n距离上一次维修天数',font_name=font_name,pos_hint={'x': 0.5, 'center_y': .5},font_size='16sp',color=(0,0,0,1)))
self.layout_note=BoxLayout(orientation = 'vertical' , size_hint = (0.5,0.8))
self.layout_note.add_widget(Button(on_press = self.on_press,text='输出\n电梯\n信息',font_name=font_name,pos_hint={'x': .5, 'y': 1},size_hint=(0.4,0.2),font_size='20sp',color=(0,0,0,1),background_color=color_shadow_blue))
self.layout_graph_note=BoxLayout(orientation = 'horizontal' , size_hint = (1,0.5))
self.layout_graph_note.add_widget(self.layout_graph)
self.layout_graph_note.add_widget(self.layout_info)
self.layout_graph_note.add_widget(self.layout_note)
self.add_widget(self.layout_graph_note)
self.layout2 = BoxLayout(orientation = 'horizontal' , size_hint = (1,.15))
self.add_widget(self.layout2)
self.layout2.add_widget(Label(text = "请按 'ESC'键或点击窗外以关闭窗口",font_name=font_name,font_size='20sp',color=(1,0,0,1)))
def on_release(self,event):
print ("Reminder OK Clicked!")
def on_press(self,event):
file_name='Log-{:}-{:}-{:}.tsv'.format(self.model_.year,self.model_.month,self.model_.day)
f=open(file_name,'w',encoding="utf-8")
f.write(self.body)
f.close()
# ------------------------------------------------------------------------------------------------#
# class for dates.kv file
class Dates(GridLayout):
def __init__(self,**kwargs):
super().__init__(**kwargs)
self.cols = 7
self.month_=Months()# In order to get current month and day
self.model_=Model(year=self.month_.year,month=self.month_.month,day=self.month_.day)
# Best maintainance date
self.maintainance_timedelta=datetime.timedelta(days=self.model_.findBestMaintInterval())
self.best_maint_date=datetime.datetime(self.month_.year,self.month_.month,self.month_.day)
self.best_maint_date=self.best_maint_date+self.maintainance_timedelta
print('Best maintenance interval: {:} and best maintenance date: {:}'.format(self.maintainance_timedelta,self.best_maint_date))
# Update dates paddle when choose different months
self.update_dates(self.month_.year,self.month_.month)
def update_dates(self,year,month):
print('Update dates!')
self.clear_widgets()
c = calendar.monthcalendar(year,month)
# Show the best maintenance date if current month is clicked
if self.best_maint_date.month is month:
for i in c:
for j in i:
if j == 0:
self.add_widget(Button(on_press = self.on_press,on_release=self.on_release,text = '{j}'.format(j=''),font_size='20sp',color=(0,0,0,1)))
elif j==self.best_maint_date.day:
self.add_widget(Button(on_press = self.on_press,on_release=self.on_release,text = '{j}'.format(j=j),background_color=(1,0,0,1),font_size='20sp',color=(0,0,0,1)))
else:
self.add_widget(Button(on_press = self.on_press, on_release=self.on_release,text = '{j}'.format(j=j),font_size='20sp',color=(0,0,0,1)))
else:
for i in c:
for j in i:
if j == 0:
self.add_widget(Button(on_press = self.on_press,on_release=self.on_release,text = '{j}'.format(j=''),font_size='20sp',color=(0,0,0,1)))
else:
self.add_widget(Button(on_press = self.on_press, | = 1,0,0,1
self.popup = Popup(title='Preventive Maintenance Information',title_color=(0,0,0,1),
content = Reminder(),
size_hint=(0.9,0.9),background='background.png')
self.popup.bind(on_dismiss = self.on_dismiss)
self.popup.open()
# ------------------------------------------------------------------------------------------------#
# class for months.kv file
class Months(BoxLayout):
def __init__(self,**kwargs):
super().__init__(**kwargs)
# Displayed time is defined here
self.now=datetime.datetime.now()
self.year=2014 # Suppose to be self.now.year but change it into year
self.month=8
self.day=1
# An pointer to current month button
self.now_btn=Button()
self.btn_color=(17/256,64/256,108/256,1)
def month_btn_press(self,instance):
# Renew previous button
self.now_btn.background_color=(17/256,64/256,108/256,1)
instance.background_color=1,0,0,1
#Update the month of the button
self.month=self.get_month(instance.text)
self.now_btn=instance
def month_btn_release(self,instance):
#instance.background_color=0.1,.5,.5,1
#instance.bind(on_release= lambda instance : Dates.update_dates())
app_=App.get_running_app()
dates_=app_.calendar_.dates_
dates_.update_dates(self.year,self.month)
pass
def get_month(self,month_name):
month_names=['Null','Jan','Feb','Mar','April','May','June','July','Aug','Sept','Oct','Nov','Dec']
return month_names.index(month_name)
# ------------------------------------------------------------------------------------------------#
# mainApp class
class mainApp(App):
time = StringProperty()
def __init__(self,**kwargs):
super().__init__(**kwargs)
self.calendar_=Calender()
#self.model_=Model() # Place model into Dates()
self.year=self.calendar_.months_.year
self.month=self.calendar_.months_.month
self.day=self.calendar_.months_.day
def update(self,*args):
self.now_real=datetime.datetime.now()
self.t=datetime.datetime(self.year,self.month,self.day,self.now_real.hour,self.now_real.minute,self.now_real.second)
self.time=self.t.strftime('%Y-%m-%d %H:%M:%S')
def build(self):
self.title = "KONE通力电梯可视化产品"
self.font_name=font_name
Clock.schedule_interval(self.update,1)
return self.calendar_
if __name__ =='__main__':
app = mainApp()
app.run()
| on_release=self.on_release,text = '{j}'.format(j=j),font_size='20sp',color=(0,0,0,1)))
def on_dismiss(self, arg):
# Do something on close of popup
print('Popup dismiss')
pass
def on_release(self,event):
event.background_color = 154/256,226/256,248/256,1
def on_press(self,event):
print ("date clicked :" + event.text)
event.background_color | conditional_block |
main.py | #coding:utf-8
# Thanks to:Kuldeep Singh, student at LNMIIT,Jaipur,India
# import Statements
import calendar
import time
import datetime
from kivy import resources
from kivy.app import App
from kivy.lang import Builder
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.gridlayout import GridLayout
from kivy.properties import ListProperty
from kivy.properties import ObjectProperty
from kivy.properties import StringProperty
from kivy.clock import Clock
from kivy.uix.popup import Popup
from kivy.uix.button import Button
from kivy.uix.label import Label
from kivy.event import EventDispatcher
from kivy.uix.textinput import TextInput
from sklearn.externals import joblib
from kivy.uix.image import Image
import pandas as pd
import numpy as np
from model import Model
from kivy.garden.graph import Graph, MeshLinePlot
from kivy.uix.scrollview import ScrollView
from datetime import timedelta
# Add source for Chinese characters
resources.resource_add_path("C:\\Windows\\Fonts")
font_name=resources.resource_find('WeiRuanZhengHeiTi-2.ttc')
color_shadow_blue=(.53125,.66796875,.7890625,1)
color_sky_blue=(1/256,158/256,213/256,1)
color_deep_blue=(17/256,64/256,108/256,1)
color_light_blue=(38/256,188/256,213/256,1)
# Builder used to load all the kivy files to be loaded in the main.py file
Builder.load_file('months.kv')
Builder.load_file('dates.kv')
Builder.load_file('select.kv')
Builder.load_file('status.kv')
Builder.load_file('days.kv')
Builder.load_file('calender.kv')
#------Kivy GUI Configuration--
# class for calender.kv file
class Calender(BoxLayout):
def __init__(self,**kwargs):
super().__init__(**kwargs)
# Integrating other classes
self.select_=Select()
self.months_=Months()
self.days_=Days()
self.dates_=Dates()
self.status_=Status()
# Adding layout
self.layout_1=BoxLayout(size_hint=(1,.1))
self.layout_1.add_widget(self.select_)
self.layout_2=BoxLayout()
self.layout_3=BoxLayout(orientation='vertical')
self.layout_3.add_widget(self.days_)
self.layout_3.add_widget(self.dates_)
self.layout_2.add_widget(self.months_)
self.layout_2.add_widget(self.layout_3)
self.layout_4=BoxLayout(size_hint=(1,.1))
self.layout_4.add_widget(self.status_)
self.add_widget(self.layout_1)
self.add_widget(self.layout_2)
self.add_widget(self.layout_4)
# ------------------------------------------------------------------------------------------------#
class Content(BoxLayout):
def __init__(self,**kwargs):
super().__init__(**kwargs)
self.orientation='horizontal'
self.layout_num=BoxLayout(orientation='vertical',size_hint=(1,1))
self.layout_time=BoxLayout(orientation='vertical',size_hint=(1,1))
self.layout_action=BoxLayout(orientation='vertical',size_hint=(1,1))
self.layout_num.add_widget(Label(text='故障电梯编号',font_name=font_name,font_size='20sp'))
self.layout_time.add_widget(Label(text='故障时间',font_name=font_name,font_size='20sp'))
self.layout_action.add_widget(Label(text='采取操作',font_name=font_name,font_size='20sp'))
self.textinput_num=TextInput(multiline=False)
self.textinput_time=TextInput(multiline=False)
self.textinput_time.text=time.strftime('%Y-%m-%d %H:%M:%S',time.localtime(time.time()))
self.textinput_action=TextInput(multiline=False)
self.layout_num.add_widget(self.textinput_num)
self.layout_time.add_widget(self.textinput_time)
self.layout_action.add_widget(self.textinput_action)
self.add_widget(self.layout_num)
self.add_widget(self.layout_time)
self.add_widget(self.layout_action)
self.add_widget(Button(text='确认',font_size='20sp',color=(0,0,0,1),font_name=font_name,on_press=self.ct_on_press))
def ct_on_press(self,event):
self.app_=App.get_running_app()
self.model_=self.app_.calendar_.dates_.model_
self.num=self.textinput_num.text
self.time=self.textinput_time.text
self.action=self.textinput_action.text
elevators=self.model_.samples
if self.num is not '':
idx=np.where(elevators['equip_no']==int(self.num))[0]
body=self.num+'\t'+self.time+'\t'+self.action+'\t'
if len(idx)>0:
infos=elevators[['Business type','Description','City','速度','设备型号','T']].iloc[idx[0]]
for info in infos:
body=body+str(info)+'\t'
body=body+'\n'
file_name='Log-New-Breakdown.tsv'
f=open(file_name,'a',encoding="utf-8")
f.write(body)
f.close()
print('成功输入新数据!')
else:
pass
self.textinput_num.text=''
self.textinput_time.text=time.strftime('%Y-%m-%d %H:%M:%S',time.localtime(time.time()))
self.textinput_action.text=''
# class for status.kv file
class Status(BoxLayout,EventDispatcher):
def __init__(self,**kwargs):
super().__init__(**kwargs)
self.orientation='horizontal'
self.add_widget(Button(on_press=self.on_press,text='输入走修数据',font_name=font_name,background_color=color_deep_blue,background_normal='',font_size='20sp',color=(1,1,1,1)))
def on_dismiss(self, arg):
pass
def on_press(self,event):
self.popup = Popup(title='Input Data',
content = Content(),
size_hint=(0.8,0.8))
self.popup.bind(on_dismiss=self.on_dismiss)
self.popup.open()
# ------------------------------------------------------------------------------------------------#
# class for Days.kv file
class Days(GridLayout):
def __init__(self,**kwargs):
super().__init__(**kwargs)
# ------------------------------------------------------------------------------------------------#
# class for select.kv file
class Select(BoxLayout):
lbl_ = ObjectProperty(None)
def __init__(self,**kwargs):
super().__init__(**kwargs)
self.lbl_.text='通力电梯公司维保可视化产品'
self.lbl_.font_name=font_name
# class for Reminder in Dates
class Reminder(BoxLayout):
def __init__(self,**kwargs):
super().__init__(**kwargs)
self.app_=App.get_running_app()
# Get the dates clicked
self.model_=self.app_.calendar_.dates_.model_
# Information to be added
self.body=self.model_.printSelectedResults()
# Layout arrangementw
self.orientation = 'vertical'
# Elevators information
self.layout_comp=BoxLayout(orientation = 'horizontal' , size_hint = (1,1))
self.layout_map=BoxLayout(orientation = 'horizontal' , size_hint = (1,1))
self.img_comp_1=Image(source='component_1.png',keep_ratio=False,size_hint=(1,1),allow_stretch=False,mipmap=True)
self.img_comp_2=Image(source='component_2.png',keep_ratio=False,size_hint=(1,1),allow_stretch=False,mipmap=True)
self.layout_comp.add_widget(self.img_comp_1)
self.layout_comp.add_widget(self.img_comp_2)
self.img_map_1=Image(source='map_kunshan.png',keep_ratio=False,size_hint=(1,1),allow_stretch=True)
self.img_map_2=Image(source='map_sh.png',keep_ratio=False,size_hint=(1,1),allow_stretch=True)
self.layout_map.add_widget(self.img_map_1)
self.layout_map.add_widget(Label(size_hint=(0.1,1),text='昆\n山\n地\n区\n电\n梯\n分\n布\n图',font_name=font_name,font_size='20sp',color=(0,0,0,1)))
self.layout_map.add_widget(self.img_map_2)
self.layout_map.add_widget(Label(size_hint=(0.1,1),text='上\n海\n地\n区\n电\n梯\n分\n布\n图',font_name=font_name,font_size='20sp',color=(0,0,0,1)))
self.layout_fig=BoxLayout(orientation = 'vertical' , size_hint = (1,0.7))
#self.layout_fig.add_widget(self.layout_comp)
self.layout_fig.add_widget(self.layout_map)
self.add_widget(self.layout_fig)
#self.layout_scroll_lb=Label(text=self.body,size_hint=(1,None))
#self.layout_scroll_lb.height=self.layout_scroll_lb.texture_size[1]
#self.layout_scroll=ScrollableLabel(text=self.body)
#self.layout_scroll.add_widget(self.layout_scroll_lb)
#self.layout1_title=Label(text='以下电梯预测将在30天内发生故障:\n'+self.body,font_name=font_name)
#self.layout1_title.size=self.layout1_title.texture_size
#self.layout1.add_widget(self.layout1_title)
#self.layout1.add_widget(self.layout_scroll)
# Plots
self.graph_theme = {
'label_options': {
'color': (0,0,0,1), # color of tick labels and titles
'bold': True},
'tick_color': (0,0,0,1)} # ticks and grid
self.graph=Graph(xlabel='Current time',ylabel='Maintenance date',
x_ticks_major=5, y_ticks_major=5,
y_grid_label=True, x_grid_label=True, padding=5,
x_grid=True, y_grid=True, xmin=-0, xmax=31, ymin=-0, ymax=31,**self.graph_theme)
self.plot = MeshLinePlot(color=[1, 0, 0, 1])
self.best_maint_dates=joblib.load('mon_best_int_np.asv')
self.best_maint_dates=self.best_maint_dates[self.model_.month-1]
self.plot.points = [(x+1, x+1+self.best_maint_dates[x]) for x in range(len(self.best_maint_dates))]
self.graph.add_plot(self.plot)
self.layout_graph=BoxLayout(orientation = 'vertical' , size_hint = (0.7,1))
self.layout_graph.add_widget(Label(text='本月最优维保日期随时间变化图',font_name=font_name,size_hint=(1,0.1),font_size='16sp',color=(0,0,0,1)))
self.layout_graph.add_widget(self.graph)
# Note for user
self.layout_info=BoxLayout(orientation = 'vertical' , size_hint = (0.3,1))
self.layout_info.add_widget(Label(text='待预防性维护电梯信息:\n设备编号\n设备所在区域类型\n故障信息\n所在城市\n电梯运行速度\n设备型号\n距离上一次维修天数',font_name=font_name,pos_hint={'x': 0.5, 'center_y': .5},font_size='16sp',color=(0,0,0,1)))
self.layout_note=BoxLayout(orientation = 'vertical' , size_hint = (0.5,0.8))
self.layout_note.add_widget(Button(on_press = self.on_press,text='输出\n电梯\n信息',font_name=font_name,pos_hint={'x': .5, 'y': 1},size_hint=(0.4,0.2),font_size='20sp',color=(0,0,0,1),background_color=color_shadow_blue))
self.layout_graph_note=BoxLayout(orientation = 'horizontal' , size_hint = (1,0.5))
self.layout_graph_note.add_widget(self.layout_graph)
self.layout_graph_note.add_widget(self.layout_info)
self.layout_graph_note.add_widget(self.layout_note)
self.add_widget(self.layout_graph_note)
self.layout2 = BoxLayout(orientation = 'horizontal' , size_hint = (1,.15))
self.add_widget(self.layout2)
self.layout2.add_widget(Label(text = "请按 'ESC'键或点击窗外以关闭窗口",font_name=font_name,font_size='20sp',color=(1,0,0,1)))
def on_release(self,event):
print ("Reminder OK Clicked!")
def on_press(self,event):
file_name='Log-{:}-{:}-{:}.tsv'.format(self.model_.year,self.model_.month,self.model_.day)
f=open(file_name,'w',encoding="utf-8")
f.write(self.body)
f.close()
# ------------------------------------------------------------------------------------------------#
# class for date | e
class Dates(GridLayout):
def __init__(self,**kwargs):
super().__init__(**kwargs)
self.cols = 7
self.month_=Months()# In order to get current month and day
self.model_=Model(year=self.month_.year,month=self.month_.month,day=self.month_.day)
# Best maintainance date
self.maintainance_timedelta=datetime.timedelta(days=self.model_.findBestMaintInterval())
self.best_maint_date=datetime.datetime(self.month_.year,self.month_.month,self.month_.day)
self.best_maint_date=self.best_maint_date+self.maintainance_timedelta
print('Best maintenance interval: {:} and best maintenance date: {:}'.format(self.maintainance_timedelta,self.best_maint_date))
# Update dates paddle when choose different months
self.update_dates(self.month_.year,self.month_.month)
def update_dates(self,year,month):
print('Update dates!')
self.clear_widgets()
c = calendar.monthcalendar(year,month)
# Show the best maintenance date if current month is clicked
if self.best_maint_date.month is month:
for i in c:
for j in i:
if j == 0:
self.add_widget(Button(on_press = self.on_press,on_release=self.on_release,text = '{j}'.format(j=''),font_size='20sp',color=(0,0,0,1)))
elif j==self.best_maint_date.day:
self.add_widget(Button(on_press = self.on_press,on_release=self.on_release,text = '{j}'.format(j=j),background_color=(1,0,0,1),font_size='20sp',color=(0,0,0,1)))
else:
self.add_widget(Button(on_press = self.on_press, on_release=self.on_release,text = '{j}'.format(j=j),font_size='20sp',color=(0,0,0,1)))
else:
for i in c:
for j in i:
if j == 0:
self.add_widget(Button(on_press = self.on_press,on_release=self.on_release,text = '{j}'.format(j=''),font_size='20sp',color=(0,0,0,1)))
else:
self.add_widget(Button(on_press = self.on_press, on_release=self.on_release,text = '{j}'.format(j=j),font_size='20sp',color=(0,0,0,1)))
def on_dismiss(self, arg):
# Do something on close of popup
print('Popup dismiss')
pass
def on_release(self,event):
event.background_color = 154/256,226/256,248/256,1
def on_press(self,event):
print ("date clicked :" + event.text)
event.background_color = 1,0,0,1
self.popup = Popup(title='Preventive Maintenance Information',title_color=(0,0,0,1),
content = Reminder(),
size_hint=(0.9,0.9),background='background.png')
self.popup.bind(on_dismiss = self.on_dismiss)
self.popup.open()
# ------------------------------------------------------------------------------------------------#
# class for months.kv file
class Months(BoxLayout):
def __init__(self,**kwargs):
super().__init__(**kwargs)
# Displayed time is defined here
self.now=datetime.datetime.now()
self.year=2014 # Suppose to be self.now.year but change it into year
self.month=8
self.day=1
# An pointer to current month button
self.now_btn=Button()
self.btn_color=(17/256,64/256,108/256,1)
def month_btn_press(self,instance):
# Renew previous button
self.now_btn.background_color=(17/256,64/256,108/256,1)
instance.background_color=1,0,0,1
#Update the month of the button
self.month=self.get_month(instance.text)
self.now_btn=instance
def month_btn_release(self,instance):
#instance.background_color=0.1,.5,.5,1
#instance.bind(on_release= lambda instance : Dates.update_dates())
app_=App.get_running_app()
dates_=app_.calendar_.dates_
dates_.update_dates(self.year,self.month)
pass
def get_month(self,month_name):
month_names=['Null','Jan','Feb','Mar','April','May','June','July','Aug','Sept','Oct','Nov','Dec']
return month_names.index(month_name)
# ------------------------------------------------------------------------------------------------#
# mainApp class
class mainApp(App):
time = StringProperty()
def __init__(self,**kwargs):
super().__init__(**kwargs)
self.calendar_=Calender()
#self.model_=Model() # Place model into Dates()
self.year=self.calendar_.months_.year
self.month=self.calendar_.months_.month
self.day=self.calendar_.months_.day
def update(self,*args):
self.now_real=datetime.datetime.now()
self.t=datetime.datetime(self.year,self.month,self.day,self.now_real.hour,self.now_real.minute,self.now_real.second)
self.time=self.t.strftime('%Y-%m-%d %H:%M:%S')
def build(self):
self.title = "KONE通力电梯可视化产品"
self.font_name=font_name
Clock.schedule_interval(self.update,1)
return self.calendar_
if __name__ =='__main__':
app = mainApp()
app.run()
| s.kv fil | identifier_name |
gossip.rs | use crate::clock::{Clock, HybridTimestamp};
use crate::event_emitter::EventEmitter;
use crate::proto::gossip::*;
use crate::proto::gossip_grpc::*;
use crate::proto::PeerState;
use crate::rpc_client::RpcClient;
use failure::{err_msg, format_err, Error};
use futures::prelude::*;
use futures::sync::{mpsc, oneshot};
use grpcio::{RpcContext, Service, UnarySink};
use log::*;
use std::collections::HashMap;
use std::sync::{Arc, RwLock, Weak};
use std::time::{Duration, Instant};
use tokio::timer::Interval;
#[derive(Clone)]
pub struct GossipServer {
state: GossipState,
sender: mpsc::Sender<GossipEvent>,
}
impl Gossip for GossipServer {
fn exchange(&mut self, ctx: RpcContext, req: GossipData, sink: UnarySink<GossipData>) {
ctx.spawn(
self.sender
.clone()
.send(GossipEvent::GossipReceived(req))
.map(|_| ())
.map_err(|_| error!("Failed to update gossip state")),
);
let out = self.state.get_current();
ctx.spawn(
sink.success(out)
.map_err(|err| error!("Error exhanging gossip: {:?}", err)),
);
}
}
#[derive(Eq, PartialEq)]
enum ClientEvent {
GossipTick,
Done,
}
#[derive(Clone)]
pub enum PeerStateEvent {
PeerJoined(u64),
}
impl GossipServer {
pub fn new(node_id: u64, bootstrap: &[String], self_address: &str, clock: Clock) -> Self {
let (sender, receiver) = mpsc::channel(32);
let state = GossipState::new(node_id, self_address, bootstrap, sender.clone(), clock);
run_gossip_event_handler(receiver, state.new_ref(), node_id);
GossipServer { state, sender }
}
pub fn build_service(&self) -> Service {
create_gossip(self.clone())
}
pub fn state(&self) -> GossipState {
self.state.clone()
}
pub fn update_meta_leader(&self, id: u64) -> impl Future<Item = (), Error = ()> {
self.event(GossipEvent::MetaLeaderChanged(id))
}
pub fn update_node_liveness(&self, peer: PeerState) -> impl Future<Item = (), Error = ()> {
self.event(GossipEvent::PeerUpdate(peer))
}
fn event(&self, event: GossipEvent) -> impl Future<Item = (), Error = ()> {
self.sender.clone().send(event).map(|_| ()).map_err(|_| ())
}
}
fn run_gossip_event_handler(
receiver: mpsc::Receiver<GossipEvent>,
state: GossipStateRef,
self_id: u64,
) {
let f = receiver.for_each(move |event| {
match event {
GossipEvent::NewPeerDiscovered(address) => {
connect_to_client(state.upgrade(), self_id, &address);
}
GossipEvent::GossipReceived(data) => {
state.upgrade().merge_gossip(data);
}
GossipEvent::MetaLeaderChanged(id) => {
state.upgrade().update_meta_leader(id);
}
GossipEvent::PeerUpdate(peer) => {
state.upgrade().update_node_liveness(&peer);
}
};
Ok(())
});
tokio::spawn(f);
}
#[derive(Clone)]
pub struct GossipState {
inner: Arc<RwLock<InnerGossipState>>,
}
#[derive(Clone)]
pub struct GossipStateRef {
inner: Weak<RwLock<InnerGossipState>>,
}
struct InnerGossipState {
clock: Clock,
current: GossipData,
connections: HashMap<String, oneshot::Sender<()>>,
clients: HashMap<String, RpcClient>,
peers: HashMap<u64, GossipData>,
event_publisher: mpsc::Sender<GossipEvent>,
event_emitter: EventEmitter<PeerStateEvent>,
}
enum GossipEvent {
GossipReceived(GossipData),
NewPeerDiscovered(String),
MetaLeaderChanged(u64),
PeerUpdate(PeerState),
}
impl GossipState {
fn new(
node_id: u64,
self_address: &str,
bootstrap: &[String],
event_publisher: mpsc::Sender<GossipEvent>,
clock: Clock,
) -> Self {
let mut current = GossipData::new();
current.set_node_id(node_id);
current.set_address(self_address.to_string());
let event_emitter = EventEmitter::new(32);
let inner = InnerGossipState {
current,
event_publisher,
event_emitter,
clock,
connections: HashMap::new(),
clients: HashMap::new(),
peers: HashMap::new(),
};
inner.publish_peer_discovered(self_address);
bootstrap
.iter()
.for_each(|address| inner.publish_peer_discovered(address));
Self {
inner: Arc::new(RwLock::new(inner)),
}
}
fn get_current(&self) -> GossipData {
let locked = self.inner.read().unwrap();
let mut gossip = locked.current.clone();
gossip.set_updated_at(locked.clock.now().into());
gossip
}
pub fn get_client(&self, node_id: u64) -> Result<RpcClient, Error> {
self.inner.read().unwrap().get_client(node_id)
}
pub fn get_meta_leader_client(&self) -> Result<RpcClient, Error> {
let locked = self.inner.read().unwrap();
locked
.meta_leader_id()
.ok_or_else(|| err_msg("Leader not available"))
.and_then(|node_id| self.get_client(node_id))
}
fn merge_gossip(&self, gossip: GossipData) {
self.inner.write().unwrap().merge_gossip(gossip)
}
pub fn update_meta_leader(&self, node_id: u64) {
self.inner.write().unwrap().update_meta_leader(node_id)
}
fn update_node_liveness(&self, peer_state: &PeerState) {
self.inner.write().unwrap().update_node_liveness(peer_state)
}
fn new_ref(&self) -> GossipStateRef {
GossipStateRef {
inner: Arc::downgrade(&self.inner),
}
}
fn update_clock(&self, peer_sent_at: HybridTimestamp) {
self.inner
.read()
.unwrap()
.clock
.update(&peer_sent_at)
.unwrap_or_else(|err| error!("Failed to update clock: {:?}", err));
}
}
impl GossipStateRef {
fn upgrade(&self) -> GossipState {
GossipState {
inner: self.inner.upgrade().unwrap(),
}
}
}
impl InnerGossipState {
fn get_client(&self, node_id: u64) -> Result<RpcClient, Error> {
self.peers
.get(&node_id)
.and_then(|gossip| self.clients.get(gossip.get_address()))
.cloned()
.ok_or_else(|| format_err!("Not connected to '{}'", node_id))
}
fn add_connection(&mut self, addr: &str, sender: oneshot::Sender<()>, client: RpcClient) {
self.connections.insert(addr.to_string(), sender);
self.clients.insert(addr.to_string(), client);
}
fn merge_gossip(&mut self, gossip: GossipData) {
let peer_id = gossip.get_node_id();
let current_addrs = self.current.mut_peer_addresses();
if current_addrs.get(&peer_id).is_none() {
let address = gossip.get_address();
current_addrs.insert(peer_id, address.to_string());
self.publish_peer_discovered(address);
}
gossip
.get_node_liveness()
.values()
.for_each(|peer| self.update_node_liveness(peer));
gossip
.get_peer_addresses()
.iter()
.filter(|(id, _)| !self.peers.contains_key(id))
.for_each(|(_, address)| self.publish_peer_discovered(address));
self.peers.insert(peer_id, gossip);
}
fn publish_event(&self, event: GossipEvent) {
let f = self.event_publisher.clone().send(event);
tokio::spawn(f.map(|_| ()).map_err(|_| ()));
}
fn | (&self, address: &str) {
self.publish_event(GossipEvent::NewPeerDiscovered(address.to_string()));
}
fn update_meta_leader(&mut self, node_id: u64) {
self.current.set_meta_leader_id(node_id);
}
fn meta_leader_id(&self) -> Option<u64> {
if self.current.meta_leader_id != 0 {
return Some(self.current.meta_leader_id);
}
self.peers
.values()
.filter(|peer| peer.meta_leader_id != 0)
.max_by_key(|peer| -> HybridTimestamp { peer.get_updated_at().into() })
.map(|peer| peer.meta_leader_id)
}
fn update_node_liveness(&mut self, peer: &PeerState) {
let peer_id = peer.get_peer().id;
if self.current.get_node_liveness().get(&peer_id).is_none() {
self.emit_new_live_node(peer_id)
}
self.current
.mut_node_liveness()
.insert(peer_id, peer.clone());
}
fn emit_new_live_node(&self, peer_id: u64) {
self.event_emitter.emit(PeerStateEvent::PeerJoined(peer_id))
}
}
struct ClientContext {
state: GossipStateRef,
client: RpcClient,
}
fn connect_to_client(state: GossipState, self_id: u64, address: &str) {
let mut locked_state = state.inner.write().unwrap();
if locked_state.connections.contains_key(address) {
return;
}
info!("Discovered: {}", address);
let client = RpcClient::new(self_id, address);
let (sender, receiver) = oneshot::channel();
locked_state.add_connection(address, sender, client.clone());
drop(locked_state);
let gossip_stream = Interval::new(Instant::now(), Duration::from_secs(5))
.map(|_| ClientEvent::GossipTick)
.map_err(|err| error!("Error in gossip tick: {:?}", err));
let close_stream = receiver
.into_stream()
.map(|_: ()| ClientEvent::Done)
.map_err(|_| ());
let (sender, receiver) = mpsc::channel(64);
let producer = gossip_stream
.select(close_stream)
.take_while(|item| Ok(*item != ClientEvent::Done))
.for_each(move |event| sender.clone().send(event).map_err(|_| ()).map(|_| ()));
let consumer = ClientContext::new(state.new_ref(), client).run(receiver);
tokio::spawn(consumer);
tokio::spawn(producer);
}
impl ClientContext {
pub fn new(state: GossipStateRef, client: RpcClient) -> Self {
Self { state, client }
}
pub fn run(self, receiver: mpsc::Receiver<ClientEvent>) -> impl Future<Item = (), Error = ()> {
// TODO: should age out nodes that have been failing gossip for a while
// TODO: should have a separate heartbeat loop for tracking peer offsets
receiver.for_each(move |_event| {
let state = self.state.upgrade();
let current_gossip = state.get_current();
self.client
.gossip(¤t_gossip)
.map(move |gossip| {
if gossip.get_node_id() != current_gossip.get_node_id() {
state.update_clock(gossip.get_updated_at().into());
}
state.merge_gossip(gossip)
})
.then(move |_| Ok(()))
})
}
}
| publish_peer_discovered | identifier_name |
gossip.rs | use crate::clock::{Clock, HybridTimestamp};
use crate::event_emitter::EventEmitter;
use crate::proto::gossip::*;
use crate::proto::gossip_grpc::*;
use crate::proto::PeerState;
use crate::rpc_client::RpcClient;
use failure::{err_msg, format_err, Error};
use futures::prelude::*;
use futures::sync::{mpsc, oneshot};
use grpcio::{RpcContext, Service, UnarySink};
use log::*;
use std::collections::HashMap;
use std::sync::{Arc, RwLock, Weak};
use std::time::{Duration, Instant};
use tokio::timer::Interval;
#[derive(Clone)]
pub struct GossipServer {
state: GossipState,
sender: mpsc::Sender<GossipEvent>,
}
impl Gossip for GossipServer {
fn exchange(&mut self, ctx: RpcContext, req: GossipData, sink: UnarySink<GossipData>) {
ctx.spawn(
self.sender
.clone()
.send(GossipEvent::GossipReceived(req))
.map(|_| ())
.map_err(|_| error!("Failed to update gossip state")),
);
let out = self.state.get_current();
ctx.spawn(
sink.success(out)
.map_err(|err| error!("Error exhanging gossip: {:?}", err)),
);
}
}
#[derive(Eq, PartialEq)]
enum ClientEvent {
GossipTick,
Done,
}
#[derive(Clone)]
pub enum PeerStateEvent {
PeerJoined(u64),
}
impl GossipServer {
pub fn new(node_id: u64, bootstrap: &[String], self_address: &str, clock: Clock) -> Self |
pub fn build_service(&self) -> Service {
create_gossip(self.clone())
}
pub fn state(&self) -> GossipState {
self.state.clone()
}
pub fn update_meta_leader(&self, id: u64) -> impl Future<Item = (), Error = ()> {
self.event(GossipEvent::MetaLeaderChanged(id))
}
pub fn update_node_liveness(&self, peer: PeerState) -> impl Future<Item = (), Error = ()> {
self.event(GossipEvent::PeerUpdate(peer))
}
fn event(&self, event: GossipEvent) -> impl Future<Item = (), Error = ()> {
self.sender.clone().send(event).map(|_| ()).map_err(|_| ())
}
}
fn run_gossip_event_handler(
receiver: mpsc::Receiver<GossipEvent>,
state: GossipStateRef,
self_id: u64,
) {
let f = receiver.for_each(move |event| {
match event {
GossipEvent::NewPeerDiscovered(address) => {
connect_to_client(state.upgrade(), self_id, &address);
}
GossipEvent::GossipReceived(data) => {
state.upgrade().merge_gossip(data);
}
GossipEvent::MetaLeaderChanged(id) => {
state.upgrade().update_meta_leader(id);
}
GossipEvent::PeerUpdate(peer) => {
state.upgrade().update_node_liveness(&peer);
}
};
Ok(())
});
tokio::spawn(f);
}
#[derive(Clone)]
pub struct GossipState {
inner: Arc<RwLock<InnerGossipState>>,
}
#[derive(Clone)]
pub struct GossipStateRef {
inner: Weak<RwLock<InnerGossipState>>,
}
struct InnerGossipState {
clock: Clock,
current: GossipData,
connections: HashMap<String, oneshot::Sender<()>>,
clients: HashMap<String, RpcClient>,
peers: HashMap<u64, GossipData>,
event_publisher: mpsc::Sender<GossipEvent>,
event_emitter: EventEmitter<PeerStateEvent>,
}
enum GossipEvent {
GossipReceived(GossipData),
NewPeerDiscovered(String),
MetaLeaderChanged(u64),
PeerUpdate(PeerState),
}
impl GossipState {
fn new(
node_id: u64,
self_address: &str,
bootstrap: &[String],
event_publisher: mpsc::Sender<GossipEvent>,
clock: Clock,
) -> Self {
let mut current = GossipData::new();
current.set_node_id(node_id);
current.set_address(self_address.to_string());
let event_emitter = EventEmitter::new(32);
let inner = InnerGossipState {
current,
event_publisher,
event_emitter,
clock,
connections: HashMap::new(),
clients: HashMap::new(),
peers: HashMap::new(),
};
inner.publish_peer_discovered(self_address);
bootstrap
.iter()
.for_each(|address| inner.publish_peer_discovered(address));
Self {
inner: Arc::new(RwLock::new(inner)),
}
}
fn get_current(&self) -> GossipData {
let locked = self.inner.read().unwrap();
let mut gossip = locked.current.clone();
gossip.set_updated_at(locked.clock.now().into());
gossip
}
pub fn get_client(&self, node_id: u64) -> Result<RpcClient, Error> {
self.inner.read().unwrap().get_client(node_id)
}
pub fn get_meta_leader_client(&self) -> Result<RpcClient, Error> {
let locked = self.inner.read().unwrap();
locked
.meta_leader_id()
.ok_or_else(|| err_msg("Leader not available"))
.and_then(|node_id| self.get_client(node_id))
}
fn merge_gossip(&self, gossip: GossipData) {
self.inner.write().unwrap().merge_gossip(gossip)
}
pub fn update_meta_leader(&self, node_id: u64) {
self.inner.write().unwrap().update_meta_leader(node_id)
}
fn update_node_liveness(&self, peer_state: &PeerState) {
self.inner.write().unwrap().update_node_liveness(peer_state)
}
fn new_ref(&self) -> GossipStateRef {
GossipStateRef {
inner: Arc::downgrade(&self.inner),
}
}
fn update_clock(&self, peer_sent_at: HybridTimestamp) {
self.inner
.read()
.unwrap()
.clock
.update(&peer_sent_at)
.unwrap_or_else(|err| error!("Failed to update clock: {:?}", err));
}
}
impl GossipStateRef {
fn upgrade(&self) -> GossipState {
GossipState {
inner: self.inner.upgrade().unwrap(),
}
}
}
impl InnerGossipState {
fn get_client(&self, node_id: u64) -> Result<RpcClient, Error> {
self.peers
.get(&node_id)
.and_then(|gossip| self.clients.get(gossip.get_address()))
.cloned()
.ok_or_else(|| format_err!("Not connected to '{}'", node_id))
}
fn add_connection(&mut self, addr: &str, sender: oneshot::Sender<()>, client: RpcClient) {
self.connections.insert(addr.to_string(), sender);
self.clients.insert(addr.to_string(), client);
}
fn merge_gossip(&mut self, gossip: GossipData) {
let peer_id = gossip.get_node_id();
let current_addrs = self.current.mut_peer_addresses();
if current_addrs.get(&peer_id).is_none() {
let address = gossip.get_address();
current_addrs.insert(peer_id, address.to_string());
self.publish_peer_discovered(address);
}
gossip
.get_node_liveness()
.values()
.for_each(|peer| self.update_node_liveness(peer));
gossip
.get_peer_addresses()
.iter()
.filter(|(id, _)| !self.peers.contains_key(id))
.for_each(|(_, address)| self.publish_peer_discovered(address));
self.peers.insert(peer_id, gossip);
}
fn publish_event(&self, event: GossipEvent) {
let f = self.event_publisher.clone().send(event);
tokio::spawn(f.map(|_| ()).map_err(|_| ()));
}
fn publish_peer_discovered(&self, address: &str) {
self.publish_event(GossipEvent::NewPeerDiscovered(address.to_string()));
}
fn update_meta_leader(&mut self, node_id: u64) {
self.current.set_meta_leader_id(node_id);
}
fn meta_leader_id(&self) -> Option<u64> {
if self.current.meta_leader_id != 0 {
return Some(self.current.meta_leader_id);
}
self.peers
.values()
.filter(|peer| peer.meta_leader_id != 0)
.max_by_key(|peer| -> HybridTimestamp { peer.get_updated_at().into() })
.map(|peer| peer.meta_leader_id)
}
fn update_node_liveness(&mut self, peer: &PeerState) {
let peer_id = peer.get_peer().id;
if self.current.get_node_liveness().get(&peer_id).is_none() {
self.emit_new_live_node(peer_id)
}
self.current
.mut_node_liveness()
.insert(peer_id, peer.clone());
}
fn emit_new_live_node(&self, peer_id: u64) {
self.event_emitter.emit(PeerStateEvent::PeerJoined(peer_id))
}
}
struct ClientContext {
state: GossipStateRef,
client: RpcClient,
}
fn connect_to_client(state: GossipState, self_id: u64, address: &str) {
let mut locked_state = state.inner.write().unwrap();
if locked_state.connections.contains_key(address) {
return;
}
info!("Discovered: {}", address);
let client = RpcClient::new(self_id, address);
let (sender, receiver) = oneshot::channel();
locked_state.add_connection(address, sender, client.clone());
drop(locked_state);
let gossip_stream = Interval::new(Instant::now(), Duration::from_secs(5))
.map(|_| ClientEvent::GossipTick)
.map_err(|err| error!("Error in gossip tick: {:?}", err));
let close_stream = receiver
.into_stream()
.map(|_: ()| ClientEvent::Done)
.map_err(|_| ());
let (sender, receiver) = mpsc::channel(64);
let producer = gossip_stream
.select(close_stream)
.take_while(|item| Ok(*item != ClientEvent::Done))
.for_each(move |event| sender.clone().send(event).map_err(|_| ()).map(|_| ()));
let consumer = ClientContext::new(state.new_ref(), client).run(receiver);
tokio::spawn(consumer);
tokio::spawn(producer);
}
impl ClientContext {
pub fn new(state: GossipStateRef, client: RpcClient) -> Self {
Self { state, client }
}
pub fn run(self, receiver: mpsc::Receiver<ClientEvent>) -> impl Future<Item = (), Error = ()> {
// TODO: should age out nodes that have been failing gossip for a while
// TODO: should have a separate heartbeat loop for tracking peer offsets
receiver.for_each(move |_event| {
let state = self.state.upgrade();
let current_gossip = state.get_current();
self.client
.gossip(¤t_gossip)
.map(move |gossip| {
if gossip.get_node_id() != current_gossip.get_node_id() {
state.update_clock(gossip.get_updated_at().into());
}
state.merge_gossip(gossip)
})
.then(move |_| Ok(()))
})
}
}
| {
let (sender, receiver) = mpsc::channel(32);
let state = GossipState::new(node_id, self_address, bootstrap, sender.clone(), clock);
run_gossip_event_handler(receiver, state.new_ref(), node_id);
GossipServer { state, sender }
} | identifier_body |
gossip.rs | use crate::clock::{Clock, HybridTimestamp};
use crate::event_emitter::EventEmitter;
use crate::proto::gossip::*;
use crate::proto::gossip_grpc::*;
use crate::proto::PeerState;
use crate::rpc_client::RpcClient;
use failure::{err_msg, format_err, Error};
use futures::prelude::*;
use futures::sync::{mpsc, oneshot};
use grpcio::{RpcContext, Service, UnarySink};
use log::*;
use std::collections::HashMap;
use std::sync::{Arc, RwLock, Weak};
use std::time::{Duration, Instant};
use tokio::timer::Interval;
#[derive(Clone)]
pub struct GossipServer {
state: GossipState,
sender: mpsc::Sender<GossipEvent>,
}
impl Gossip for GossipServer {
fn exchange(&mut self, ctx: RpcContext, req: GossipData, sink: UnarySink<GossipData>) {
ctx.spawn(
self.sender
.clone()
.send(GossipEvent::GossipReceived(req))
.map(|_| ())
.map_err(|_| error!("Failed to update gossip state")),
);
let out = self.state.get_current();
ctx.spawn(
sink.success(out)
.map_err(|err| error!("Error exhanging gossip: {:?}", err)),
);
}
}
#[derive(Eq, PartialEq)]
enum ClientEvent {
GossipTick,
Done,
}
#[derive(Clone)]
pub enum PeerStateEvent {
PeerJoined(u64),
}
impl GossipServer {
pub fn new(node_id: u64, bootstrap: &[String], self_address: &str, clock: Clock) -> Self {
let (sender, receiver) = mpsc::channel(32);
let state = GossipState::new(node_id, self_address, bootstrap, sender.clone(), clock);
run_gossip_event_handler(receiver, state.new_ref(), node_id);
GossipServer { state, sender }
}
pub fn build_service(&self) -> Service {
create_gossip(self.clone())
}
pub fn state(&self) -> GossipState {
self.state.clone()
}
pub fn update_meta_leader(&self, id: u64) -> impl Future<Item = (), Error = ()> {
self.event(GossipEvent::MetaLeaderChanged(id))
}
pub fn update_node_liveness(&self, peer: PeerState) -> impl Future<Item = (), Error = ()> {
self.event(GossipEvent::PeerUpdate(peer))
}
fn event(&self, event: GossipEvent) -> impl Future<Item = (), Error = ()> {
self.sender.clone().send(event).map(|_| ()).map_err(|_| ())
}
}
fn run_gossip_event_handler(
receiver: mpsc::Receiver<GossipEvent>,
state: GossipStateRef,
self_id: u64,
) {
let f = receiver.for_each(move |event| {
match event {
GossipEvent::NewPeerDiscovered(address) => {
connect_to_client(state.upgrade(), self_id, &address);
}
GossipEvent::GossipReceived(data) => {
state.upgrade().merge_gossip(data);
}
GossipEvent::MetaLeaderChanged(id) => {
state.upgrade().update_meta_leader(id);
}
GossipEvent::PeerUpdate(peer) => {
state.upgrade().update_node_liveness(&peer);
}
};
Ok(())
});
tokio::spawn(f);
}
#[derive(Clone)]
pub struct GossipState {
inner: Arc<RwLock<InnerGossipState>>,
}
#[derive(Clone)]
pub struct GossipStateRef {
inner: Weak<RwLock<InnerGossipState>>,
}
struct InnerGossipState {
clock: Clock,
current: GossipData,
connections: HashMap<String, oneshot::Sender<()>>,
clients: HashMap<String, RpcClient>,
peers: HashMap<u64, GossipData>,
event_publisher: mpsc::Sender<GossipEvent>,
event_emitter: EventEmitter<PeerStateEvent>,
}
enum GossipEvent {
GossipReceived(GossipData),
NewPeerDiscovered(String),
MetaLeaderChanged(u64),
PeerUpdate(PeerState),
}
impl GossipState {
fn new(
node_id: u64,
self_address: &str,
bootstrap: &[String],
event_publisher: mpsc::Sender<GossipEvent>,
clock: Clock,
) -> Self {
let mut current = GossipData::new();
current.set_node_id(node_id);
current.set_address(self_address.to_string());
let event_emitter = EventEmitter::new(32);
let inner = InnerGossipState {
current,
event_publisher,
event_emitter,
clock,
connections: HashMap::new(),
clients: HashMap::new(),
peers: HashMap::new(),
};
inner.publish_peer_discovered(self_address);
bootstrap
.iter()
.for_each(|address| inner.publish_peer_discovered(address));
Self {
inner: Arc::new(RwLock::new(inner)),
}
}
fn get_current(&self) -> GossipData {
let locked = self.inner.read().unwrap();
let mut gossip = locked.current.clone();
gossip.set_updated_at(locked.clock.now().into());
gossip
}
pub fn get_client(&self, node_id: u64) -> Result<RpcClient, Error> {
self.inner.read().unwrap().get_client(node_id)
}
pub fn get_meta_leader_client(&self) -> Result<RpcClient, Error> {
let locked = self.inner.read().unwrap();
locked
.meta_leader_id()
.ok_or_else(|| err_msg("Leader not available"))
.and_then(|node_id| self.get_client(node_id))
}
fn merge_gossip(&self, gossip: GossipData) {
self.inner.write().unwrap().merge_gossip(gossip)
}
pub fn update_meta_leader(&self, node_id: u64) {
self.inner.write().unwrap().update_meta_leader(node_id)
}
fn update_node_liveness(&self, peer_state: &PeerState) {
self.inner.write().unwrap().update_node_liveness(peer_state)
}
fn new_ref(&self) -> GossipStateRef {
GossipStateRef {
inner: Arc::downgrade(&self.inner),
}
}
fn update_clock(&self, peer_sent_at: HybridTimestamp) {
self.inner
.read()
.unwrap()
.clock
.update(&peer_sent_at)
.unwrap_or_else(|err| error!("Failed to update clock: {:?}", err));
}
}
impl GossipStateRef {
fn upgrade(&self) -> GossipState {
GossipState {
inner: self.inner.upgrade().unwrap(),
}
}
}
impl InnerGossipState {
fn get_client(&self, node_id: u64) -> Result<RpcClient, Error> {
self.peers
.get(&node_id)
.and_then(|gossip| self.clients.get(gossip.get_address()))
.cloned()
.ok_or_else(|| format_err!("Not connected to '{}'", node_id))
}
fn add_connection(&mut self, addr: &str, sender: oneshot::Sender<()>, client: RpcClient) {
self.connections.insert(addr.to_string(), sender);
self.clients.insert(addr.to_string(), client);
}
fn merge_gossip(&mut self, gossip: GossipData) {
let peer_id = gossip.get_node_id();
let current_addrs = self.current.mut_peer_addresses();
if current_addrs.get(&peer_id).is_none() |
gossip
.get_node_liveness()
.values()
.for_each(|peer| self.update_node_liveness(peer));
gossip
.get_peer_addresses()
.iter()
.filter(|(id, _)| !self.peers.contains_key(id))
.for_each(|(_, address)| self.publish_peer_discovered(address));
self.peers.insert(peer_id, gossip);
}
fn publish_event(&self, event: GossipEvent) {
let f = self.event_publisher.clone().send(event);
tokio::spawn(f.map(|_| ()).map_err(|_| ()));
}
fn publish_peer_discovered(&self, address: &str) {
self.publish_event(GossipEvent::NewPeerDiscovered(address.to_string()));
}
fn update_meta_leader(&mut self, node_id: u64) {
self.current.set_meta_leader_id(node_id);
}
fn meta_leader_id(&self) -> Option<u64> {
if self.current.meta_leader_id != 0 {
return Some(self.current.meta_leader_id);
}
self.peers
.values()
.filter(|peer| peer.meta_leader_id != 0)
.max_by_key(|peer| -> HybridTimestamp { peer.get_updated_at().into() })
.map(|peer| peer.meta_leader_id)
}
fn update_node_liveness(&mut self, peer: &PeerState) {
let peer_id = peer.get_peer().id;
if self.current.get_node_liveness().get(&peer_id).is_none() {
self.emit_new_live_node(peer_id)
}
self.current
.mut_node_liveness()
.insert(peer_id, peer.clone());
}
fn emit_new_live_node(&self, peer_id: u64) {
self.event_emitter.emit(PeerStateEvent::PeerJoined(peer_id))
}
}
struct ClientContext {
state: GossipStateRef,
client: RpcClient,
}
fn connect_to_client(state: GossipState, self_id: u64, address: &str) {
let mut locked_state = state.inner.write().unwrap();
if locked_state.connections.contains_key(address) {
return;
}
info!("Discovered: {}", address);
let client = RpcClient::new(self_id, address);
let (sender, receiver) = oneshot::channel();
locked_state.add_connection(address, sender, client.clone());
drop(locked_state);
let gossip_stream = Interval::new(Instant::now(), Duration::from_secs(5))
.map(|_| ClientEvent::GossipTick)
.map_err(|err| error!("Error in gossip tick: {:?}", err));
let close_stream = receiver
.into_stream()
.map(|_: ()| ClientEvent::Done)
.map_err(|_| ());
let (sender, receiver) = mpsc::channel(64);
let producer = gossip_stream
.select(close_stream)
.take_while(|item| Ok(*item != ClientEvent::Done))
.for_each(move |event| sender.clone().send(event).map_err(|_| ()).map(|_| ()));
let consumer = ClientContext::new(state.new_ref(), client).run(receiver);
tokio::spawn(consumer);
tokio::spawn(producer);
}
impl ClientContext {
pub fn new(state: GossipStateRef, client: RpcClient) -> Self {
Self { state, client }
}
pub fn run(self, receiver: mpsc::Receiver<ClientEvent>) -> impl Future<Item = (), Error = ()> {
// TODO: should age out nodes that have been failing gossip for a while
// TODO: should have a separate heartbeat loop for tracking peer offsets
receiver.for_each(move |_event| {
let state = self.state.upgrade();
let current_gossip = state.get_current();
self.client
.gossip(¤t_gossip)
.map(move |gossip| {
if gossip.get_node_id() != current_gossip.get_node_id() {
state.update_clock(gossip.get_updated_at().into());
}
state.merge_gossip(gossip)
})
.then(move |_| Ok(()))
})
}
}
| {
let address = gossip.get_address();
current_addrs.insert(peer_id, address.to_string());
self.publish_peer_discovered(address);
} | conditional_block |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.