// view_test.go
// (the opening of this file is truncated in the source)
); err != nil {
e.tb.Fatal("Error:", err)
}
if err := e.Core.Compilers.Sync(ctx); err != nil {
e.tb.Fatal("Error:", err)
}
if err := e.Core.Settings.Sync(ctx); err != nil {
e.tb.Fatal("Error:", err)
}
}
func (e *TestEnv) CreateUserRoles(login string, roles ...string) error {
for _, role := range roles {
if _, err := e.Socket.CreateUserRole(context.Background(), login, role); err != nil {
return err
}
}
return nil
}
func (e *TestEnv) Check(data any) {
e.checks.Check(data)
}
func (e *TestEnv) Close() {
e.Server.Close()
e.Core.Stop()
_ = db.ApplyMigrations(context.Background(), e.Core.DB, "solve", migrations.Schema, db.WithZeroMigration)
_ = db.ApplyMigrations(context.Background(), e.Core.DB, "solve_data", migrations.Data, db.WithZeroMigration)
e.checks.Close()
}
func (e *TestEnv) WaitProblemUpdated(id int64) {
for {
if err := e.Core.Tasks.Sync(context.Background()); err != nil {
e.tb.Fatal("Error:", err)
}
tasks, err := e.Core.Tasks.FindByProblem(id)
if err != nil {
e.tb.Fatal("Error:", err)
}
if len(tasks) == 0 {
e.tb.Fatal("Empty problem tasks")
}
if tasks[0].Status == models.SucceededTask {
return
}
if tasks[0].Status == models.FailedTask {
e.tb.Fatalf("Task failed: %q", string(tasks[0].State))
}
time.Sleep(time.Second)
}
}
type TestEnvOption interface {
UpdateConfig(*config.Config)
Setup(*TestEnv) error
}
type WithInvoker struct{}
func (o WithInvoker) UpdateConfig(cfg *config.Config) {
cfg.Invoker = &config.Invoker{
Workers: 1,
Safeexec: config.Safeexec{
Path: "../safeexec/safeexec",
},
}
}
func (o WithInvoker) Setup(env *TestEnv) error {
return invoker.New(env.Core).Start()
}
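// An illustrative custom option (hypothetical; not part of the original
// suite): any struct implementing TestEnvOption can rewrite the config before
// the core starts and run extra setup once the server is up. This one only
// makes the safeexec path configurable, mirroring WithInvoker above.
type WithInvokerPath struct {
	Path string
}

func (o WithInvokerPath) UpdateConfig(cfg *config.Config) {
	cfg.Invoker = &config.Invoker{
		Workers:  1,
		Safeexec: config.Safeexec{Path: o.Path},
	}
}

func (o WithInvokerPath) Setup(env *TestEnv) error {
	return invoker.New(env.Core).Start()
}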
func NewTestEnv(tb testing.TB, options ...TestEnvOption) *TestEnv {
env := TestEnv{
tb: tb,
checks: newTestCheckState(tb),
Now: time.Date(2020, 1, 1, 10, 0, 0, 0, time.UTC),
Rand: rand.New(rand.NewSource(42)),
}
cfg := config.Config{
DB: config.DB{
Options: config.SQLiteOptions{Path: ":memory:"},
},
Security: &config.Security{
PasswordSalt: "qwerty123",
},
Storage: &config.Storage{
Options: config.LocalStorageOptions{
FilesDir: tb.TempDir(),
},
},
}
if _, ok := tb.(*testing.B); ok || os.Getenv("TEST_ENABLE_LOGS") != "1" {
log.SetLevel(log.OFF)
cfg.LogLevel = config.LogLevel(log.OFF)
}
for _, option := range options {
option.UpdateConfig(&cfg)
}
if c, err := core.NewCore(cfg); err != nil {
tb.Fatal("Error:", err)
} else {
env.Core = c
}
env.Core.SetupAllStores()
ctx := context.Background()
_ = db.ApplyMigrations(ctx, env.Core.DB, "solve", migrations.Schema, db.WithZeroMigration)
_ = db.ApplyMigrations(ctx, env.Core.DB, "solve_data", migrations.Data, db.WithZeroMigration)
if err := db.ApplyMigrations(ctx, env.Core.DB, "solve", migrations.Schema); err != nil {
tb.Fatal("Error:", err)
}
if err := db.ApplyMigrations(ctx, env.Core.DB, "solve_data", migrations.Data); err != nil {
tb.Fatal("Error:", err)
}
if err := env.Core.Start(); err != nil {
tb.Fatal("Error:", err)
}
e := echo.New()
e.Logger = env.Core.Logger()
view := NewView(env.Core)
nowFn := func(next echo.HandlerFunc) echo.HandlerFunc {
return func(c echo.Context) error {
c.Set(nowKey, env.Now)
return next(c)
}
}
e.Use(nowFn)
view.Register(e.Group("/api"))
view.RegisterSocket(e.Group("/socket"))
view.StartDaemons()
env.Server = httptest.NewServer(e)
env.Client = newTestClient(env.Server.URL + "/api")
env.Socket = newTestClient(env.Server.URL + "/socket")
for _, option := range options {
option.Setup(&env)
}
return &env
}
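// Typical usage (a sketch; only the test body is illustrative — every name in
// it exists in this file): build the environment, guarantee teardown, then
// drive the API through the pre-wired clients.
func TestEnvUsageExample(t *testing.T) {
	e := NewTestEnv(t)
	defer e.Close()
	user := NewTestUser(e)
	user.LoginClient()
	defer user.LogoutClient()
	if err := e.Client.Ping(context.Background()); err != nil {
		t.Fatal("Error:", err)
	}
}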
type TestUser struct {
User
Password string
env *TestEnv
}
func (u *TestUser) LoginClient() {
_, err := u.env.Client.Login(context.Background(), u.User.Login, u.Password)
if err != nil {
u.env.tb.Fatal("Error:", err)
}
}
func (u *TestUser) LogoutClient() {
if err := u.env.Client.Logout(context.Background()); err != nil {
u.env.tb.Fatal("Error:", err)
}
}
func (u *TestUser) AddRoles(names ...string) {
if err := u.env.CreateUserRoles(u.User.Login, names...); err != nil {
u.env.tb.Fatal("Error:", err)
}
u.env.SyncStores()
}
func NewTestUser(e *TestEnv) *TestUser {
login := fmt.Sprintf("login-%d", e.Rand.Int31())
password := fmt.Sprintf("password-%d", e.Rand.Int63())
user, err := e.Client.Register(context.Background(), RegisterUserForm{
Login: login,
Email: login + "@example.com",
Password: password,
FirstName: "First",
LastName: "Last",
MiddleName: "Middle",
})
if err != nil {
e.tb.Fatal("Error:", err)
}
return &TestUser{
User: user,
Password: password,
env: e,
}
}
type testCheckState struct {
tb testing.TB
checks []json.RawMessage
pos int
reset bool
path string
}
func (s *testCheckState) Check(data any) {
raw, err := json.MarshalIndent(data, "", " ")
if err != nil {
s.tb.Fatal("Unable to marshal data:", data)
}
if s.pos > len(s.checks) {
s.tb.Fatalf("Invalid check position: %d", s.pos)
}
if s.pos == len(s.checks) {
if s.reset {
s.checks = append(s.checks, raw)
s.pos++
return
}
s.tb.Errorf("Unexpected check with data: %s", raw)
s.tb.Fatalf("Maybe you should use: TEST_RESET_DATA=1")
}
options := jsondiff.DefaultConsoleOptions()
diff, report := jsondiff.Compare(s.checks[s.pos], raw, &options)
if diff != jsondiff.FullMatch {
if s.reset {
s.checks[s.pos] = raw
s.pos++
return
}
s.tb.Errorf("Unexpected result difference: %s", report)
s.tb.Fatalf("Maybe you should use: TEST_RESET_DATA=1")
}
s.pos++
}
func (s *testCheckState) Close() {
if s.reset {
if s.pos == 0 {
_ = os.Remove(s.path)
return
}
raw, err := json.MarshalIndent(s.checks, "", " ")
if err != nil {
s.tb.Fatal("Unable to marshal test data:", err)
}
if err := os.WriteFile(
s.path, raw, os.ModePerm,
); err != nil {
s.tb.Fatal("Error:", err)
}
}
}
func newTestCheckState(tb testing.TB) *testCheckState {
state := testCheckState{
tb: tb,
reset: os.Getenv("TEST_RESET_DATA") == "1",
path: filepath.Join("testdata", tb.Name()+".json"),
}
if !state.reset {
file, err := os.Open(state.path)
if err != nil {
if !errors.Is(err, os.ErrNotExist) {
tb.Fatal("Error:", err)
}
} else {
defer file.Close()
if err := json.NewDecoder(file).Decode(&state.checks); err != nil {
tb.Fatal("Error:", err)
}
}
}
return &state
}
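// The golden-data flow in short: a first run with TEST_RESET_DATA=1 records
// every Check(...) payload and Close writes them to testdata/<TestName>.json;
// subsequent runs load that file here and diff each Check call against the
// entry at the same position, failing with a jsondiff report on mismatch.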
type testClient struct {
*Client
}
type testJar struct {
mutex sync.Mutex
cookies map[string]*http.Cookie
}
func (j *testJar) Cookies(*url.URL) []*http.Cookie {
j.mutex.Lock()
defer j.mutex.Unlock()
var cookies []*http.Cookie
for _, cookie := range j.cookies {
cookies = append(cookies, cookie)
}
return cookies
}
func (j *testJar) SetCookies(u *url.URL, cookies []*http.Cookie) {
j.mutex.Lock()
defer j.mutex.Unlock()
if j.cookies == nil {
j.cookies = map[string]*http.Cookie{}
}
for _, cookie := range cookies {
j.cookies[cookie.Name] = cookie
}
}
func newTestClient(endpoint string) *testClient {
client := NewClient(endpoint)
client.client.Jar = &testJar{}
client.Headers = map[string]string{"X-Solve-Sync": "1"}
return &testClient{client}
}
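// Sketch: the jar above keeps session cookies between calls, so Login followed
// by authenticated requests works against the httptest server; the
// X-Solve-Sync header is attached to every request. The endpoint below is
// illustrative.
//
//	c := newTestClient("http://127.0.0.1:8080/api")
//	if _, err := c.Status(); err != nil {
//		// handle error
//	}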
func (c *testClient) Status() (Status, error) {
req, err := http.NewRequest(http.MethodGet, c.getURL("/v0/status"), nil)
if err != nil {
return Status{}, err
}
var respData Status
_, err = c.doRequest(req, http.StatusOK, &respData)
return respData, err
}
func (c *testClient) ObserveUser(login string) (User, error) {
req, err := http.NewRequest(
http.MethodGet, c.getURL("/v0/users/%s", login), nil,
)
if err != nil {
return User{}, err
}
var respData User
_, err = c.doRequest(req, http.StatusOK, &respData)
return respData, err
}
func (c *testClient) ObserveContests() (Contests, error) {
req, err := http.NewRequest(
http.MethodGet, c.getURL("/v0/contests"), nil,
)
if err != nil {
return Contests{}, err
}
var respData Contests
_, err = c.doRequest(req, http.StatusOK, &respData)
return respData, err
}
func (c *testClient) CreateContest(form createContestForm) (Contest, error) {
data, err := json.Marshal(form)
if err != nil {
return Contest{}, err
}
req, err := http.NewRequest(
http.MethodPost, c.getURL("/v0/contests"),
bytes.NewReader(data),
)
if err != nil {
return Contest{}, err
}
var respData Contest
_, err = c.doRequest(req, http.StatusCreated, &respData)
return respData, err
}
func (c *testClient) CreateContestProblem(
contestID int64,
form createContestProblemForm,
) (ContestProblem, error) {
data, err := json.Marshal(form)
if err != nil {
return ContestProblem{}, err
}
req, err := http.NewRequest(
http.MethodPost,
c.getURL("/v0/contests/%d/problems", contestID),
bytes.NewReader(data),
)
if err != nil {
return ContestProblem{}, err
}
var respData ContestProblem
_, err = c.doRequest(req, http.StatusCreated, &respData)
return respData, err
}
func (c *testClient) CreateRoleRole(role string, child string) (Role, error) {
req, err := http.NewRequest(
http.MethodPost, c.getURL("/v0/roles/%s/roles/%s", role, child),
nil,
)
if err != nil {
return Role{}, err
}
var respData Role
_, err = c.doRequest(req, http.StatusCreated, &respData)
return respData, err
}
func (c *testClient) DeleteRoleRole(role string, child string) (Role, error) {
req, err := http.NewRequest(
http.MethodDelete, c.getURL("/v0/roles/%s/roles/%s", role, child),
nil,
)
if err != nil {
return Role{}, err
}
var respData Role
_, err = c.doRequest(req, http.StatusOK, &respData)
return respData, err
}
func TestPing(t *testing.T) {
e := NewTestEnv(t)
defer e.Close()
if err := e.Client.Ping(context.Background()); err != nil {
t.Fatal("Error:", err)
}
}
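// A slightly larger test in the same style (a sketch; the role name is a
// placeholder, not taken from the original file):
func TestObserveContestsExample(t *testing.T) {
	e := NewTestEnv(t)
	defer e.Close()
	user := NewTestUser(e)
	user.AddRoles("observe_contests")
	user.LoginClient()
	contests, err := e.Client.ObserveContests()
	if err != nil {
		t.Fatal("Error:", err)
	}
	e.Check(contests)
}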
// func TestHealth ... (body truncated in the source)
// client.go
package cfclient
import (
"bytes"
"crypto/tls"
"encoding/json"
"fmt"
"golang.org/x/oauth2"
"golang.org/x/oauth2/clientcredentials"
"io"
"io/ioutil"
"net/http"
"net/url"
"strings"
"time"
"github.com/pkg/errors"
"golang.org/x/net/context"
)
// Client used to communicate with Cloud Foundry
type Client struct {
Config Config
Endpoint Endpoints
}
type Endpoints struct {
Links Links `json:"links"`
}
type Links struct {
AuthEndpoint EndpointUrl `json:"login"`
TokenEndpoint EndpointUrl `json:"uaa"`
}
type EndpointUrl struct {
URL string `json:"href"`
}
// Config is used to configure the creation of a client
type Config struct {
ApiAddress string `json:"api_url"`
Username string `json:"user"`
Password string `json:"password"`
ClientID string `json:"client_id"`
ClientSecret string `json:"client_secret"`
SkipSslValidation bool `json:"skip_ssl_validation"`
HttpClient *http.Client
Token string `json:"auth_token"`
TokenSource oauth2.TokenSource
tokenSourceDeadline *time.Time
UserAgent string `json:"user_agent"`
Origin string `json:"-"`
}
type LoginHint struct {
Origin string `json:"origin"`
}
// Request is used to help build up a request
type Request struct {
method string
url string
params url.Values
body io.Reader
obj interface{}
}
// NewClient returns a new client
func NewClient(config *Config) (client *Client, err error) {
// bootstrap the config
defConfig := DefaultConfig()
if len(config.ApiAddress) == 0 {
config.ApiAddress = defConfig.ApiAddress
}
if len(config.Username) == 0 {
config.Username = defConfig.Username
}
if len(config.Password) == 0 {
config.Password = defConfig.Password
}
if len(config.Token) == 0 {
config.Token = defConfig.Token
}
if len(config.UserAgent) == 0 {
config.UserAgent = defConfig.UserAgent
}
if config.HttpClient == nil {
config.HttpClient = defConfig.HttpClient
}
if config.HttpClient.Transport == nil {
config.HttpClient.Transport = shallowDefaultTransport()
}
var tp *http.Transport
switch t := config.HttpClient.Transport.(type) {
case *http.Transport:
tp = t
case *oauth2.Transport:
if bt, ok := t.Base.(*http.Transport); ok {
tp = bt
}
}
if tp != nil {
if tp.TLSClientConfig == nil {
tp.TLSClientConfig = &tls.Config{}
}
tp.TLSClientConfig.InsecureSkipVerify = config.SkipSslValidation
}
config.ApiAddress = strings.TrimRight(config.ApiAddress, "/")
client = &Client{
Config: *config,
}
if err := client.refreshEndpoint(); err != nil {
return nil, err
}
return client, nil
}
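// Usage sketch (assumes a reachable CF API; all values are placeholders):
//
//	client, err := NewClient(&Config{
//		ApiAddress: "https://api.example.com",
//		Username:   "admin",
//		Password:   "secret",
//	})
//	if err != nil {
//		// handle error
//	}
//	_ = client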
// DefaultConfig creates a default config object used by CF client
func DefaultConfig() *Config {
return &Config{
ApiAddress: "http://api.bosh-lite.com",
Username: "admin",
Password: "admin",
Token: "",
SkipSslValidation: false,
HttpClient: http.DefaultClient,
UserAgent: "SM-CF-client/1.0",
}
}
// NewRequest is used to create a new Request
func (c *Client) NewRequest(method, path string) *Request {
requestUrl := path
if !strings.HasPrefix(path, "http") {
requestUrl = c.Config.ApiAddress + path
}
r := &Request{
method: method,
url: requestUrl,
params: make(map[string][]string),
}
return r
}
// NewRequestWithBody is used to create a new request with a body
func (c *Client) NewRequestWithBody(method, path string, body io.Reader) *Request {
r := c.NewRequest(method, path)
r.body = body
return r
}
// DoRequest runs a request with our client
func (c *Client) DoRequest(r *Request) (*http.Response, error) {
req, err := r.toHTTP()
if err != nil {
return nil, err
}
req.Header.Set("User-Agent", c.Config.UserAgent)
if req.Body != nil && req.Header.Get("Content-type") == "" {
req.Header.Set("Content-type", "application/json")
}
resp, err := c.Config.HttpClient.Do(req)
if err != nil {
return nil, err
}
if resp.StatusCode >= http.StatusBadRequest {
return c.handleError(resp)
}
return resp, nil
}
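// Putting NewRequest, DoRequest and decodeBody together (a sketch; the path is
// illustrative):
//
//	req := c.NewRequest("GET", "/v2/info")
//	resp, err := c.DoRequest(req)
//	if err != nil {
//		// handle error
//	}
//	var info map[string]interface{}
//	err = decodeBody(resp, &info) // decodeBody also closes resp.Body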
func (c *Client) refreshEndpoint() error {
// we want to keep the Timeout value from config.HttpClient
timeout := c.Config.HttpClient.Timeout
ctx := context.Background()
ctx = context.WithValue(ctx, oauth2.HTTPClient, c.Config.HttpClient)
endpoint, err := getInfo(c.Config.ApiAddress, oauth2.NewClient(ctx, nil))
if err != nil {
return errors.Wrap(err, "Could not get endpoints from the root call")
}
switch {
case c.Config.Token != "":
c.Config = getUserTokenAuth(ctx, c.Config, endpoint)
case c.Config.ClientID != "":
c.Config = getClientAuth(ctx, c.Config, endpoint)
default:
c.Config, err = getUserAuth(ctx, c.Config, endpoint)
if err != nil {
return err
}
}
// make sure original Timeout value will be used
if c.Config.HttpClient.Timeout != timeout {
c.Config.HttpClient.Timeout = timeout
}
c.Endpoint = *endpoint
return nil
}
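// The credential precedence in refreshEndpoint, shown as minimal configs
// (a sketch; values are placeholders):
//
//	Config{Token: "eyJ..."}                        // an existing bearer token wins
//	Config{ClientID: "id", ClientSecret: "s3cr3t"} // then client_credentials
//	Config{Username: "u", Password: "p"}           // otherwise the password grant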
// getUserTokenAuth initializes client credentials from existing bearer token.
func getUserTokenAuth(ctx context.Context, config Config, endpoints *Endpoints) Config {
authConfig := &oauth2.Config{
ClientID: "cf",
Scopes: []string{""},
Endpoint: oauth2.Endpoint{
AuthURL: endpoints.Links.AuthEndpoint.URL + "/oauth/auth",
TokenURL: endpoints.Links.TokenEndpoint.URL + "/oauth/token",
},
}
// Token is expected to have no "bearer" prefix
token := &oauth2.Token{
AccessToken: config.Token,
TokenType: "Bearer"}
config.TokenSource = authConfig.TokenSource(ctx, token)
config.HttpClient = oauth2.NewClient(ctx, config.TokenSource)
return config
}
func getClientAuth(ctx context.Context, config Config, endpoints *Endpoints) Config {
authConfig := &clientcredentials.Config{
ClientID: config.ClientID,
ClientSecret: config.ClientSecret,
TokenURL: endpoints.Links.TokenEndpoint.URL + "/oauth/token",
}
config.TokenSource = authConfig.TokenSource(ctx)
config.HttpClient = authConfig.Client(ctx)
return config
}
func getUserAuth(ctx context.Context, config Config, endpoints *Endpoints) (Config, error) {
authConfig := &oauth2.Config{
ClientID: "cf",
Scopes: []string{""},
Endpoint: oauth2.Endpoint{
AuthURL: endpoints.Links.AuthEndpoint.URL + "/oauth/auth",
TokenURL: endpoints.Links.TokenEndpoint.URL + "/oauth/token",
},
}
if config.Origin != "" {
loginHint := LoginHint{config.Origin}
origin, err := json.Marshal(loginHint)
if err != nil {
return config, errors.Wrap(err, "Error creating login_hint")
}
val := url.Values{}
val.Set("login_hint", string(origin))
authConfig.Endpoint.TokenURL = fmt.Sprintf("%s?%s", authConfig.Endpoint.TokenURL, val.Encode())
}
token, err := authConfig.PasswordCredentialsToken(ctx, config.Username, config.Password)
if err != nil {
return config, errors.Wrap(err, "Error getting token")
}
config.tokenSourceDeadline = &token.Expiry
config.TokenSource = authConfig.TokenSource(ctx, token)
config.HttpClient = oauth2.NewClient(ctx, config.TokenSource)
return config, err
}
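// When Origin is set, the token URL gains a login_hint query parameter; for
// Origin == "uaa" the request would go to (illustrative):
//
//	.../oauth/token?login_hint=%7B%22origin%22%3A%22uaa%22%7D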
func getInfo(api string, httpClient *http.Client) (*Endpoints, error) {
var endpoints Endpoints
if api == "" {
return nil, fmt.Errorf("CF ApiAddress cannot be empty")
}
resp, err := httpClient.Get(api + "/")
if err != nil {
return nil, err
}
defer resp.Body.Close()
err = decodeBody(resp, &endpoints)
if err != nil {
return nil, err
}
return &endpoints, err
}
func shallowDefaultTransport() *http.Transport {
defaultTransport := http.DefaultTransport.(*http.Transport)
return &http.Transport{
Proxy: defaultTransport.Proxy,
TLSHandshakeTimeout: defaultTransport.TLSHandshakeTimeout,
ExpectContinueTimeout: defaultTransport.ExpectContinueTimeout,
}
}
// decodeBody is used to JSON decode a body
func decodeBody(resp *http.Response, out interface{}) error {
defer resp.Body.Close()
dec := json.NewDecoder(resp.Body)
return dec.Decode(out)
}
// encodeBody is used to encode a request body
func encodeBody(obj interface{}) (io.Reader, error) {
buf := bytes.NewBuffer(nil)
enc := json.NewEncoder(buf)
if err := enc.Encode(obj); err != nil {
return nil, err
}
return buf, nil
}
// toHTTP converts the request to an HTTP Request
func (r *Request) toHTTP() (*http.Request, error) {
// Check if we should encode the body
if r.body == nil && r.obj != nil {
b, err := encodeBody(r.obj)
if err != nil {
return nil, err
}
r.body = b
}
// Create the HTTP Request
return http.NewRequest(r.method, r.url, r.body)
}
func (c *Client) handleError(resp *http.Response) (*http.Response, error) {
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
return resp, CloudFoundryHTTPError{
StatusCode: resp.StatusCode,
Status: resp.Status,
Body: body,
}
}
defer resp.Body.Close()
var cfErrors CloudFoundryErrors
if err := json.Unmarshal(body, &cfErrors); err != nil {
return resp, CloudFoundryHTTPError{
StatusCode: resp.StatusCode,
Status: resp.Status,
Body: body,
}
}
// ... (rest of handleError truncated in the source)
// room_model.rs
///Custom (friend) room struct
#[derive(Clone, Default)]
pub struct CustomRoom {
pub rooms: HashMap<u32, Room>, //room id -> room instance
}
impl RoomModel for CustomRoom {
fn get_room_type(&self) -> RoomType {
RoomType::Custom
}
fn get_room_mut(&mut self, room_id: &u32) -> Option<&mut Room> {
let res = self.rooms.get_mut(room_id);
res
}
///Create a room
fn create_room(
&mut self,
battle_type: BattleType,
owner: Member,
sender: TcpSender,
task_sender: crossbeam::Sender<Task>,
) -> anyhow::Result<u32> {
let user_id = owner.user_id;
let mut room = Room::new(owner.clone(), RoomType::Custom, sender, task_sender)?;
room.setting.battle_type = battle_type;
let room_id = room.get_room_id();
self.rooms.insert(room_id, room);
let room = self.rooms.get_mut(&room_id).unwrap();
//notify the other members in the room
room.room_add_member_notice(&user_id);
Ok(room_id)
}
///Leave the room
fn leave_room(&mut self, notice_type: u8, room_id: &u32, user_id: &u32) -> anyhow::Result<u32> {
let room = self.get_mut_room_by_room_id(room_id)?;
room.remove_member(notice_type, user_id);
let mut slr = S_LEAVE_ROOM::new();
slr.set_is_succ(true);
room.send_2_client(
ClientCode::LeaveRoom,
*user_id,
slr.write_to_bytes().unwrap(),
);
let room_id = room.get_room_id();
Ok(room_id)
}
fn rm_room(&mut self, room_id: &u32) {
self.rooms.remove(room_id);
info!(
"删除房间,释放内存!room_type:{:?},room_id:{}",
self.get_room_type(),
room_id
);
}
fn get_rooms_mut(&mut self) -> &mut HashMap<u32, Room, RandomState> {
self.rooms.borrow_mut()
}
}
///Wrapper struct over the set of match rooms
#[derive(Default, Clone)]
pub struct MatchRooms {
pub match_rooms: HashMap<u8, MatchRoom>,
}
impl MatchRooms {
pub fn get_room_mut(&mut self, room_id: &u32) -> Option<&mut Room> {
for i in self.match_rooms.iter_mut() {
let res = i.1.rooms.get_mut(&room_id);
if res.is_some() {
return Some(res.unwrap());
}
}
None
}
pub fn rm_room(&mut self, battle_type: u8, room_id: u32) {
let match_room = self.match_rooms.get_mut(&battle_type);
if let Some(match_room) = match_room {
match_room.rm_room(&room_id);
}
}
///Leave the room, whether due to going offline or leaving voluntarily
pub fn leave(
&mut self,
battle_type: BattleType,
room_id: u32,
user_id: &u32,
) -> anyhow::Result<u32> {
let match_room = self.match_rooms.get_mut(&battle_type.into_u8());
if match_room.is_none() {
let str = format!("there is no battle_type:{:?}!", battle_type);
warn!("{:?}", str.as_str());
anyhow::bail!("{:?}", str)
}
let match_room = match_room.unwrap();
let res = match_room.leave_room(MemberLeaveNoticeType::Leave as u8, &room_id, user_id);
res
}
pub fn get_match_room_mut(&mut self, battle_type: BattleType) -> &mut MatchRoom {
let res = self.match_rooms.get_mut(&battle_type.into_u8());
if res.is_none() {
let mr = MatchRoom {
battle_type: BattleType::OneVOneVOneVOne,
rooms: HashMap::new(),
room_cache: Vec::new(),
};
self.match_rooms.insert(battle_type.into_u8(), mr);
}
let res = self.match_rooms.get_mut(&battle_type.into_u8());
res.unwrap()
}
}
///Match room struct
#[derive(Clone)]
pub struct MatchRoom {
pub battle_type: BattleType, //battle mode type
pub rooms: HashMap<u32, Room>, //key: room id, value: room struct
pub room_cache: Vec<RoomCache>, //key: room id, value: room member count
}
impl RoomModel for MatchRoom {
fn get_room_type(&self) -> RoomType {
RoomType::Match
}
fn get_room_mut(&mut self, room_id: &u32) -> Option<&mut Room> {
let res = self.rooms.get_mut(room_id);
if res.is_none() {
return None;
}
let room = res.unwrap();
Some(room)
}
///Create a room
fn create_room(
&mut self,
battle_type: BattleType,
owner: Member,
sender: TcpSender,
task_sender: crossbeam::Sender<Task>,
) -> anyhow::Result<u32> {
let mut room = Room::new(owner, RoomType::Match, sender, task_sender)?;
room.setting.battle_type = battle_type;
let room_id = room.get_room_id();
self.rooms.insert(room_id, room);
let mut rc = RoomCache::default();
rc.room_id = room_id;
rc.count = 1;
self.room_cache.push(rc);
Ok(room_id)
}
///Leave the room
fn leave_room(&mut self, notice_type: u8, room_id: &u32, user_id: &u32) -> anyhow::Result<u32> {
let room = self.get_mut_room_by_room_id(room_id)?;
let room_id = *room_id;
let member_count = room.get_member_count();
room.remove_member(notice_type, user_id);
let need_remove = room.is_empty();
let now_count = room.get_member_count();
let mut need_add_cache = false;
//if the room was full before, cancel everyone's prepare state
if room.get_state() == RoomState::Await
&& member_count == MEMBER_MAX as usize
&& now_count < member_count
{
let map = room.members.clone();
for id in map.keys() {
room.prepare_cancel(id, false);
}
if room.get_state() == RoomState::Await {
need_add_cache = true;
}
}
if need_remove {
return Ok(room_id);
}
let room_cache = self.get_room_cache_mut(&room_id);
if room_cache.is_some() {
let rc = room_cache.unwrap();
rc.count -= 1;
//re-sort
self.room_cache.par_sort_by(|a, b| b.count.cmp(&a.count));
} else if room_cache.is_none() && need_add_cache {
let mut rc = RoomCache::default();
rc.room_id = room_id;
rc.count = now_count as u8;
self.room_cache.push(rc);
//re-sort
self.room_cache.par_sort_by(|a, b| b.count.cmp(&a.count));
info!(
"玩家离开房间匹配房间,满足条件,将放进重新放入匹配队列,room_id:{}",
room_id
);
}
Ok(room_id)
}
///Remove the room
fn rm_room(&mut self, room_id: &u32) {
self.rooms.remove(room_id);
self.remove_room_cache(room_id);
info!(
"删除房间,释放内存!room_type:{:?},room_id:{}",
self.get_room_type(),
room_id
);
}
fn get_rooms_mut(&mut self) -> &mut HashMap<u32, Room, RandomState> {
self.rooms.borrow_mut()
}
}
impl MatchRoom {
pub fn get_room_cache_mut(&mut self, room_id: &u32) -> Option<&mut RoomCache> {
let res = self.room_cache.iter_mut().find(|x| x.room_id == *room_id);
res
}
///Remove the cached room entry
pub fn remove_room_cache(&mut self, room_id: &u32) {
let mut index = -1_isize;
for i in self.room_cache.iter() {
index += 1;
if i.room_id != *room_id {
continue;
}
break;
}
if index < 0 {
return;
}
self.room_cache.remove(index as usize);
}
///Quick join
pub fn quickly_start(
&mut self,
member: Member,
sender: TcpSender,
task_sender: crossbeam::Sender<Task>,
) -> anyhow::Result<u32> {
// let room ... (body truncated in the source)
}
}
room_model.rs | Type::try_from(rs_pt.battle_type as u8).unwrap();
let is_open_ai = rs_pt.is_open_ai;
let victory_condition = rs_pt.victory_condition;
let turn_limit_time = rs_pt.turn_limit_time;
let season_id = rs_pt.season_id;
RoomSetting {
battle_type,
turn_limit_time,
season_id,
is_open_ai,
victory_condition,
}
}
}
impl From<RoomSetting> for RoomSettingPt {
fn from(r: RoomSetting) -> Self {
let mut rsp = RoomSettingPt::new();
rsp.set_victory_condition(r.victory_condition);
rsp.set_battle_type(r.battle_type as u32);
rsp.set_season_id(r.season_id);
rsp.set_turn_limit_time(r.turn_limit_time);
rsp.set_is_open_ai(r.is_open_ai);
rsp
}
}
///Room cache entry
#[derive(Debug, Copy, Clone, Default)]
pub struct RoomCache {
room_id: u32,
count: u8,
}
pub trait RoomModel {
fn get_room_type(&self) -> RoomType;
fn get_room_mut(&mut self, room_id: &u32) -> Option<&mut Room>;
fn create_room(
&mut self,
battle_type: BattleType,
owner: Member,
sender: TcpSender,
task_sender: crossbeam::Sender<Task>,
) -> anyhow::Result<u32>;
fn leave_room(&mut self, notice_type: u8, room_id: &u32, user_id: &u32) -> anyhow::Result<u32>;
fn rm_room(&mut self, room_id: &u32);
fn get_rooms_mut(&mut self) -> &mut HashMap<u32, Room>;
///Get a mutable reference to a room by its id
fn get_mut_room_by_room_id(&mut self, room_id: &u32) -> anyhow::Result<&mut Room> {
match self.get_rooms_mut().get_mut(room_id) {
Some(room) => Ok(room),
None => anyhow::bail!("this room does not exist! room_id:{}", room_id),
}
}
///Get a shared reference to a room by its id
fn get_ref_room_by_room_id(&mut self, room_id: &u32) -> anyhow::Result<&Room> {
match self.get_rooms_mut().get(room_id) {
Some(room) => Ok(room),
None => anyhow::bail!("this room does not exist, room_id:{}", room_id),
}
}
}
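//The trait's default methods route every lookup through get_rooms_mut(), so an
//implementor mainly needs to expose its room map. Minimal sketch (hypothetical
//implementor, for illustration only):
//
// struct SoloRoom { rooms: HashMap<u32, Room> }
// impl RoomModel for SoloRoom {
//     fn get_rooms_mut(&mut self) -> &mut HashMap<u32, Room> { &mut self.rooms }
//     /* ...remaining required methods... */
// }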
///Custom (friend) room struct
#[derive(Clone, Default)]
pub struct CustomRoom {
pub rooms: HashMap<u32, Room>, //room id -> room instance
}
impl RoomModel for CustomRoom {
fn get_room_type(&self) -> RoomType {
RoomType::Custom
}
fn get_room_mut(&mut self, room_id: &u32) -> Option<&mut Room> {
self.rooms.get_mut(room_id)
}
///Create a room
fn create_room(
&mut self,
battle_type: BattleType,
owner: Member,
sender: TcpSender,
task_sender: crossbeam::Sender<Task>,
) -> anyhow::Result<u32> {
let user_id = owner.user_id;
let mut room = Room::new(owner.clone(), RoomType::Custom, sender, task_sender)?;
room.setting.battle_type = battle_type;
let room_id = room.get_room_id();
self.rooms.insert(room_id, room);
let room = self.rooms.get_mut(&room_id).unwrap();
//notify the other members in the room
room.room_add_member_notice(&user_id);
Ok(room_id)
}
///Leave the room
fn leave_room(&mut self, notice_type: u8, room_id: &u32, user_id: &u32) -> anyhow::Result<u32> {
let room = self.get_mut_room_by_room_id(room_id)?;
room.remove_member(notice_type, user_id);
let mut slr = S_LEAVE_ROOM::new();
slr.set_is_succ(true);
room.send_2_client(
ClientCode::LeaveRoom,
*user_id,
slr.write_to_bytes().unwrap(),
);
let room_id = room.get_room_id();
Ok(room_id)
}
fn rm_room(&mut self, room_id: &u32) {
self.rooms.remove(room_id);
info!(
"Removed room and released memory! room_type:{:?},room_id:{}",
self.get_room_type(),
room_id
);
}
fn get_rooms_mut(&mut self) -> &mut HashMap<u32, Room, RandomState> {
&mut self.rooms
}
}
///Container wrapping every MatchRoom, keyed by battle type
#[derive(Default, Clone)]
pub struct MatchRooms {
pub match_rooms: HashMap<u8, MatchRoom>,
}
impl MatchRooms {
pub fn get_room_mut(&mut self, room_id: &u32) -> Option<&mut Room> {
self.match_rooms
.values_mut()
.find_map(|match_room| match_room.rooms.get_mut(room_id))
}
pub fn rm_room(&mut self, battle_type: u8, room_id: u32) {
let match_room = self.match_rooms.get_mut(&battle_type);
if let Some(match_room) = match_room {
match_room.rm_room(&room_id);
}
}
///Leave a room, whether the player went offline or left on purpose
pub fn leave(
&mut self,
battle_type: BattleType,
room_id: u32,
user_id: &u32,
) -> anyhow::Result<u32> {
let match_room = self.match_rooms.get_mut(&battle_type.into_u8());
if match_room.is_none() {
let str = format!("there is no battle_type:{:?}!", battle_type);
warn!("{}", str);
anyhow::bail!(str)
}
let match_room = match_room.unwrap();
match_room.leave_room(MemberLeaveNoticeType::Leave as u8, &room_id, user_id)
}
pub fn get_match_room_mut(&mut self, battle_type: BattleType) -> &mut MatchRoom {
//lazily create the MatchRoom for the requested battle type on first access
self.match_rooms
.entry(battle_type.into_u8())
.or_insert_with(|| MatchRoom {
battle_type,
rooms: HashMap::new(),
room_cache: Vec::new(),
})
}
}
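//Usage sketch (illustrative): room access goes through MatchRooms, which
//creates the per-battle-type MatchRoom lazily on first use:
//
// let match_room = match_rooms.get_match_room_mut(battle_type);
// let room_id = match_room.create_room(battle_type, owner, sender, task_sender)?;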
impl BattleType {
pub fn into_u8(self) -> u8 {
let res: u8 = self.into();
res
}
pub fn into_u32(self) -> u32 {
let res: u8 = self.into();
res as u32
}
}
///Room settings
#[derive(Debug, Copy, Clone, Default)]
pub struct RoomSetting {
pub battle_type: BattleType, //battle type
pub turn_limit_time: u32, //turn time limit
pub season_id: u32, //season id
pub is_open_ai: bool, //whether AI is enabled
pub victory_condition: u32, //victory condition
}
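//Conversion round-trip sketch (illustrative), using the From impls above:
//
// let rs: RoomSetting = RoomSetting::from(rs_pt);
// let rs_pt2: RoomSettingPt = rs.into();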
u"Socket failure",
u"发送失败",
u"接收失败",
u"SSL过程失败",
u"内存不足",
u"代理超时",
u"代理错误",
u"版本匹配失败",
u"权限不足",
u"无效的参数",
u"命令行错误",
)
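# Error messages shown to the user; run() below currently only uses index 1 as a
# generic connection-failure message.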
CA_DOWNLOAD_CACHE = []
RDP_PORT = 3389
def loadIPConfig():
if not os.access(IP_TRANSLATE_FILE, os.F_OK):
with open(IP_TRANSLATE_FILE, "w") as file:
file.write('# IP Translate File\n')
file.write('# Source IP Destination IP\n')
file.write('# \n')
file.write('# eg.\n')
file.write('# 10.10.10.1 100.10.10.1\n')
return
with open(IP_TRANSLATE_FILE, "r") as file:
lines = file.readlines()
for line in lines:
# skip comment line
if line.startswith('#') :
continue
splits = line.strip().split()
if len(splits) >= 2:
IP_TRANSLATE_TABLE[splits[0]] = splits[1]
class LaunchThread(threading.Thread):
def __init__(self, p_id, vm, Type, window):
threading.Thread.__init__(self)
self.window = window
self.p_id = p_id
self.vm = vm
self.Type = Type
self.cancel = False
self.ret = -1
self.msg = ""
def get_argument(self):
try:
control = havclient.get_control(AdminShadow['admins'], self.vm, self.p_id)
except:
Logger.error("Get control failed!")
try:
usb = control.get('usb', None)
Logger.info("The %s USB_policy was allowde by server." , self.vm.name)
except:
usb = False
Logger.error('The usb_policy has not been provided!')
try:
broadcast = control.get("allow_screen_broadcast", None)
Logger.info("The %s was allowed screen_broadcast by server", self.vm.name)
except:
broadcast = False
Logger.error("The %s was not allowed screen_broadcast by server", self.vm.name)
if usb:
arg_usb = " --spice-usbredir-auto-redirect-filter='-1,-1,-1-1,1' --spice-usbredir-redirect-on-connect='-1,-1,-1,-1,1' "
else:
arg_usb = "--spice-disable-usbredir"
if broadcast:
arg_bro = " --teacher "
else:
arg_bro = ""
try:
if len(Main.get_device_connected()) == 1:
arg_screen = ""
else :
w = os.popen("ps -ef | grep remote*", 'r')
ww = w.readlines()
w.close()
if len(ww) == 2:
arg_screen = "--extend"
else:
arg_screen = ""
except Exception as e:
Logger.error("extend screen failed: %s",e)
'''
try:
spice_secure = havclient.get_spice_secure(AdminShadow['admins'], self.vm, self.p_id)
if spice_secure:
try:
filename = '/tmp/ca-cert.pem'
addrip= Setting.getServer()
url = ''.join(["http://", addrip, ":5009/ca-cert.pem"])
info = urllib2.urlopen(url, timeout = 1)
f = open(filename, 'w')
while True:
buf = info.read(4096)
f.write(buf)
if buf == '':
break
Logger.info("Download ca-cert.pem succeed.")
finally:
info.close()
f.close()
arg_sec = '--spice-ca-file=/tmp/ca-cert.pem --spice-secure-channels=main,inputs --spice-host-subject="C=IL, L=Raanana, O=Red Hat, CN=my server"'
else:
arg_sec = ""
except Exception as e:
Logger.error('Get spice secure failed: %s' % str(e))
'''
arg = ' '.join([arg_usb, arg_bro, arg_screen])
return arg
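# Example (illustrative): with USB redirection and screen broadcast allowed on a
# single-display client, get_argument() returns roughly:
#   "--spice-usbredir-auto-redirect-filter='-1,-1,-1-1,1' --spice-usbredir-redirect-on-connect='-1,-1,-1,-1,1'   --teacher  "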
def get_rdpsetting(self):
if Setting.getAllow_device().lower() == 'true':
arg_device = '/drive:USB,/run/media/root '
else:
arg_device = ''
#if Setting.getAuto_connect().lower() == 'true':
# arg_auto = '+auto-reconnect '
#else:
# arg_auto = ''
#if Setting.getHeadset_micro().lower() == 'true':
# arg_head_mic = '/sound:sys:pulse /microphone:sys:pulse '
#else:
# arg_head_mic = ''
if Setting.getRemotefx().lower() == 'true':
arg_rem = '/codec-cache:jpeg /gfx '
else:
arg_rem = ''
if Setting.getPublic().lower() == 'true':
arg_net = '/network:wan '
else:
arg_net = '/network:lan '
arg = ' '.join([arg_device, arg_rem, arg_net])
return arg
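# Example (illustrative): with device redirection, RemoteFX and public network
# all enabled, get_rdpsetting() returns roughly:
#   "/drive:USB,/run/media/root  /codec-cache:jpeg /gfx  /network:wan "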
def stop(self):
self.cancel = True
def run(self):
import pdb
#pdb.set_trace()
try:
count = 0
win = self.window
wx.CallAfter(win.Update, 1, u'Updating desktop cloud status...')
if self.cancel:
return
if self.Type == 'JSP' or self.Type == 'UNKNOWN':
#import pdb
#pdb.set_trace()
vminfo = havclient.server_port(AdminShadow['admins'], self.p_id, self.vm, search_opts=None, all_tenants=None)
passwd = havclient.get_cipher(FirstUser['firstuser'], self.p_id, self.vm)
#vmcipher = havclient.get_cipher(FirstUser['firstuser'], self.p_id, self.vm)
#passwd = vmcipher.cipher
vmconsole = consoleInfo.ConsoleInfo(vminfo['console'])
host = vmconsole.host
port = vmconsole.port
dsport = vmconsole.tlsport
public_ip = Setting.getPublic().lower()
Logger.info("The public_ip status is %s", public_ip)
dport=port
if public_ip == "false":
dhost=Wfile.Parsing_hosts(host)
try:
os.environ["SPICE_PROXY"] = ""
except:
Logger.error("Configure the environment variables failed!")
pass
elif public_ip == "true":
dhost=host
proxy_status, proxy_port = SpicePorxyRequests()
if proxy_status:
try:
os.environ["SPICE_PROXY"] = "http://%s:%s" % (Setting.getServer(),proxy_port)
except:
Logger.error("Configure the environment variables failed!")
else:
Util.MessageBox(self, 'Connection failed!\n\nThe system administrator has not configured a public network proxy!', u'Error', wx.OK | wx.ICON_ERROR)
return
wx.CallAfter(win.WorkFinished, u'Fetched desktop cloud info... success')
wx.CallAfter(win.Update, 1, u'Opening desktop cloud...')
argument = self.get_argument()
#cmdline = 'scdaemon spice://%s/?port=%s\&tls-port=%s\&password=%s --hotkeys=exit-app=shift+f12,release-cursor=shift+f11 -f --title="%s"' % (dhost,dport,dsport,passwd,self.vm.name) + argument
cmdline = 'scdaemon spice://%s/?port=%s\&password=%s --hotkeys=exit-app=shift+f12,release-cursor=shift+f11 -f --title="%s"' % (dhost,dport,passwd,self.vm.name) + argument
Logger.info("Spice Cmd:%s",cmdline)
elif self.Type == 'RDP':
user = Setting.getUser()
Logger.info("The user of VM is %s",user)
password = Setting.getCipher()
Logger.info("The password of user is %s",password)
ip = havclient.get_vm_ip(self.vm)
public_ip = Setting.getPublic().lower()
Logger.info("The public_ip status is %s" , public_ip)
argu = self.get_rdpsetting()
if public_ip == 'false':
ipaddress = ip
Logger.info("The ipaddress of user is %s", ipaddress)
cmdline = 'xfreerdp /u:%s /p:%s /v:%s /cert-ignore -sec-tls /bpp:32 /gdi:hw /multimedia:decoder:gstreamer +auto-reconnect /sound:sys:pulse /microphone:sys:pulse -f ' % (user, password, ipaddress) + argu
elif public_ip == 'true':
ip, port = Wfile.getmapping_prot(ip, '3389')
ipaddress = ip + ':' + port
Logger.info("The ipaddress of user is %s", ipaddress)
cmdline = 'xfreerdp /u:%s /p:%s /v:%s /cert-ignore -sec-tls /bpp:32 /gdi:hw /multimedia:decoder:gstreamer +auto-reconnect /sound:sys:pulse /microphone:sys:pulse -f ' % (user, password, ipaddress) + argu
Logger.info("Rdp Cmd:%s",cmdline)
body={}
vmlogintime = Setting.getVMLoginTime()
clientip = Setting.getClientIP()
body['client_time'] = vmlogintime
body['client_ip'] = clientip
# try:
# havclient.connect_client_info(AdminShadow['admins'], self.p_id, self.vm.id, body)
# except:
# Logger.error("Send vmlogininfo failed!")
Logger.debug(cmdline)
ret = Util.RunConnectGuestWithLog(cmdline)
if self.cancel:
return
wx.CallAfter(win.WorkFinished, u'Opened desktop cloud... success')
wx.CallAfter(win.Finish)
name=self.vm.name
desktop=name.encode('utf-8')
Setting.setDesktop(desktop)
Setting.save()
RestartDeviceRequests()
SaveVmLoginInfo()
self.stop()
self.ret = ret
except Exception, e:
self.ret = 1
self.msg = "%s : %s" %(SPICE_ERR_TABLE[1], e)
Logger.debug("Console Thread Ended!")
#loadIPConfig()
if __name__ == '__main__':
app = wx.PySimpleApp()
auth_url = 'http://192.168.8.150:5000/v2.0'
user = 'admin'
password = '123'
Session.login(auth_url, user, password, tenant=None, otp=None)
vms = havclient.vm_list(Session.User, search_opts=None, all_tenants=None)
vm = havclient.server_port(Session.User, vms[0], search_opts=None, all_tenants=None)
print '**************8 %s ' % vms
class Test(object):
def Update(self, value, msg):
print value, msg
def WorkFinished(self, msg):
print msg
def Finish(self):
pass
dlg = Test()
thread = LaunchThread(vm, dlg)
thread.start()
app.MainLoop()
thread.join()
for key in IP_TRANSLATE_TABLE.keys():
print key, '->', IP_TRANSLATE_TABLE[key]
let p4 = new Product(
"Longines",
10725,
"<img src='./../img/longinessilver.jpg'/>",
"Conquest 24 has a simple yet appealing design. It features a robust package, with a 41 mm wide round steel case (with a screw-in back and a screw-in crown with protection) and a three-link bracelet (with the thickest central row) of the same material. The dial is available in three classic colors: black, silvered and blue; and it is dominated with two Roman numerals, a 24-hour scale and its attached red hand. For improved visibility, hands, numerals and indices are coated with luminescent SuperLuminova material."
);
let p5 = new Product(
"Hunters Race",
2495,
"<img src='./../img/huntersbrown.jpg'/>",
"Hunters Race is a curated collection of the finest timepieces designed in Auckland, New Zealand by father and son team Scott and Jack Ramsay. Scott has over thirty years experience in the watch industry and ensures that their watches are of the highest quality, along with providing a very professional back up service. The watch has a 12 month guarantee and comes in a beautiful gift box along with a spare strap. "
);
let p6 = new Product(
"Tommy Hilfiger",
1595,
"<img src='./../img/tommyblack.jpg'/>",
"Make a premium mark of style on your outfit with a timeless Tommy Hilfiger watch. Whether you're looking for classic leather, smart stainless steel or dressy silver watch, why not indulge in our range of men's and ladies watches and discover luxurious high fashion, sophistication in enduring designs that will inevitably stand the test of time."
);
let p7 = new Product(
"Emporio Armani",
2745,
"<img src='./../img/armanisilver.jpg'/>",
"Emporio Armani, as a brand, is known for its ready-to-wear and runway collections. It focuses on current trends and modern traits."
);
let p8 = new Product(
"Fossil",
1295,
"<img src='./../img/fossilsilver.jpg'/>",
"This watch really does remind me of Omega’s moonwatch. It’s not a direct homage but is certainly very similar in its styling. And really, it’s the appearance that attracted me to this watch. It is a mid-sized, racing chronograph with real retro charm."
);
let p9 = new Product(
"Thomas Sabo",
2625,
"<img src='./../img/thomasblack.jpg'/>",
"Black and silver men's watch: The Statement chronograph from THOMAS SABO convinces with a rebellious design. The dial of the dial, the ring of the case, the crown and the buttons of the case and also the detailed bracelet in stainless steel attract the eye thanks to its pyramid design and typically THOMAS SABO DNA."
);
let p10 = new Product(
"Braun",
2495,
"<img src='./../img/braunblack.jpg'/>",
"A collaborative design project that partners German design with British fashion. Braun and Paul Smith have teamed up on a limited-edition clock and watch project. Paul Smith is one of Britain’s foremost designers, renowned for his creative spirit, which combines tradition and modernity."
);
let p11 = new Product(
"Omega",
16305,
"<img src='./../img/omega.jpg'/>",
" The OMEGA Speedmaster is one of OMEGA’s most iconic timepieces. Having been a part of all six lunar missions, the legendary Speedmaster is an impressive representation of the brand’s adventurous pioneering spirit."
);
let p12 = new Product(
"Oris",
52900,
"<img src='./../img/oris.jpg'/>",
"The Aquis collection of diver’s watches offers real-world functionality in an innovative package, through a combination of considered design, high-performance materials and fit-for-purpose functions."
);
$(function () {
addProduct();
createProduct();
getFromLocalStorage();
updateCartTotalPrice();
notice();
let select = $(".form-control");
$(select).on("change", function (e) {
console.log($(this).val());
if ($(this).val() == "l2h") {
products.sort((a, b) => {
if (a.price > b.price) {
return 1;
}
if (a.price < b.price) {
return -1;
}
return 0;
});
createProduct();
} else if ($(this).val() == "h2l") {
products.reverse();
}
createProduct();
});
$("#buyButton").on("click", function () {
if (cartProducts.length <= 0) {
alert("Shopping cart is empty");
} else {
for (let i = 0; i < cartProducts.length; i++) {
if (cartProducts.length >= 0) {
window.location.href = "./../html/checkout.html";
}
}
}
});
$("#dialog").dialog({
autoOpen: false,
position: { my: "right top", at: "right top", of: window },
show: {
effect: "blind",
duration: 1000,
},
hide: {
effect: "blind",
duration: 1000,
},
});
$("#opener").on("click", function () {
if (!$("#dialog").dialog("isOpen")) {
$("#dialog").dialog("open");
} else {
$("#dialog").dialog("close");
}
});
});
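// Note: Product(name, price, imageHtml, description), updateCartTotalPrice(),
// notice() and the localStorage helpers are assumed to be defined earlier in
// this file; they are not shown in this excerpt.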
function addProduct() {
products.push(p1, p2, p3, p4, p5, p6, p7, p8, p9, p10, p11, p12);
}
function createProduct() {
let productlist = document.getElementById("product-container");
productlist.innerHTML = "";
$.each(products, (i, product) => {
let container = $("<div>").addClass("product").attr("id", product.id);
$("<div>").addClass("image").html(product.image).appendTo(container);
$("<h3>").html(product.name).appendTo(container);
$("<p>")
.addClass("price")
.html(product.price + " " + "SEK")
.appendTo(container);
$("<a>")
.attr("href", "javascript:;")
.addClass("show_hide")
.attr("data-content", "toggle-text")
.attr("id", "atag")
.text("Read More")
.appendTo(container);
$("<p>")
.addClass("description")
.html(product.description)
.appendTo(container);
let addToCartButtons = $("<button>Add to Cart</button>")
.addClass("AddToCartButton")
.appendTo(container);
addToCartButtons.on("click", () => {
clickedAddToCart(products[i]);
});
container.appendTo($("#product-container"));
});
$(".description").hide();
$(".show_hide").on("click", function () {
let txt = $(".description").is(":visible") ? "Read More" : "Read Less";
$(".show_hide").text(txt);
$(this).next(".description").slideToggle(200);
});
}
function createShoppingCart() {
let shoppingcart = document.getElementById("shoppingCart-container");
shoppingcart.innerHTML = "";
$.each(cartProducts, (i, cartProduct) => {
let shoppingCartContainer = $("<div>")
.addClass("cartproduct")
.attr("id", cartProducts[i].product.id);
$("<div>")
.addClass("image")
.html(cartProducts[i].product.image)
.appendTo(shoppingCartContainer);
$("<h3>")
.html(cartProducts[i].product.name)
.appendTo(shoppingCartContainer);
$("<p>")
.html(cartProducts[i].product.price + " " + "SEK")
.appendTo(shoppingCartContainer);
let deleteButton = $("<button>Delete</button>")
.addClass("deleteButton")
.appendTo(shoppingCartContainer);
deleteButton.on("click", () => {
deleteCartProduct(cartProducts[i]);
});
let counterdiv = $("<div>")
.addClass("counterdiv")
.appendTo(shoppingCartContainer);
let displayCounter = $("<div>").addClass("counter").appendTo(counterdiv);
$("<p>")
.addClass("activeCount")
.html(cartProducts[i].qty)
.appendTo(displayCounter);
let minus = $("<button>-</button>")
.html('<i class="fas fa-minus-circle"></i>')
.addClass("subbtn")
.on("click", () => {
subtractOneProduct(cartProducts[i]);
});
minus.appendTo(counterdiv);
let add = $("<button>+</button>")
.addClass("addbtn")
.html('<i class="fas fa-plus-circle"></i>')
.on("click", () => {
addOneProduct(cartProducts[i]);
});
add.appendTo(counterdiv);
shoppingCartContainer.appendTo($("#shoppingCart-container"));
});
}
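// The cart re-renders from scratch on every change, so the quantity and delete
// handlers are re-bound inside createShoppingCart() each time.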
function addOneProduct(cartProduct) {
for (let i = 0; i < cartProducts.length; i++) {
if (cartProducts[i].product.id == cartProduct.product.id) {
cartProducts[i].qty++;
createShoppingCart();
}
if (cartProducts[i].qty > 1) {
let tempsum = cartProducts[i].qty * 1;
let total = tempsum * parseInt(cartProducts[i].product.price);
listOfTotal.push(total);
updateCartTotalPrice();
addToLocalStorage(cartProducts);
notice();
} else {
updateCartTotalPrice();
notice();
addToLocalStorage(cartProducts);
}
}
}
function subtractOneProduct(cartProduct) {
for (let i = 0; i < cartProducts.length; i++) {
if (cartProducts[i].product.id == cartProduct.product.id) {
cartProducts[i].qty--;
}
if (cartProducts[i].qty < 1) {
cartProducts.splice(i, 1);
cartProduct.product.inCart = false;
}
createShoppingCart();
updateCartTotalPrice();
notice();
addToLocalStorage(cartProducts);
}
}
function deleteCartProduct(cartProduct) {
for (let i = 0; i < cartProducts.length; i++) {
if (cartProducts[i].product.id == cartProduct.product.id) {
cartProducts.splice(i, 1);
cartProduct.product.inCart = false;
}
createShoppingCart();
updateCartTotalPrice();
notice();
addToLocalStorage(cartProducts);
}
}
"Metropolitan R has all the functionality you need including up to 2 weeks of battery life. This sleek design boasts an impressive AMOLED display that you can customize with over 20+ dial designs. Plus, 24/7 activity and sleep tracking for your health and fitness goals, on-board GPS, optical heart rate sensor, notifications and much more. This is the perfect smartwatch to fit your busy lifestyle. "
);
let p4 = new Product(
"Longines",
10725,
"<img src='./../img/longinessilver.jpg'/>",
"Conquest 24 has a simple yet appealing design. It features a robust package, with a 41 mm wide round steel case (with a screw-in back and a screw-in crown with protection) and a three-link bracelet (with the thickest central row) of the same material. The dial is available in three classic colors: black, silvered and blue; and it is dominated with two Roman numerals, a 24-hour scale and its attached red hand. For improved visibility, hands, numerals and indices are coated with luminescent SuperLuminova material."
);
let p5 = new Product(
"Hunters Race",
2495,
"<img src='./../img/huntersbrown.jpg'/>",
"Hunters Race is a curated collection of the finest timepieces designed in Auckland, New Zealand by father and son team Scott and Jack Ramsay. Scott has over thirty years experience in the watch industry and ensures that their watches are of the highest quality, along with providing a very professional back up service. The watch has a 12 month guarantee and comes in a beautiful gift box along with a spare strap. "
);
let p6 = new Product(
"Tommy Hilfiger",
1595,
"<img src='./../img/tommyblack.jpg'/>",
"Make a premium mark of style on your outfit with a timeless Tommy Hilfiger watch. Whether you're looking for classic leather, smart stainless steel or dressy silver watch, why not indulge in our range of men's and ladies watches and discover luxurious high fashion, sophistication in enduring designs that will inevitably stand the test of time."
);
let p7 = new Product(
"Emporio Armani",
2745,
"<img src='./../img/armanisilver.jpg'/>",
"Emporio Armani, as a brand, is known for its ready-to-wear and runway collections. It focuses on current trends and modern traits."
);
let p8 = new Product(
"Fossil",
1295,
"<img src='./../img/fossilsilver.jpg'/>",
"This watch really does remind me of Omega’s moonwatch. It’s not a direct homage but is certainly very similar in its styling. And really, it’s the appearance that attracted me to this watch. It is a mid-sized, racing chronograph with real retro charm."
);
let p9 = new Product(
"Thomas Sabo",
2625,
"<img src='./../img/thomasblack.jpg'/>",
"Black and silver men's watch: The Statement chronograph from THOMAS SABO convinces with a rebellious design. The dial of the dial, the ring of the case, the crown and the buttons of the case and also the detailed bracelet in stainless steel attract the eye thanks to its pyramid design and typically THOMAS SABO DNA."
);
let p10 = new Product(
"Braun",
2495,
"<img src='./../img/braunblack.jpg'/>",
"A collaborative design project that partners German design with British fashion. Braun and Paul Smith have teamed up on a limited-edition clock and watch project. Paul Smith is one of Britain’s foremost designers, renowned for his creative spirit, which combines tradition and modernity."
);
let p11 = new Product(
"Omega",
16305,
"<img src='./../img/omega.jpg'/>",
" The OMEGA Speedmaster is one of OMEGA’s most iconic timepieces. Having been a part of all six lunar missions, the legendary Speedmaster is an impressive representation of the brand’s adventurous pioneering spirit."
);
let p12 = new Product(
"Oris",
52900,
"<img src='./../img/oris.jpg'/>",
"The Aquis collection of diver’s watches offers real-world functionality in an innovative package, through a combination of considered design, high-performance materials and fit-for-purpose functions."
);
$(function () {
addProduct();
createProduct();
getFromLocalStorage();
updateCartTotalPrice();
notice();
let select = $(".form-control");
$(select).on("change", function (e) {
console.log($(this).val());
if ($(this).val() == "l2h") {
products.sort((a, b) => {
if (a.price > b.price) {
return 1;
}
if (a.price < b.price) {
return -1;
}
return 0;
});
createProduct();
} else if ($(this).val() == "h2l") {
products.reverse();
}
createProduct();
});
$("#buyButton").on("click", function () {
if (cartProducts.length <= 0) {
alert("Shopping cart is empty");
} else {
for (let i = 0; i < cartProducts.length; i++) {
if (cartProducts.length >= 0) {
window.location.href = "./../html/checkout.html";
}
}
}
});
$("#dialog").dialog({
autoOpen: false,
position: { my: "right top", at: "right top", of: window },
show: {
effect: "blind",
duration: 1000,
},
hide: {
effect: "blind",
duration: 1000,
},
});
$("#opener").on("click", function () {
if (!$("#dialog").dialog("isOpen")) {
$("#dialog").dialog("open");
} else {
$("#dialog").dialog("close");
}
});
});
function addProduct() {
products.push(p1, p2, p3, p4, p5, p6, p7, p8, p9, p10, p11, p12);
}
function createProduct() {
let product | .attr("id", "atag")
.text("Read More")
.appendTo(container);
$("<p>")
.addClass("description")
.html(product.description)
.appendTo(container);
let addToCartButtons = $("<button>Add to Cart</button>")
.addClass("AddToCartButton")
.appendTo(container);
addToCartButtons.on("click", () => {
clickedAddToCart(products[i]);
});
container.appendTo($("#product-container"));
});
$(".description").hide();
$(".show_hide").on("click", function () {
let txt = $(".description").is(":visible") ? "Read More" : "Read Less";
$(".show_hide").text(txt);
$(this).next(".description").slideToggle(200);
});
}
function crea
teShoppingCart() {
let shoppingcart = document.getElementById("shoppingCart-container");
shoppingcart.innerHTML = "";
$.each(cartProducts, (i, cartProduct) => {
let shoppingCartContainer = $("<div>")
.addClass("cartproduct")
.attr("id", cartProducts[i].product.id);
$("<div>")
.addClass("image")
.html(cartProducts[i].product.image)
.appendTo(shoppingCartContainer);
$("<h3>")
.html(cartProducts[i].product.name)
.appendTo(shoppingCartContainer);
$("<p>")
.html(cartProducts[i].product.price + " " + "SEK")
.appendTo(shoppingCartContainer);
let deleteButton = $("<button>Delete</button>")
.addClass("deleteButton")
.appendTo(shoppingCartContainer);
deleteButton.on("click", () => {
deleteCartProduct(cartProducts[i]);
});
let counterdiv = $("<div>")
.addClass("counterdiv")
.appendTo(shoppingCartContainer);
let displayCounter = $("<div>").addClass("counter").appendTo(counterdiv);
$("<p>")
.addClass("activeCount")
.html(cartProducts[i].qty)
.appendTo(displayCounter);
let minus = $("<button>-</button>")
.html('<i class="fas fa-minus-circle"></i>')
.addClass("subbtn")
.on("click", () => {
subtractOneProduct(cartProducts[i]);
});
minus.appendTo(counterdiv);
let add = $("<button>+</button>")
.addClass("addbtn")
.html('<i class="fas fa-plus-circle | list = document.getElementById("product-container");
productlist.innerHTML = "";
$.each(products, (i, product) => {
let container = $("<div>").addClass("product").attr("id", product.id);
$("<div>").addClass("image").html(product.image).appendTo(container);
$("<h3>").html(product.name).appendTo(container);
$("<p>")
.addClass("price")
.html(product.price + " " + "SEK")
.appendTo(container);
$("<a>")
.attr("href", "javascript:;")
.addClass("show_hide")
.attr("data-content", "toggle-text") | identifier_body |
products.js | Race is a curated collection of the finest timepieces designed in Auckland, New Zealand by father and son team Scott and Jack Ramsay. Scott has over thirty years experience in the watch industry and ensures that their watches are of the highest quality, along with providing a very professional back up service. The watch has a 12 month guarantee and comes in a beautiful gift box along with a spare strap. "
);
let p6 = new Product(
"Tommy Hilfiger",
1595,
"<img src='./../img/tommyblack.jpg'/>",
"Make a premium mark of style on your outfit with a timeless Tommy Hilfiger watch. Whether you're looking for classic leather, smart stainless steel or dressy silver watch, why not indulge in our range of men's and ladies watches and discover luxurious high fashion, sophistication in enduring designs that will inevitably stand the test of time."
);
let p7 = new Product(
"Emporio Armani",
2745,
"<img src='./../img/armanisilver.jpg'/>",
"Emporio Armani, as a brand, is known for its ready-to-wear and runway collections. It focuses on current trends and modern traits."
);
let p8 = new Product(
"Fossil",
1295,
"<img src='./../img/fossilsilver.jpg'/>",
"This watch really does remind me of Omega’s moonwatch. It’s not a direct homage but is certainly very similar in its styling. And really, it’s the appearance that attracted me to this watch. It is a mid-sized, racing chronograph with real retro charm."
);
let p9 = new Product(
"Thomas Sabo",
2625,
"<img src='./../img/thomasblack.jpg'/>",
"Black and silver men's watch: The Statement chronograph from THOMAS SABO convinces with a rebellious design. The dial of the dial, the ring of the case, the crown and the buttons of the case and also the detailed bracelet in stainless steel attract the eye thanks to its pyramid design and typically THOMAS SABO DNA."
);
let p10 = new Product(
"Braun",
2495,
"<img src='./../img/braunblack.jpg'/>",
"A collaborative design project that partners German design with British fashion. Braun and Paul Smith have teamed up on a limited-edition clock and watch project. Paul Smith is one of Britain’s foremost designers, renowned for his creative spirit, which combines tradition and modernity."
);
let p11 = new Product(
"Omega",
16305,
"<img src='./../img/omega.jpg'/>",
" The OMEGA Speedmaster is one of OMEGA’s most iconic timepieces. Having been a part of all six lunar missions, the legendary Speedmaster is an impressive representation of the brand’s adventurous pioneering spirit."
);
let p12 = new Product(
"Oris",
52900,
"<img src='./../img/oris.jpg'/>",
"The Aquis collection of diver’s watches offers real-world functionality in an innovative package, through a combination of considered design, high-performance materials and fit-for-purpose functions."
);
$(function () {
addProduct();
createProduct();
getFromLocalStorage();
updateCartTotalPrice();
notice();
let select = $(".form-control");
$(select).on("change", function (e) {
console.log($(this).val());
if ($(this).val() == "l2h") {
products.sort((a, b) => {
if (a.price > b.price) {
return 1;
}
if (a.price < b.price) {
return -1;
}
return 0;
});
createProduct();
} else if ($(this).val() == "h2l") {
products.reverse();
}
createProduct();
});
$("#buyButton").on("click", function () {
if (cartProducts.length <= 0) {
alert("Shopping cart is empty");
} else {
for (let i = 0; i < cartProducts.length; i++) {
if (cartProducts.length >= 0) {
window.location.href = "./../html/checkout.html";
}
}
}
});
$("#dialog").dialog({
autoOpen: false,
position: { my: "right top", at: "right top", of: window },
show: {
effect: "blind",
duration: 1000,
},
hide: {
effect: "blind",
duration: 1000,
},
});
$("#opener").on("click", function () {
if (!$("#dialog").dialog("isOpen")) {
$("#dialog").dialog("open");
} else {
$("#dialog").dialog("close");
}
});
});
function addProduct() {
products.push(p1, p2, p3, p4, p5, p6, p7, p8, p9, p10, p11, p12);
}
function createProduct() {
let productlist = document.getElementById("product-container");
productlist.innerHTML = "";
$.each(products, (i, product) => {
let container = $("<div>").addClass("product").attr("id", product.id);
$("<div>").addClass("image").html(product.image).appendTo(container);
$("<h3>").html(product.name).appendTo(container);
$("<p>")
.addClass("price")
.html(product.price + " " + "SEK")
.appendTo(container);
$("<a>")
.attr("href", "javascript:;")
.addClass("show_hide")
.attr("data-content", "toggle-text")
.attr("id", "atag")
.text("Read More")
.appendTo(container);
$("<p>")
.addClass("description")
.html(product.description)
.appendTo(container);
let addToCartButtons = $("<button>Add to Cart</button>")
.addClass("AddToCartButton")
.appendTo(container);
addToCartButtons.on("click", () => {
clickedAddToCart(products[i]);
});
container.appendTo($("#product-container"));
});
$(".description").hide();
$(".show_hide").on("click", function () {
let txt = $(".description").is(":visible") ? "Read More" : "Read Less";
$(".show_hide").text(txt);
$(this).next(".description").slideToggle(200);
});
}
function createShoppingCart() {
let shoppingcart = document.getElementById("shoppingCart-container");
shoppingcart.innerHTML = "";
$.each(cartProducts, (i, cartProduct) => {
let shoppingCartContainer = $("<div>")
.addClass("cartproduct")
.attr("id", cartProducts[i].product.id);
$("<div>")
.addClass("image")
.html(cartProducts[i].product.image)
.appendTo(shoppingCartContainer);
$("<h3>")
.html(cartProducts[i].product.name)
.appendTo(shoppingCartContainer);
$("<p>")
.html(cartProducts[i].product.price + " " + "SEK")
.appendTo(shoppingCartContainer);
let deleteButton = $("<button>Delete</button>")
.addClass("deleteButton")
.appendTo(shoppingCartContainer);
deleteButton.on("click", () => {
deleteCartProduct(cartProducts[i]);
});
let counterdiv = $("<div>")
.addClass("counterdiv")
.appendTo(shoppingCartContainer);
let displayCounter = $("<div>").addClass("counter").appendTo(counterdiv);
$("<p>")
.addClass("activeCount")
.html(cartProducts[i].qty)
.appendTo(displayCounter);
let minus = $("<button>-</button>")
.html('<i class="fas fa-minus-circle"></i>')
.addClass("subbtn")
.on("click", () => {
subtractOneProduct(cartProducts[i]);
});
minus.appendTo(counterdiv);
let add = $("<button>+</button>")
.addClass("addbtn")
.html('<i class="fas fa-plus-circle"></i>')
.on("click", () => {
addOneProduct(cartProducts[i]);
});
add.appendTo(counterdiv);
shoppingCartContainer.appendTo($("#shoppingCart-container"));
});
}
function addOneProduct(cartProduct) {
for (let i = 0; i < cartProducts.length; i++) {
if (cartProducts[i].product.id == cartProduct.product.id) {
cartProducts[i].qty++;
createShoppingCart();
if (cartProducts[i].qty > 1) {
// only the matched row contributes to the running total
let total = cartProducts[i].qty * parseInt(cartProducts[i].product.price);
listOfTotal.push(total);
}
updateCartTotalPrice();
addToLocalStorage(cartProducts);
notice();
}
}
}
function subtractOneProduct(cartProduct) {
for (let i = 0; i < cartProducts.length; i++) {
if (cartProducts[i].product.id == cartProduct.product.id) {
cartProducts[i].qty--;
if (cartProducts[i].qty < 1) {
// drop the row entirely once the count reaches zero
cartProducts.splice(i, 1);
cartProduct.product.inCart = false;
}
createShoppingCart();
updateCartTotalPrice();
notice();
addToLocalStorage(cartProducts);
break;
}
}
}
function deleteCartProdu | ct(cartProduct) { | identifier_name | |
products.js | ",
2495,
"<img src='./../img/huntersbrown.jpg'/>",
"Hunters Race is a curated collection of the finest timepieces designed in Auckland, New Zealand by father and son team Scott and Jack Ramsay. Scott has over thirty years experience in the watch industry and ensures that their watches are of the highest quality, along with providing a very professional back up service. The watch has a 12 month guarantee and comes in a beautiful gift box along with a spare strap. "
);
let p6 = new Product(
"Tommy Hilfiger",
1595,
"<img src='./../img/tommyblack.jpg'/>",
"Make a premium mark of style on your outfit with a timeless Tommy Hilfiger watch. Whether you're looking for classic leather, smart stainless steel or dressy silver watch, why not indulge in our range of men's and ladies watches and discover luxurious high fashion, sophistication in enduring designs that will inevitably stand the test of time."
);
let p7 = new Product(
"Emporio Armani",
2745,
"<img src='./../img/armanisilver.jpg'/>",
"Emporio Armani, as a brand, is known for its ready-to-wear and runway collections. It focuses on current trends and modern traits."
);
let p8 = new Product(
"Fossil",
1295,
"<img src='./../img/fossilsilver.jpg'/>",
"This watch really does remind me of Omega’s moonwatch. It’s not a direct homage but is certainly very similar in its styling. And really, it’s the appearance that attracted me to this watch. It is a mid-sized, racing chronograph with real retro charm."
);
let p9 = new Product(
"Thomas Sabo",
2625,
"<img src='./../img/thomasblack.jpg'/>",
"Black and silver men's watch: The Statement chronograph from THOMAS SABO convinces with a rebellious design. The dial of the dial, the ring of the case, the crown and the buttons of the case and also the detailed bracelet in stainless steel attract the eye thanks to its pyramid design and typically THOMAS SABO DNA."
);
let p10 = new Product(
"Braun",
2495,
"<img src='./../img/braunblack.jpg'/>",
"A collaborative design project that partners German design with British fashion. Braun and Paul Smith have teamed up on a limited-edition clock and watch project. Paul Smith is one of Britain’s foremost designers, renowned for his creative spirit, which combines tradition and modernity."
);
let p11 = new Product(
"Omega",
16305,
"<img src='./../img/omega.jpg'/>",
" The OMEGA Speedmaster is one of OMEGA’s most iconic timepieces. Having been a part of all six lunar missions, the legendary Speedmaster is an impressive representation of the brand’s adventurous pioneering spirit."
);
let p12 = new Product(
"Oris",
52900,
"<img src='./../img/oris.jpg'/>",
"The Aquis collection of diver’s watches offers real-world functionality in an innovative package, through a combination of considered design, high-performance materials and fit-for-purpose functions."
);
$(function () {
addProduct();
createProduct();
getFromLocalStorage();
updateCartTotalPrice();
notice();
let select = $(".form-control");
select.on("change", function () {
if ($(this).val() == "l2h") {
// sort ascending by price
products.sort((a, b) => a.price - b.price);
} else if ($(this).val() == "h2l") {
// sort descending by price
products.sort((a, b) => b.price - a.price);
}
createProduct();
});
$("#buyButton").on("click", function () {
if (cartProducts.length <= 0) {
alert("Shopping cart is empty");
} else {
for (let i = 0; i < cartProducts.length; i++) {
if (cartProducts.length >= 0) {
window.location.href = "./../html/checkout.html";
}
}
}
});
$("#dialog").dialog({
autoOpen: false,
position: { my: "right top", at: "right top", of: window },
show: {
effect: "blind",
duration: 1000,
},
hide: {
effect: "blind",
duration: 1000,
},
});
$("#opener").on("click", function () {
if (!$("#dialog").dialog("isOpen")) {
$("#dialog").dialog("open");
} else {
$("#dialog").dialog("close");
}
});
});
function addProduct() {
products.push(p1, p2, p3, p4, p5, p6, p7, p8, p9, p10, p11, p12);
}
function createProduct() {
let productlist = document.getElementById("product-container");
productlist.innerHTML = "";
$.each(products, (i, product) => {
let container = $("<div>").addClass("product").attr("id", product.id);
$("<div>").addClass("image").html(product.image).appendTo(container);
$("<h3>").html(product.name).appendTo(container);
$("<p>")
.addClass("price")
.html(product.price + " " + "SEK")
.appendTo(container);
$("<a>")
.attr("href", "javascript:;")
.addClass("show_hide")
.attr("data-content", "toggle-text")
.attr("id", "atag")
.text("Read More")
.appendTo(container);
$("<p>")
.addClass("description")
.html(product.description)
.appendTo(container);
let addToCartButtons = $("<button>Add to Cart</button>")
.addClass("AddToCartButton")
.appendTo(container);
addToCartButtons.on("click", () => {
clickedAddToCart(products[i]);
});
container.appendTo($("#product-container"));
});
$(".description").hide();
$(".show_hide").on("click", function () {
let txt = $(".description").is(":visible") ? "Read More" : "Read Less";
$(".show_hide").text(txt);
$(this).next(".description").slideToggle(200);
});
}
function createShoppingCart() {
let shoppingcart = document.getElementById("shoppingCart-container");
shoppingcart.innerHTML = "";
$.each(cartProducts, (i, cartProduct) => {
let shoppingCartContainer = $("<div>")
.addClass("cartproduct")
.attr("id", cartProducts[i].product.id);
$("<div>")
.addClass("image")
.html(cartProducts[i].product.image)
.appendTo(shoppingCartContainer);
$("<h3>")
.html(cartProducts[i].product.name)
.appendTo(shoppingCartContainer);
$("<p>")
.html(cartProducts[i].product.price + " " + "SEK")
.appendTo(shoppingCartContainer);
let deleteButton = $("<button>Delete</button>")
.addClass("deleteButton")
.appendTo(shoppingCartContainer);
deleteButton.on("click", () => {
deleteCartProduct(cartProducts[i]);
});
let counterdiv = $("<div>")
.addClass("counterdiv")
.appendTo(shoppingCartContainer);
let displayCounter = $("<div>").addClass("counter").appendTo(counterdiv);
$("<p>")
.addClass("activeCount")
.html(cartProducts[i].qty)
.appendTo(displayCounter);
let minus = $("<button>-</button>")
.html('<i class="fas fa-minus-circle"></i>')
.addClass("subbtn")
.on("click", () => {
subtractOneProduct(cartProducts[i]);
});
minus.appendTo(counterdiv);
let add = $("<button>+</button>")
.addClass("addbtn")
.html('<i class="fas fa-plus-circle"></i>')
.on("click", () => {
addOneProduct(cartProducts[i]);
});
add.appendTo(counterdiv);
shoppingCartContainer.appendTo($("#shoppingCart-container"));
});
}
function addOneProduct(cartProduct) {
for (let i = 0; i < cartProducts.length; i++) {
if (cartProducts[i].product.id == cartProduct.product.id) {
cartProducts[i].qty++;
createShoppingCart();
if (cartProducts[i].qty > 1) {
// only the matched row contributes to the running total
let total = cartProducts[i].qty * parseInt(cartProducts[i].product.price);
listOfTotal.push(total);
}
updateCartTotalPrice();
addToLocalStorage(cartProducts);
notice();
}
}
}
function subtractOneProduct(cartProduct) {
for (let i = 0; i < cartProducts.length; i++) {
if (cartProducts[i].product.id == cartProduct.product.id) {
cartProducts[i].qty--;
}
if (cartProducts[i].qty < 1) {
cartPro | ducts.splice(i, 1);
cartProduct.product.inCart = false;
}
createShop | conditional_block | |
cli.py |
from six.moves import configparser
from six.moves import input
from tqdm import tqdm
from .api import OSF
from .exceptions import UnauthorizedException
from .utils import norm_remote_path, split_storage, makedirs, checksum
def config_from_file():
if os.path.exists(".osfcli.config"):
config_ = configparser.ConfigParser()
config_.read(".osfcli.config")
# for python2 compatibility
config = dict(config_.items('osf'))
else:
config = {}
return config
def config_from_env(config):
username = os.getenv("OSF_USERNAME")
if username is not None:
config['username'] = username
project = os.getenv("OSF_PROJECT")
if project is not None:
config['project'] = project
return config
def | (args, config):
if args.username is None:
username = config.get('username')
else:
username = args.username
return username
def _setup_osf(args):
# Command line options have precedence over environment variables,
# which have precedence over the config file.
config = config_from_env(config_from_file())
username = _get_username(args, config)
project = config.get('project')
if args.project is None:
args.project = project
# still None? We are in trouble
if args.project is None:
sys.exit('You have to specify a project ID via the command line,'
' configuration file or environment variable.')
password = None
if username is not None:
password = os.getenv("OSF_PASSWORD")
# Prompt user when password is not set
if password is None:
password = getpass.getpass('Please input your password: ')
return OSF(username=username, password=password)
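# The precedence chain above (command line > environment > config file) can
# be read in one place; this helper is an illustrative sketch, not part of
# osfclient itself:
def _resolve_username_sketch(args, file_config):
    if args.username is not None:
        return args.username  # 1. command-line flag wins
    env_username = os.getenv("OSF_USERNAME")
    if env_username is not None:
        return env_username  # 2. then the environment variable
    return file_config.get('username')  # 3. finally .osfcli.config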
def might_need_auth(f):
"""Decorate a CLI function that might require authentication.
Catches any UnauthorizedException raised, prints a helpful message and
then exits.
"""
@wraps(f)
def wrapper(cli_args):
try:
return_value = f(cli_args)
except UnauthorizedException as e:
config = config_from_env(config_from_file())
username = _get_username(cli_args, config)
if username is None:
sys.exit("Please set a username (run `osf -h` for details).")
else:
sys.exit("You are not authorized to access this project.")
return return_value
return wrapper
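# Usage sketch: because of @wraps, a wrapped subcommand keeps its own
# __name__ and docstring, so argparse dispatch and help text still work:
#
#     @might_need_auth
#     def some_command(args):
#         """Docstring survives wrapping."""
#         ...
#
#     some_command.__name__  # -> 'some_command', not 'wrapper'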
def init(args):
"""Initialize or edit an existing .osfcli.config file."""
# reading existing config file, convert to configparser object
config = config_from_file()
config_ = configparser.ConfigParser()
config_.add_section('osf')
if 'username' not in config.keys():
config_.set('osf', 'username', '')
else:
config_.set('osf', 'username', config['username'])
if 'project' not in config.keys():
config_.set('osf', 'project', '')
else:
config_.set('osf', 'project', config['project'])
# now we can start asking for new values
print('Provide a username for the config file [current username: {}]:'.format(
config_.get('osf', 'username')))
username = input()
if username:
config_.set('osf', 'username', username)
print('Provide a project for the config file [current project: {}]:'.format(
config_.get('osf', 'project')))
project = input()
if project:
config_.set('osf', 'project', project)
with open(".osfcli.config", "w") as cfgfile:
    config_.write(cfgfile)
@might_need_auth
def clone(args):
"""Copy all files from all storages of a project.
The output directory defaults to the current directory.
If the project is private you need to specify a username.
If args.update is True, overwrite any existing local files only if local and
remote files differ.
"""
osf = _setup_osf(args)
project = osf.project(args.project)
output_dir = args.project
if args.output is not None:
output_dir = args.output
with tqdm(unit='files') as pbar:
for store in project.storages:
prefix = os.path.join(output_dir, store.name)
for file_ in store.files:
path = file_.path
if path.startswith('/'):
path = path[1:]
path = os.path.join(prefix, path)
if os.path.exists(path) and args.update:
if checksum(path) == file_.hashes.get('md5'):
continue
directory, _ = os.path.split(path)
makedirs(directory, exist_ok=True)
with open(path, "wb") as f:
file_.write_to(f)
pbar.update()
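# `checksum` is imported from .utils; the skip logic above only needs it to
# match the 'md5' entry in file_.hashes. A plausible stand-in, offered as an
# assumption rather than osfclient's actual helper:
def _md5_checksum_sketch(path, chunk_size=65536):
    import hashlib
    md5 = hashlib.md5()
    with open(path, 'rb') as fp:
        # stream the file so large downloads never need to fit in memory
        for chunk in iter(lambda: fp.read(chunk_size), b''):
            md5.update(chunk)
    return md5.hexdigest()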
@might_need_auth
def fetch(args):
"""Fetch an individual file from a project.
The first part of the remote path is interpreted as the name of the
storage provider. If there is no match the default (osfstorage) is
used.
The local path defaults to the name of the remote file.
If the project is private you need to specify a username.
If args.force is True, write local file even if that file already exists.
If args.force is False but args.update is True, overwrite an existing local
file only if local and remote files differ.
"""
storage, remote_path = split_storage(args.remote)
local_path = args.local
if local_path is None:
_, local_path = os.path.split(remote_path)
local_path_exists = os.path.exists(local_path)
if local_path_exists and not args.force and not args.update:
sys.exit("Local file %s already exists, not overwriting." % local_path)
directory, _ = os.path.split(local_path)
if directory:
makedirs(directory, exist_ok=True)
osf = _setup_osf(args)
project = osf.project(args.project)
store = project.storage(storage)
for file_ in store.files:
if norm_remote_path(file_.path) == remote_path:
if local_path_exists and not args.force and args.update:
if file_.hashes.get('md5') == checksum(local_path):
print("Local file %s already matches remote." % local_path)
break
with open(local_path, 'wb') as fp:
file_.write_to(fp)
# only fetching one file so we are done
break
@might_need_auth
def list_(args):
"""List all files from all storages for project.
If the project is private you need to specify a username.
"""
osf = _setup_osf(args)
project = osf.project(args.project)
for store in project.storages:
prefix = store.name
for file_ in store.files:
path = file_.path
if path.startswith('/'):
path = path[1:]
print(os.path.join(prefix, path))
@might_need_auth
def upload(args):
"""Upload a new file to an existing project.
The first part of the remote path is interpreted as the name of the
storage provider. If there is no match the default (osfstorage) is
used.
If the project is private you need to specify a username.
To upload a whole directory (and all its sub-directories) use the `-r`
command-line option. If your source directory name ends in a / then
files will be created directly in the remote directory. If it does not
end in a slash an extra sub-directory with the name of the local directory
will be created.
To place contents of local directory `foo` in remote directory `bar/foo`:
$ osf upload -r foo bar
To place contents of local directory `foo` in remote directory `bar`:
$ osf upload -r foo/ bar
"""
osf = _setup_osf(args)
if osf.username is None or osf.password is None:
sys.exit('To upload a file you need to provide a username and'
' password.')
project = osf.project(args.project)
storage, remote_path = split_storage(args.destination)
if remote_path == '':
remote_path = os.path.split(args.source)[-1]
store = project.storage(storage)
if args.recursive:
if not os.path.isdir(args.source):
raise RuntimeError("Expected source ({}) to be a directory when "
"using recursive mode.".format(args.source))
# local name of the directory that is being uploaded
_, dir_name = os.path.split(args.source)
for root, _, files in os.walk(args.source):
subdir_path = os.path.relpath(root, args.source)
for fname in files:
local_path = os.path.join(root, fname)
with open(local_path, 'rb') as fp:
# build the remote path + fname
name = os.path.join(remote_path, dir_name, subdir_path,
fname)
store.create_file(name, fp, force=args.force,
update=args.update)
else:
with open(args.source, 'rb') as fp:
store.create_file(remote_path, fp, force=args.force,
update=args.update)
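# The trailing-slash rule in the docstring falls out of os.path.split:
#
#     os.path.split('foo')   -> ('', 'foo')    so dir_name = 'foo'
#     os.path.split('foo/')  -> ('foo', '')    so dir_name = ''
#
# and os.path.join drops the empty dir_name, so foo/sub/data.csv uploads to
# bar/foo/sub/data.csv for `osf upload -r foo bar` but to bar/sub/data.csv
# for `osf upload -r foo/ bar` (paths here are illustrative).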
@might_need_auth
def remove(args):
"""Remove a file from the project's storage.
The first part of the remote path is interpreted as the name of the
storage provider. If there is no match the default (osfstorage) | _get_username | identifier_name |
cli.py |
from six.moves import configparser
from six.moves import input
from tqdm import tqdm
from .api import OSF
from .exceptions import UnauthorizedException
from .utils import norm_remote_path, split_storage, makedirs, checksum
def config_from_file():
if os.path.exists(".osfcli.config"):
config_ = configparser.ConfigParser()
config_.read(".osfcli.config")
# for python2 compatibility
config = dict(config_.items('osf'))
else:
config = {}
return config
def config_from_env(config):
username = os.getenv("OSF_USERNAME")
if username is not None:
config['username'] = username
project = os.getenv("OSF_PROJECT")
if project is not None:
config['project'] = project
return config
def _get_username(args, config):
if args.username is None:
username = config.get('username')
else:
username = args.username
return username
def _setup_osf(args):
# Command line options have precedence over environment variables,
# which have precedence over the config file.
config = config_from_env(config_from_file())
username = _get_username(args, config)
project = config.get('project')
if args.project is None:
args.project = project
# still None? We are in trouble
if args.project is None:
sys.exit('You have to specify a project ID via the command line,'
' configuration file or environment variable.')
password = None
if username is not None:
password = os.getenv("OSF_PASSWORD")
# Prompt user when password is not set
if password is None:
password = getpass.getpass('Please input your password: ')
return OSF(username=username, password=password)
def might_need_auth(f):
"""Decorate a CLI function that might require authentication.
Catches any UnauthorizedException raised, prints a helpful message and
then exits.
"""
@wraps(f)
def wrapper(cli_args):
try:
return_value = f(cli_args)
except UnauthorizedException as e:
config = config_from_env(config_from_file())
username = _get_username(cli_args, config)
if username is None:
sys.exit("Please set a username (run `osf -h` for details).")
else:
sys.exit("You are not authorized to access this project.")
return return_value
return wrapper
def init(args):
"""Initialize or edit an existing .osfcli.config file."""
# reading existing config file, convert to configparser object
config = config_from_file()
config_ = configparser.ConfigParser()
config_.add_section('osf')
if 'username' not in config.keys():
config_.set('osf', 'username', '')
else:
config_.set('osf', 'username', config['username'])
if 'project' not in config.keys():
config_.set('osf', 'project', '')
else:
config_.set('osf', 'project', config['project'])
# now we can start asking for new values
print('Provide a username for the config file [current username: {}]:'.format(
config_.get('osf', 'username')))
username = input()
if username:
config_.set('osf', 'username', username)
print('Provide a project for the config file [current project: {}]:'.format(
config_.get('osf', 'project')))
project = input()
if project:
config_.set('osf', 'project', project)
with open(".osfcli.config", "w") as cfgfile:
    config_.write(cfgfile)
@might_need_auth
def clone(args):
"""Copy all files from all storages of a project.
The output directory defaults to the current directory.
If the project is private you need to specify a username.
If args.update is True, overwrite any existing local files only if local and
remote files differ.
"""
osf = _setup_osf(args)
project = osf.project(args.project)
output_dir = args.project
if args.output is not None:
output_dir = args.output
with tqdm(unit='files') as pbar:
for store in project.storages:
|
@might_need_auth
def fetch(args):
"""Fetch an individual file from a project.
The first part of the remote path is interpreted as the name of the
storage provider. If there is no match the default (osfstorage) is
used.
The local path defaults to the name of the remote file.
If the project is private you need to specify a username.
If args.force is True, write local file even if that file already exists.
If args.force is False but args.update is True, overwrite an existing local
file only if local and remote files differ.
"""
storage, remote_path = split_storage(args.remote)
local_path = args.local
if local_path is None:
_, local_path = os.path.split(remote_path)
local_path_exists = os.path.exists(local_path)
if local_path_exists and not args.force and not args.update:
sys.exit("Local file %s already exists, not overwriting." % local_path)
directory, _ = os.path.split(local_path)
if directory:
makedirs(directory, exist_ok=True)
osf = _setup_osf(args)
project = osf.project(args.project)
store = project.storage(storage)
for file_ in store.files:
if norm_remote_path(file_.path) == remote_path:
if local_path_exists and not args.force and args.update:
if file_.hashes.get('md5') == checksum(local_path):
print("Local file %s already matches remote." % local_path)
break
with open(local_path, 'wb') as fp:
file_.write_to(fp)
# only fetching one file so we are done
break
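# `split_storage` comes from .utils; judging from its call sites it peels a
# provider name off the front of the remote path and falls back to
# osfstorage. A rough sketch -- the provider list is an assumption:
def _split_storage_sketch(path, default='osfstorage'):
    known = ('osfstorage', 'github', 'dropbox', 'googledrive')
    head, _, tail = path.partition('/')
    if head in known:
        return head, tail  # 'github/docs/a.txt' -> ('github', 'docs/a.txt')
    return default, path   # 'docs/a.txt' -> ('osfstorage', 'docs/a.txt')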
@might_need_auth
def list_(args):
"""List all files from all storages for project.
If the project is private you need to specify a username.
"""
osf = _setup_osf(args)
project = osf.project(args.project)
for store in project.storages:
prefix = store.name
for file_ in store.files:
path = file_.path
if path.startswith('/'):
path = path[1:]
print(os.path.join(prefix, path))
@might_need_auth
def upload(args):
"""Upload a new file to an existing project.
The first part of the remote path is interpreted as the name of the
storage provider. If there is no match the default (osfstorage) is
used.
If the project is private you need to specify a username.
To upload a whole directory (and all its sub-directories) use the `-r`
command-line option. If your source directory name ends in a / then
files will be created directly in the remote directory. If it does not
end in a slash an extra sub-directory with the name of the local directory
will be created.
To place contents of local directory `foo` in remote directory `bar/foo`:
$ osf upload -r foo bar
To place contents of local directory `foo` in remote directory `bar`:
$ osf upload -r foo/ bar
"""
osf = _setup_osf(args)
if osf.username is None or osf.password is None:
sys.exit('To upload a file you need to provide a username and'
' password.')
project = osf.project(args.project)
storage, remote_path = split_storage(args.destination)
if remote_path == '':
remote_path = os.path.split(args.source)[-1]
store = project.storage(storage)
if args.recursive:
if not os.path.isdir(args.source):
raise RuntimeError("Expected source ({}) to be a directory when "
"using recursive mode.".format(args.source))
# local name of the directory that is being uploaded
_, dir_name = os.path.split(args.source)
for root, _, files in os.walk(args.source):
subdir_path = os.path.relpath(root, args.source)
for fname in files:
local_path = os.path.join(root, fname)
with open(local_path, 'rb') as fp:
# build the remote path + fname
name = os.path.join(remote_path, dir_name, subdir_path,
fname)
store.create_file(name, fp, force=args.force,
update=args.update)
else:
with open(args.source, 'rb') as fp:
store.create_file(remote_path, fp, force=args.force,
update=args.update)
@might_need_auth
def remove(args):
"""Remove a file from the project's storage.
The first part of the remote path is interpreted as the name of the
storage provider. If there is no match the default (osfstorage | prefix = os.path.join(output_dir, store.name)
for file_ in store.files:
path = file_.path
if path.startswith('/'):
path = path[1:]
path = os.path.join(prefix, path)
if os.path.exists(path) and args.update:
if checksum(path) == file_.hashes.get('md5'):
continue
directory, _ = os.path.split(path)
makedirs(directory, exist_ok=True)
with open(path, "wb") as f:
file_.write_to(f)
pbar.update() | conditional_block |
cli.py |
from six.moves import configparser
from six.moves import input
from tqdm import tqdm
from .api import OSF
from .exceptions import UnauthorizedException
from .utils import norm_remote_path, split_storage, makedirs, checksum
def config_from_file():
if os.path.exists(".osfcli.config"):
config_ = configparser.ConfigParser()
config_.read(".osfcli.config")
# for python2 compatibility
config = dict(config_.items('osf'))
else:
config = {}
return config
def config_from_env(config):
username = os.getenv("OSF_USERNAME")
if username is not None:
config['username'] = username
project = os.getenv("OSF_PROJECT")
if project is not None:
config['project'] = project
return config
def _get_username(args, config):
if args.username is None:
username = config.get('username')
else:
username = args.username
return username
def _setup_osf(args):
# Command line options have precedence over environment variables,
# which have precedence over the config file.
config = config_from_env(config_from_file())
username = _get_username(args, config)
project = config.get('project')
if args.project is None:
args.project = project
# still None? We are in trouble
if args.project is None:
sys.exit('You have to specify a project ID via the command line,'
' configuration file or environment variable.')
password = None
if username is not None:
password = os.getenv("OSF_PASSWORD")
# Prompt user when password is not set
if password is None:
password = getpass.getpass('Please input your password: ')
return OSF(username=username, password=password)
def might_need_auth(f):
"""Decorate a CLI function that might require authentication.
Catches any UnauthorizedException raised, prints a helpful message and
then exits.
"""
@wraps(f)
def wrapper(cli_args):
try:
return_value = f(cli_args)
except UnauthorizedException as e:
config = config_from_env(config_from_file())
username = _get_username(cli_args, config)
if username is None:
sys.exit("Please set a username (run `osf -h` for details).")
else:
sys.exit("You are not authorized to access this project.")
return return_value
return wrapper
def init(args):
"""Initialize or edit an existing .osfcli.config file."""
# reading existing config file, convert to configparser object
config = config_from_file()
config_ = configparser.ConfigParser()
config_.add_section('osf')
if 'username' not in config.keys():
config_.set('osf', 'username', '')
else:
config_.set('osf', 'username', config['username'])
if 'project' not in config.keys():
config_.set('osf', 'project', '')
else:
config_.set('osf', 'project', config['project'])
# now we can start asking for new values
print('Provide a username for the config file [current username: {}]:'.format(
config_.get('osf', 'username')))
username = input()
if username:
config_.set('osf', 'username', username)
print('Provide a project for the config file [current project: {}]:'.format(
config_.get('osf', 'project')))
project = input()
if project:
config_.set('osf', 'project', project)
with open(".osfcli.config", "w") as cfgfile:
    config_.write(cfgfile)
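# The file written above is a minimal INI document that config_from_file()
# parses straight back; for example (values are illustrative):
#
#     [osf]
#     username = jane@example.org
#     project = 9abcd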
@might_need_auth
def clone(args):
"""Copy all files from all storages of a project.
The output directory defaults to the current directory.
If the project is private you need to specify a username.
If args.update is True, overwrite any existing local files only if local and
remote files differ.
"""
osf = _setup_osf(args)
project = osf.project(args.project)
output_dir = args.project
if args.output is not None:
output_dir = args.output
with tqdm(unit='files') as pbar:
for store in project.storages:
prefix = os.path.join(output_dir, store.name)
for file_ in store.files:
path = file_.path
if path.startswith('/'):
path = path[1:]
path = os.path.join(prefix, path)
if os.path.exists(path) and args.update:
if checksum(path) == file_.hashes.get('md5'):
continue
directory, _ = os.path.split(path)
makedirs(directory, exist_ok=True)
with open(path, "wb") as f:
file_.write_to(f)
pbar.update()
@might_need_auth
def fetch(args):
"""Fetch an individual file from a project.
The first part of the remote path is interpreted as the name of the
storage provider. If there is no match the default (osfstorage) is
used.
The local path defaults to the name of the remote file.
If the project is private you need to specify a username.
If args.force is True, write local file even if that file already exists.
If args.force is False but args.update is True, overwrite an existing local
file only if local and remote files differ.
"""
storage, remote_path = split_storage(args.remote)
local_path = args.local
if local_path is None:
_, local_path = os.path.split(remote_path)
local_path_exists = os.path.exists(local_path)
if local_path_exists and not args.force and not args.update:
sys.exit("Local file %s already exists, not overwriting." % local_path)
directory, _ = os.path.split(local_path)
if directory:
makedirs(directory, exist_ok=True)
osf = _setup_osf(args)
project = osf.project(args.project)
store = project.storage(storage)
for file_ in store.files:
if norm_remote_path(file_.path) == remote_path:
if local_path_exists and not args.force and args.update:
if file_.hashes.get('md5') == checksum(local_path):
print("Local file %s already matches remote." % local_path)
break
with open(local_path, 'wb') as fp:
file_.write_to(fp)
# only fetching one file so we are done
break
@might_need_auth
def list_(args):
|
@might_need_auth
def upload(args):
"""Upload a new file to an existing project.
The first part of the remote path is interpreted as the name of the
storage provider. If there is no match the default (osfstorage) is
used.
If the project is private you need to specify a username.
To upload a whole directory (and all its sub-directories) use the `-r`
command-line option. If your source directory name ends in a / then
files will be created directly in the remote directory. If it does not
end in a slash an extra sub-directory with the name of the local directory
will be created.
To place contents of local directory `foo` in remote directory `bar/foo`:
$ osf upload -r foo bar
To place contents of local directory `foo` in remote directory `bar`:
$ osf upload -r foo/ bar
"""
osf = _setup_osf(args)
if osf.username is None or osf.password is None:
sys.exit('To upload a file you need to provide a username and'
' password.')
project = osf.project(args.project)
storage, remote_path = split_storage(args.destination)
if remote_path == '':
remote_path = os.path.split(args.source)[-1]
store = project.storage(storage)
if args.recursive:
if not os.path.isdir(args.source):
raise RuntimeError("Expected source ({}) to be a directory when "
"using recursive mode.".format(args.source))
# local name of the directory that is being uploaded
_, dir_name = os.path.split(args.source)
for root, _, files in os.walk(args.source):
subdir_path = os.path.relpath(root, args.source)
for fname in files:
local_path = os.path.join(root, fname)
with open(local_path, 'rb') as fp:
# build the remote path + fname
name = os.path.join(remote_path, dir_name, subdir_path,
fname)
store.create_file(name, fp, force=args.force,
update=args.update)
else:
with open(args.source, 'rb') as fp:
store.create_file(remote_path, fp, force=args.force,
update=args.update)
@might_need_auth
def remove(args):
"""Remove a file from the project's storage.
The first part of the remote path is interpreted as the name of the
storage provider. If there is no match the default (osfstorage | """List all files from all storages for project.
If the project is private you need to specify a username.
"""
osf = _setup_osf(args)
project = osf.project(args.project)
for store in project.storages:
prefix = store.name
for file_ in store.files:
path = file_.path
if path.startswith('/'):
path = path[1:]
print(os.path.join(prefix, path)) | identifier_body |
cli.py |
from six.moves import configparser
from six.moves import input
from tqdm import tqdm
from .api import OSF
from .exceptions import UnauthorizedException
from .utils import norm_remote_path, split_storage, makedirs, checksum
def config_from_file():
if os.path.exists(".osfcli.config"):
config_ = configparser.ConfigParser()
config_.read(".osfcli.config")
# for python2 compatibility
config = dict(config_.items('osf'))
else:
config = {}
return config
def config_from_env(config):
username = os.getenv("OSF_USERNAME")
if username is not None:
config['username'] = username
project = os.getenv("OSF_PROJECT")
if project is not None:
config['project'] = project
return config
def _get_username(args, config):
if args.username is None:
username = config.get('username')
else:
username = args.username
return username
def _setup_osf(args):
# Command line options have precedence over environment variables,
# which have precedence over the config file.
config = config_from_env(config_from_file())
username = _get_username(args, config)
project = config.get('project')
if args.project is None:
args.project = project
# still None? We are in trouble
if args.project is None:
sys.exit('You have to specify a project ID via the command line,'
' configuration file or environment variable.')
password = None
if username is not None:
password = os.getenv("OSF_PASSWORD")
# Prompt user when password is not set
if password is None:
password = getpass.getpass('Please input your password: ')
return OSF(username=username, password=password)
def might_need_auth(f):
"""Decorate a CLI function that might require authentication.
Catches any UnauthorizedException raised, prints a helpful message and
then exits.
"""
@wraps(f)
def wrapper(cli_args):
try:
return_value = f(cli_args)
except UnauthorizedException as e:
config = config_from_env(config_from_file())
username = _get_username(cli_args, config)
if username is None:
sys.exit("Please set a username (run `osf -h` for details).")
else:
sys.exit("You are not authorized to access this project.")
return return_value
return wrapper
def init(args):
"""Initialize or edit an existing .osfcli.config file."""
# reading existing config file, convert to configparser object
config = config_from_file()
config_ = configparser.ConfigParser()
config_.add_section('osf')
if 'username' not in config.keys():
config_.set('osf', 'username', '')
else:
config_.set('osf', 'username', config['username'])
if 'project' not in config.keys():
config_.set('osf', 'project', '')
else:
config_.set('osf', 'project', config['project'])
# now we can start asking for new values
print('Provide a username for the config file [current username: {}]:'.format(
config_.get('osf', 'username')))
username = input()
if username:
config_.set('osf', 'username', username)
print('Provide a project for the config file [current project: {}]:'.format(
config_.get('osf', 'project')))
project = input()
if project:
config_.set('osf', 'project', project)
with open(".osfcli.config", "w") as cfgfile:
    config_.write(cfgfile)
@might_need_auth
def clone(args):
"""Copy all files from all storages of a project. |
If args.update is True, overwrite any existing local files only if local and
remote files differ.
"""
osf = _setup_osf(args)
project = osf.project(args.project)
output_dir = args.project
if args.output is not None:
output_dir = args.output
with tqdm(unit='files') as pbar:
for store in project.storages:
prefix = os.path.join(output_dir, store.name)
for file_ in store.files:
path = file_.path
if path.startswith('/'):
path = path[1:]
path = os.path.join(prefix, path)
if os.path.exists(path) and args.update:
if checksum(path) == file_.hashes.get('md5'):
continue
directory, _ = os.path.split(path)
makedirs(directory, exist_ok=True)
with open(path, "wb") as f:
file_.write_to(f)
pbar.update()
@might_need_auth
def fetch(args):
"""Fetch an individual file from a project.
The first part of the remote path is interpreted as the name of the
storage provider. If there is no match the default (osfstorage) is
used.
The local path defaults to the name of the remote file.
If the project is private you need to specify a username.
If args.force is True, write local file even if that file already exists.
If args.force is False but args.update is True, overwrite an existing local
file only if local and remote files differ.
"""
storage, remote_path = split_storage(args.remote)
local_path = args.local
if local_path is None:
_, local_path = os.path.split(remote_path)
local_path_exists = os.path.exists(local_path)
if local_path_exists and not args.force and not args.update:
sys.exit("Local file %s already exists, not overwriting." % local_path)
directory, _ = os.path.split(local_path)
if directory:
makedirs(directory, exist_ok=True)
osf = _setup_osf(args)
project = osf.project(args.project)
store = project.storage(storage)
for file_ in store.files:
if norm_remote_path(file_.path) == remote_path:
if local_path_exists and not args.force and args.update:
if file_.hashes.get('md5') == checksum(local_path):
print("Local file %s already matches remote." % local_path)
break
with open(local_path, 'wb') as fp:
file_.write_to(fp)
# only fetching one file so we are done
break
@might_need_auth
def list_(args):
"""List all files from all storages for project.
If the project is private you need to specify a username.
"""
osf = _setup_osf(args)
project = osf.project(args.project)
for store in project.storages:
prefix = store.name
for file_ in store.files:
path = file_.path
if path.startswith('/'):
path = path[1:]
print(os.path.join(prefix, path))
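# `norm_remote_path` (from .utils) only has to make OSF's leading-slash
# paths comparable with user input; a plausible stand-in that mirrors the
# strip done in list_ and clone (an assumption, not the actual helper):
def _norm_remote_path_sketch(path):
    return path[1:] if path.startswith('/') else path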
@might_need_auth
def upload(args):
"""Upload a new file to an existing project.
The first part of the remote path is interpreted as the name of the
storage provider. If there is no match the default (osfstorage) is
used.
If the project is private you need to specify a username.
To upload a whole directory (and all its sub-directories) use the `-r`
command-line option. If your source directory name ends in a / then
files will be created directly in the remote directory. If it does not
end in a slash an extra sub-directory with the name of the local directory
will be created.
To place contents of local directory `foo` in remote directory `bar/foo`:
$ osf upload -r foo bar
To place contents of local directory `foo` in remote directory `bar`:
$ osf upload -r foo/ bar
"""
osf = _setup_osf(args)
if osf.username is None or osf.password is None:
sys.exit('To upload a file you need to provide a username and'
' password.')
project = osf.project(args.project)
storage, remote_path = split_storage(args.destination)
if remote_path == '':
remote_path = os.path.split(args.source)[-1]
store = project.storage(storage)
if args.recursive:
if not os.path.isdir(args.source):
raise RuntimeError("Expected source ({}) to be a directory when "
"using recursive mode.".format(args.source))
# local name of the directory that is being uploaded
_, dir_name = os.path.split(args.source)
for root, _, files in os.walk(args.source):
subdir_path = os.path.relpath(root, args.source)
for fname in files:
local_path = os.path.join(root, fname)
with open(local_path, 'rb') as fp:
# build the remote path + fname
name = os.path.join(remote_path, dir_name, subdir_path,
fname)
store.create_file(name, fp, force=args.force,
update=args.update)
else:
with open(args.source, 'rb') as fp:
store.create_file(remote_path, fp, force=args.force,
update=args.update)
@might_need_auth
def remove(args):
"""Remove a file from the project's storage.
The first part of the remote path is interpreted as the name of the
storage provider. If there is no match the default (osf |
The output directory defaults to the current directory.
If the project is private you need to specify a username. | random_line_split |
jax_sinusoid.py | self.validation_block_sizes = block_sizes
MAML.__init__(self, params)
def _get_model(self):
"""
Return jax network initialisation and forward method.
"""
layers = []
# inner / hidden network layers + non-linearities
for l in self.network_layers:
layers.append(Dense(l))
layers.append(Relu)
# output layer (no non-linearity)
layers.append(Dense(self.output_dimension))
# make jax stax object
model = stax.serial(*layers)
return model
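# Usage sketch for the (init_fn, apply_fn) pair packed into `model`
# (shapes are illustrative; the forward pass is stored on the class as
# self.network_forward elsewhere):
#
#     init_fn, apply_fn = model
#     output_shape, net_params = init_fn(rng_key, (-1, 1))
#     predictions = apply_fn(net_params, x)  # x of shape (batch, 1)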
def _get_optimiser(self):
"""
Return jax optimiser: initialisation, update method and parameter getter method.
Optimiser learning rate is given by config (meta_lr).
"""
return optimizers.adam(step_size=self.meta_lr)
def _get_priority_queue(self):
"""Initiate priority queue"""
if self.task_type == 'sin3d':
param_ranges = self.params.get(["priority_queue", "param_ranges_3d"])
block_sizes = self.params.get(["priority_queue", "block_sizes_3d"])
elif self.task_type == 'sin2d':
param_ranges = self.params.get(["priority_queue", "param_ranges_2d"])
block_sizes = self.params.get(["priority_queue", "block_sizes_2d"])
return SinePriorityQueue(
queue_resume=self.params.get(["resume", "priority_queue"]),
counts_resume=self.params.get(["resume", "queue_counts"]),
sample_type=self.params.get(["priority_queue", "sample_type"]),
block_sizes=block_sizes,
param_ranges=param_ranges,
initial_value=self.params.get(["priority_queue", "initial_value"]),
epsilon_start=self.params.get(["priority_queue", "epsilon_start"]),
epsilon_final=self.params.get(["priority_queue", "epsilon_final"]),
epsilon_decay_start=self.params.get(["priority_queue", "epsilon_decay_start"]),
epsilon_decay_rate=self.params.get(["priority_queue", "epsilon_decay_rate"]),
burn_in=self.params.get(["priority_queue", "burn_in"]),
save_path=self.checkpoint_path
)
def _sample_task(self, batch_size, validate=False, step_count=None):
"""
Sample specific task(s) from defined distribution of tasks
E.g. one specific sine function from family of sines
:param batch_size: number of tasks to sample
:param validate: whether or not tasks are being used for validation
:param step_count: step count during training
:return tasks: batch of tasks
:return task_indices: indices of priority queue associated with batch of tasks
:return task_probabilities: probabilities of tasks sampled being chosen a priori
Returns batch of sin functions shifted in x direction by a phase parameter sampled randomly between phase_bounds
(set by config) and enlarged in the y direction by an amplitude parameter sampled randomly between amplitude_bounds
(also set by config). For the 3d sine option, the function is also squeezed in the x direction by a frequency parameter.
"""
tasks = []
task_probabilities = []
all_max_indices = [] if self.priority_sample else None
for _ in range(batch_size):
# sample a task from task distribution and generate x, y tensors for that task
if self.priority_sample and not validate:
# query queue for next task parameters
max_indices, task_parameters, task_probability = self.priority_queue.query(step=step_count)
all_max_indices.append(max_indices)
task_probabilities.append(task_probability)
# get epsilon value
epsilon = self.priority_queue.get_epsilon()
# get task from parameters returned from query
amplitude = task_parameters[0]
phase = task_parameters[1]
if self.task_type == 'sin3d':
frequency_scaling = task_parameters[2]
else:
frequency_scaling = 1.
parameters = [amplitude, phase, frequency_scaling]
task = self._get_task_from_params(parameters=parameters)
# compute metrics for tb logging
queue_count_loss_correlation = self.priority_queue.compute_count_loss_correlation()
queue_mean = np.mean(self.priority_queue.get_queue())
queue_std = np.std(self.priority_queue.get_queue())
# write to tensorboard
if epsilon:
self.writer.add_scalar('queue_metrics/epsilon', epsilon, step_count)
self.writer.add_scalar('queue_metrics/queue_correlation', queue_count_loss_correlation, step_count)
self.writer.add_scalar('queue_metrics/queue_mean', queue_mean, step_count)
self.writer.add_scalar('queue_metrics/queue_std', queue_std, step_count)
else:
# sample randomly (vanilla maml)
amplitude = random.uniform(self.amplitude_bounds[0], self.amplitude_bounds[1])
phase = random.uniform(self.phase_bounds[0], self.phase_bounds[1])
if self.task_type == 'sin3d':
frequency_scaling = random.uniform(self.frequency_bounds[0], self.frequency_bounds[1])
else:
frequency_scaling = 1.
parameters = [amplitude, phase, frequency_scaling]
task = self._get_task_from_params(parameters=parameters)
tasks.append(task)
return tasks, all_max_indices, task_probabilities
def _get_task_from_params(self, parameters: List) -> Any:
"""
Return sine function defined by parameters given
:param parameters: parameters defining the specific sin task in the distribution
:return modified_sin: sin function
(method differs from _sample_task in that it is not a random sample but
defined by parameters given)
"""
amplitude = parameters[0]
phase = parameters[1]
frequency_scaling = parameters[2]
def modified_sin(x):
return amplitude * np.sin(phase + frequency_scaling * x)
return modified_sin
def _generate_batch(self, tasks: List):
"""
Obtain batch of training examples from a list of tasks
:param tasks: list of tasks for which data points need to be sampled
:return x_batch: x points sampled from data
:return y_batch: y points associated with x_batch
"""
x_batch = np.stack([np.random.uniform(low=self.domain_bounds[0], high=self.domain_bounds[1], size=(self.inner_update_k, 1)) for _ in range(len(tasks))])
y_batch = np.stack([[tasks[t](x) for x in x_batch[t]] for t in range(len(tasks))])
return x_batch, y_batch
def _compute_loss(self, parameters, inputs, ground_truth):
"""
Computes loss of network
:param parameters: current weights of model
:param inputs: x data
:param ground_truth: y_data
:return loss: loss on ground truth vs output of network applied to inputs
"""
predictions = self.network_forward(parameters, inputs)
loss = np.mean((ground_truth - predictions) ** 2)
return loss
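# This scalar MSE is what the MAML inner loop differentiates through; one
# inner adaptation step looks schematically like this (inner_lr and the
# tree update are illustrative, not this class's exact update code):
#
#     grads = jax.grad(self._compute_loss)(parameters, inputs, ground_truth)
#     adapted = jax.tree_util.tree_map(lambda p, g: p - inner_lr * g,
#                                      parameters, grads)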
def _visualise(self, model_iterations, task, validation_x, validation_y, save_name, visualise_all=True):
"""
Visualise qualitative run.
:param validation_model_iterations: parameters of model after successive fine-tuning steps
:param val_task: task being evaluated
:param validation_x_batch: k data points fed to model for finetuning
:param validation_y_batch: ground truth data associated with validation_x_batch
:param save_name: name of file to be saved
:param visualise_all: whether to visualise all fine-tuning steps or just final
"""
# ground truth
plot_x = np.linspace(self.domain_bounds[0], self.domain_bounds[1], 100)
plot_y_ground_truth = [task(xi) for xi in plot_x]
fig = plt.figure()
plt.plot(plot_x, plot_y_ground_truth, label="Ground Truth")
final_plot_y_prediction = self.network_forward(model_iterations[-1], plot_x.reshape(len(plot_x), 1))
plt.plot(plot_x, final_plot_y_prediction, linestyle='dashed', linewidth=3.0, label='Fine-tuned MAML final update')
no_tuning_y_prediction = self.network_forward(model_iterations[0], plot_x.reshape(len(plot_x), 1))
plt.plot(plot_x, no_tuning_y_prediction, linestyle='dashed', linewidth=3.0, label='Untuned MAML prediction')
if visualise_all:
for i, (model_iteration) in enumerate(model_iterations[1:-1]):
plot_y_prediction = self.network_forward(model_iteration, plot_x.reshape(len(plot | self.device = device
self.task_type = params.get('task_type')
# extract relevant task-specific parameters
self.amplitude_bounds = params.get(['sin2d', 'amplitude_bounds'])
self.domain_bounds = params.get(['sin2d', 'domain_bounds'])
degree_phase_bounds = params.get(['sin2d', 'phase_bounds']) # phase given in degrees
if self.task_type == 'sin3d':
self.frequency_bounds = params.get(['sin3d', 'frequency_bounds'])
block_sizes = params.get(['sin3d', 'fixed_val_blocks'])
else:
block_sizes = params.get(['sin2d', 'fixed_val_blocks'])
# convert phase bounds/ fixed_val_interval from degrees to radians
self.phase_bounds = [
degree_phase_bounds[0] * (2 * np.pi) / 360, degree_phase_bounds[1] * (2 * np.pi) / 360
]
block_sizes[1] = block_sizes[1] * (2 * np.pi) / 360 | identifier_body | |
jax_sinusoid.py | # output layer (no non-linearity)
layers.append(Dense(self.output_dimension))
# make jax stax object
model = stax.serial(*layers)
return model
def _get_optimiser(self):
"""
Return jax optimiser: initialisation, update method and parameter getter method.
Optimiser learning rate is given by config (meta_lr).
"""
return optimizers.adam(step_size=self.meta_lr)
def _get_priority_queue(self):
"""Initiate priority queue"""
if self.task_type == 'sin3d':
param_ranges = self.params.get(["priority_queue", "param_ranges_3d"])
block_sizes = self.params.get(["priority_queue", "block_sizes_3d"])
elif self.task_type == 'sin2d':
param_ranges = self.params.get(["priority_queue", "param_ranges_2d"])
block_sizes = self.params.get(["priority_queue", "block_sizes_2d"])
return SinePriorityQueue(
queue_resume=self.params.get(["resume", "priority_queue"]),
counts_resume=self.params.get(["resume", "queue_counts"]),
sample_type=self.params.get(["priority_queue", "sample_type"]),
block_sizes=block_sizes,
param_ranges=param_ranges,
initial_value=self.params.get(["priority_queue", "initial_value"]),
epsilon_start=self.params.get(["priority_queue", "epsilon_start"]),
epsilon_final=self.params.get(["priority_queue", "epsilon_final"]),
epsilon_decay_start=self.params.get(["priority_queue", "epsilon_decay_start"]),
epsilon_decay_rate=self.params.get(["priority_queue", "epsilon_decay_rate"]),
burn_in=self.params.get(["priority_queue", "burn_in"]),
save_path=self.checkpoint_path
)
def _sample_task(self, batch_size, validate=False, step_count=None):
"""
Sample specific task(s) from defined distribution of tasks
E.g. one specific sine function from family of sines |
:param batch_size: number of tasks to sample
:param validate: whether or not tasks are being used for validation
:param step_count: step count during training
:return tasks: batch of tasks
:return task_indices: indices of priority queue associated with batch of tasks
:return task_probabilities: probabilities of tasks sampled being chosen a priori
Returns batch of sin functions shifted in x direction by a phase parameter sampled randomly between phase_bounds
(set by config) and enlarged in the y direction by an amplitude parameter sampled randomly between amplitude_bounds
(also set by config). For the 3d sine option, the function is also squeezed in the x direction by a frequency parameter.
"""
tasks = []
task_probabilities = []
all_max_indices = [] if self.priority_sample else None
for _ in range(batch_size):
# sample a task from task distribution and generate x, y tensors for that task
if self.priority_sample and not validate:
# query queue for next task parameters
max_indices, task_parameters, task_probability = self.priority_queue.query(step=step_count)
all_max_indices.append(max_indices)
task_probabilities.append(task_probability)
# get epsilon value
epsilon = self.priority_queue.get_epsilon()
# get task from parameters returned from query
amplitude = task_parameters[0]
phase = task_parameters[1]
if self.task_type == 'sin3d':
frequency_scaling = task_parameters[2]
else:
frequency_scaling = 1.
parameters = [amplitude, phase, frequency_scaling]
task = self._get_task_from_params(parameters=parameters)
# compute metrics for tb logging
queue_count_loss_correlation = self.priority_queue.compute_count_loss_correlation()
queue_mean = np.mean(self.priority_queue.get_queue())
queue_std = np.std(self.priority_queue.get_queue())
# write to tensorboard
if epsilon:
self.writer.add_scalar('queue_metrics/epsilon', epsilon, step_count)
self.writer.add_scalar('queue_metrics/queue_correlation', queue_count_loss_correlation, step_count)
self.writer.add_scalar('queue_metrics/queue_mean', queue_mean, step_count)
self.writer.add_scalar('queue_metrics/queue_std', queue_std, step_count)
else:
# sample randomly (vanilla maml)
amplitude = random.uniform(self.amplitude_bounds[0], self.amplitude_bounds[1])
phase = random.uniform(self.phase_bounds[0], self.phase_bounds[1])
if self.task_type == 'sin3d':
frequency_scaling = random.uniform(self.frequency_bounds[0], self.frequency_bounds[1])
else:
frequency_scaling = 1.
parameters = [amplitude, phase, frequency_scaling]
task = self._get_task_from_params(parameters=parameters)
tasks.append(task)
return tasks, all_max_indices, task_probabilities
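# Schematically, the queue's query() above is epsilon-greedy over a
# discretised parameter grid (names are illustrative, not the internals of
# SinePriorityQueue):
#
#     if np.random.random() < epsilon:   # explore: uniform random cell
#         idx = tuple(np.random.randint(n) for n in queue.shape)
#     else:                              # exploit: highest-loss cell
#         idx = np.unravel_index(np.argmax(queue), queue.shape)
#     params = [low + (i + 0.5) * block
#               for i, low, block in zip(idx, param_lows, block_sizes)]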
def _get_task_from_params(self, parameters: List) -> Any:
"""
Return sine function defined by parameters given
:param parameters: parameters defining the specific sin task in the distribution
:return modified_sin: sin function
(method differs from _sample_task in that it is not a random sample but
defined by parameters given)
"""
amplitude = parameters[0]
phase = parameters[1]
frequency_scaling = parameters[2]
def modified_sin(x):
return amplitude * np.sin(phase + frequency_scaling * x)
return modified_sin
def _generate_batch(self, tasks: List):
"""
Obtain batch of training examples from a list of tasks
:param tasks: list of tasks for which data points need to be sampled
:return x_batch: x points sampled from data
:return y_batch: y points associated with x_batch
"""
x_batch = np.stack([np.random.uniform(low=self.domain_bounds[0], high=self.domain_bounds[1], size=(self.inner_update_k, 1)) for _ in range(len(tasks))])
y_batch = np.stack([[tasks[t](x) for x in x_batch[t]] for t in range(len(tasks))])
return x_batch, y_batch
def _compute_loss(self, parameters, inputs, ground_truth):
"""
Computes loss of network
:param parameters: current weights of model
:param inputs: x data
:param ground_truth: y_data
:return loss: loss on ground truth vs output of network applied to inputs
"""
predictions = self.network_forward(parameters, inputs)
loss = np.mean((ground_truth - predictions) ** 2)
return loss
def _visualise(self, model_iterations, task, validation_x, validation_y, save_name, visualise_all=True):
"""
Visualise qualitative run.
:param validation_model_iterations: parameters of model after successive fine-tuning steps
:param val_task: task being evaluated
:param validation_x_batch: k data points fed to model for finetuning
:param validation_y_batch: ground truth data associated with validation_x_batch
:param save_name: name of file to be saved
:param visualise_all: whether to visualise all fine-tuning steps or just final
"""
# ground truth
plot_x = np.linspace(self.domain_bounds[0], self.domain_bounds[1], 100)
plot_y_ground_truth = [task(xi) for xi in plot_x]
fig = plt.figure()
plt.plot(plot_x, plot_y_ground_truth, label="Ground Truth")
final_plot_y_prediction = self.network_forward(model_iterations[-1], plot_x.reshape(len(plot_x), 1))
plt.plot(plot_x, final_plot_y_prediction, linestyle='dashed', linewidth=3.0, label='Fine-tuned MAML final update')
no_tuning_y_prediction = self.network_forward(model_iterations[0], plot_x.reshape(len(plot_x), 1))
plt.plot(plot_x, no_tuning_y_prediction, linestyle='dashed', linewidth=3.0, label='Untuned MAML prediction')
if visualise_all:
for i, (model_iteration) in enumerate(model_iterations[1:-1]):
plot_y_prediction = self.network_forward(model_iteration, plot_x.reshape(len(plot_x), 1))
plt.plot(plot_x, plot_y_prediction, linestyle='dashed') #, label='Fine-tuned MAML {} update'.format(i))
plt.scatter(validation_x, validation_y, marker='o', label='K Points')
plt.title("Validation of Sinusoid Meta-Regression")
plt.xlabel(r"x")
plt.ylabel(r"sin(x)")
plt.legend()
# fig.savefig(self.params.get("checkpoint_path") + save_name)
plt.close()
return fig
def _get_fixed_validation_tasks(self):
"""
If using fixed validation this method returns a set of tasks that are
equally spread across the task distribution space.
"""
# mesh of equally partitioned state space
if self.task_type == 'sin3d':
amplitude_spectrum, phase_spectrum, frequency_spectrum = np.mgrid[
self.amplitude_bounds[0]:self.amplitude_bounds[1]:self.validation_block_sizes[0],
self.phase_bounds[0]:self.phase_bounds[1]:self.validation_block_sizes[1],
self.frequency_bounds[0]:self.frequency_bounds[1]:self.validation_block_sizes[2]
]
parameter_space_tuples = np.vstack((amplitude_spectrum.flatten(), phase_spectrum.flatten(), frequency_spectrum.flatten())).T
else:
amplitude_spectrum, phase_spectrum = np.mgrid[
self.amplitude_bounds[0]:self.amplitude_bounds[1]:self.validation_block_sizes[0],
self.phase | random_line_split | |
jax_sinusoid.py | # output layer (no non-linearity)
layers.append(Dense(self.output_dimension))
# make jax stax object
model = stax.serial(*layers)
return model
def _get_optimiser(self):
"""
Return jax optimiser: initialisation, update method and parameter getter method.
Optimiser learning rate is given by config (meta_lr).
"""
return optimizers.adam(step_size=self.meta_lr)
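# --- Hedged sketch (not from the source) of consuming the returned triple with
# the legacy jax.experimental.optimizers API this file appears to use:
import jax.numpy as jnp
from jax import grad
from jax.experimental import optimizers
opt_init, opt_update, get_params = optimizers.adam(step_size=0.1)
_loss = lambda p: jnp.sum((p - 3.0) ** 2)
_state = opt_init(jnp.zeros(()))
for _step in range(200):
    _state = opt_update(_step, grad(_loss)(get_params(_state)), _state)
# get_params(_state) has now converged close to 3.0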
def _get_priority_queue(self):
"""Initiate priority queue"""
if self.task_type == 'sin3d':
param_ranges = self.params.get(["priority_queue", "param_ranges_3d"])
block_sizes = self.params.get(["priority_queue", "block_sizes_3d"])
elif self.task_type == 'sin2d':
param_ranges = self.params.get(["priority_queue", "param_ranges_2d"])
block_sizes = self.params.get(["priority_queue", "block_sizes_2d"])
return SinePriorityQueue(
queue_resume=self.params.get(["resume", "priority_queue"]),
counts_resume=self.params.get(["resume", "queue_counts"]),
sample_type=self.params.get(["priority_queue", "sample_type"]),
block_sizes=block_sizes,
param_ranges=param_ranges,
initial_value=self.params.get(["priority_queue", "initial_value"]),
epsilon_start=self.params.get(["priority_queue", "epsilon_start"]),
epsilon_final=self.params.get(["priority_queue", "epsilon_final"]),
epsilon_decay_start=self.params.get(["priority_queue", "epsilon_decay_start"]),
epsilon_decay_rate=self.params.get(["priority_queue", "epsilon_decay_rate"]),
burn_in=self.params.get(["priority_queue", "burn_in"]),
save_path=self.checkpoint_path
)
def _sample_task(self, batch_size, validate=False, step_count=None):
"""
Sample specific task(s) from defined distribution of tasks
E.g. one specific sine function from family of sines
:param batch_size: number of tasks to sample
:param validate: whether or not tasks are being used for validation
:param step_count: step count during training
:return tasks: batch of tasks
:return task_indices: indices of priority queue associated with batch of tasks
:return task_probabilities: probabilities of tasks sampled being chosen a priori
Returns a batch of sine functions shifted in the x direction by a phase parameter sampled randomly between phase_bounds
(set by config) and scaled in the y direction by an amplitude parameter sampled randomly between amplitude_bounds
(also set by config). For the 3d sine option, the function is also squeezed in the x direction by a frequency parameter.
"""
tasks = []
task_probabilities = []
all_max_indices = [] if self.priority_sample else None
for _ in range(batch_size):
# sample a task from task distribution and generate x, y tensors for that task
| # compute metrics for tb logging
queue_count_loss_correlation = self.priority_queue.compute_count_loss_correlation()
queue_mean = np.mean(self.priority_queue.get_queue())
queue_std = np.std(self.priority_queue.get_queue())
# write to tensorboard
if epsilon:
self.writer.add_scalar('queue_metrics/epsilon', epsilon, step_count)
self.writer.add_scalar('queue_metrics/queue_correlation', queue_count_loss_correlation, step_count)
self.writer.add_scalar('queue_metrics/queue_mean', queue_mean, step_count)
self.writer.add_scalar('queue_metrics/queue_std', queue_std, step_count)
else:
# sample randomly (vanilla MAML)
amplitude = random.uniform(self.amplitude_bounds[0], self.amplitude_bounds[1])
phase = random.uniform(self.phase_bounds[0], self.phase_bounds[1])
if self.task_type == 'sin3d':
frequency_scaling = random.uniform(self.frequency_bounds[0], self.frequency_bounds[1])
else:
frequency_scaling = 1.
parameters = [amplitude, phase, frequency_scaling]
task = self._get_task_from_params(parameters=parameters)
tasks.append(task)
return tasks, all_max_indices, task_probabilities
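# --- Hedged concept sketch: the epsilon_* queue parameters above suggest an
# epsilon-greedy choice over discretised task cells (illustrative only; the
# real SinePriorityQueue API is not shown in this file):
import numpy as np
def _epsilon_greedy_index(values, epsilon, rng):
    if rng.random() < epsilon:
        return int(rng.integers(len(values)))  # explore: uniform random cell
    return int(np.argmax(values))              # exploit: highest-priority cell
_idx = _epsilon_greedy_index(np.array([0.1, 0.9, 0.3]), 0.2, np.random.default_rng(0))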
def _get_task_from_params(self, parameters: List) -> Any:
"""
Return sine function defined by parameters given
:param parameters: parameters defining the specific sin task in the distribution
:return modified_sin: sin function
(method differs from _sample_task in that it is not a random sample but
defined by parameters given)
"""
amplitude = parameters[0]
phase = parameters[1]
frequency_scaling = parameters[2]
def modified_sin(x):
return amplitude * np.sin(phase + frequency_scaling * x)
return modified_sin
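# --- Hedged example of building and evaluating one task (illustrative values):
import numpy as np
_task = (lambda a, p, f: (lambda x: a * np.sin(p + f * x)))(2.0, 0.5, 1.0)
assert np.isclose(_task(0.0), 2.0 * np.sin(0.5))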
def _generate_batch(self, tasks: List):
"""
Obtain batch of training examples from a list of tasks
:param tasks: list of tasks for which data points need to be sampled
:return x_batch: x points sampled from data
:return y_batch: y points associated with x_batch
"""
x_batch = np.stack([np.random.uniform(low=self.domain_bounds[0], high=self.domain_bounds[1], size=(self.inner_update_k, 1)) for _ in range(len(tasks))])
y_batch = np.stack([[tasks[t](x) for x in x_batch[t]] for t in range(len(tasks))])
return x_batch, y_batch
def _compute_loss(self, parameters, inputs, ground_truth):
"""
Computes loss of network
:param parameters: current weights of model
:param inputs: x data
:param ground_truth: y_data
:return loss: loss on ground truth vs output of network applied to inputs
"""
predictions = self.network_forward(parameters, inputs)
loss = np.mean((ground_truth - predictions) ** 2)
return loss
def _visualise(self, model_iterations, task, validation_x, validation_y, save_name, visualise_all=True):
"""
Visualise qualitative run.
:param model_iterations: parameters of model after successive fine-tuning steps
:param task: task being evaluated
:param validation_x: k data points fed to model for fine-tuning
:param validation_y: ground truth data associated with validation_x
:param save_name: name of file to be saved
:param visualise_all: whether to visualise all fine-tuning steps or just final
"""
# ground truth
plot_x = np.linspace(self.domain_bounds[0], self.domain_bounds[1], 100)
plot_y_ground_truth = [task(xi) for xi in plot_x]
fig = plt.figure()
plt.plot(plot_x, plot_y_ground_truth, label="Ground Truth")
final_plot_y_prediction = self.network_forward(model_iterations[-1], plot_x.reshape(len(plot_x), 1))
plt.plot(plot_x, final_plot_y_prediction, linestyle='dashed', linewidth=3.0, label='Fine-tuned MAML final update')
no_tuning_y_prediction = self.network_forward(model_iterations[0], plot_x.reshape(len(plot_x), 1))
plt.plot(plot_x, no_tuning_y_prediction, linestyle='dashed', linewidth=3.0, label='Untuned MAML prediction')
if visualise_all:
for i, (model_iteration) in enumerate(model_iterations[1:-1]):
plot_y_prediction = self.network_forward(model_iteration, plot_x.reshape(len(plot_x), 1))
plt.plot(plot_x, plot_y_prediction, linestyle='dashed') #, label='Fine-tuned MAML {} update'.format(i))
plt.scatter(validation_x, validation_y, marker='o', label='K Points')
plt.title("Validation of Sinusoid Meta-Regression")
plt.xlabel(r"x")
plt.ylabel(r"sin(x)")
plt.legend()
# fig.savefig(self.params.get("checkpoint_path") + save_name)
plt.close()
return fig
def _get_fixed_validation_tasks(self):
"""
If using fixed validation, this method returns a set of tasks that are
equally spread across the task distribution space.
"""
# mesh of equally partitioned state space
if self.task_type == 'sin3d':
amplitude_spectrum, phase_spectrum, frequency_spectrum = np.mgrid[
self.amplitude_bounds[0]:self.amplitude_bounds[1]:self.validation_block_sizes[0],
self.phase_bounds[0]:self.phase_bounds[1]:self.validation_block_sizes[1],
self.frequency_bounds[0]:self.frequency_bounds[1]:self.validation_block_sizes[2]
]
parameter_space_tuples = np.vstack((amplitude_spectrum.flatten(), phase_spectrum.flatten(), frequency_spectrum.flatten())).T
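# --- Hedged mini-demo of the mgrid/flatten/vstack pattern (illustrative bounds):
# np.mgrid[0:2:1.0, 0:2:1.0] yields two 2x2 coordinate grids; stacking their
# flattened forms column-wise enumerates every (amplitude, phase) grid cell:
# np.vstack((a.flatten(), p.flatten())).T -> [[0,0],[0,1],[1,0],[1,1]]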
else:
amplitude_spectrum, phase_spectrum = np.mgrid[
self.amplitude_bounds[0]:self.amplitude_bounds[1]:self.validation_block_sizes[0],
self.phase_bounds | if self.priority_sample and not validate:
# query queue for next task parameters
max_indices, task_parameters, task_probability = self.priority_queue.query(step=step_count)
all_max_indices.append(max_indices)
task_probabilities.append(task_probability)
# get epsilon value
epsilon = self.priority_queue.get_epsilon()
# get task from parameters returned from query
amplitude = task_parameters[0]
phase = task_parameters[1]
if self.task_type == 'sin3d':
frequency_scaling = task_parameters[2]
else:
frequency_scaling = 1.
parameters = [amplitude, phase, frequency_scaling]
task = self._get_task_from_params(parameters=parameters)
| conditional_block |
jax_sinusoid.py | # output layer (no non-linearity)
layers.append(Dense(self.output_dimension))
# make jax stax object
model = stax.serial(*layers)
return model
def _get_optimiser(self):
"""
Return jax optimiser: initialisation, update method and parameter getter method.
Optimiser learning rate is given by config (meta_lr).
"""
return optimizers.adam(step_size=self.meta_lr)
def _get_priority_queue(self):
"""Initiate priority queue"""
if self.task_type == 'sin3d':
param_ranges = self.params.get(["priority_queue", "param_ranges_3d"])
block_sizes = self.params.get(["priority_queue", "block_sizes_3d"])
elif self.task_type == 'sin2d':
param_ranges = self.params.get(["priority_queue", "param_ranges_2d"])
block_sizes = self.params.get(["priority_queue", "block_sizes_2d"])
return SinePriorityQueue(
queue_resume=self.params.get(["resume", "priority_queue"]),
counts_resume=self.params.get(["resume", "queue_counts"]),
sample_type=self.params.get(["priority_queue", "sample_type"]),
block_sizes=block_sizes,
param_ranges=param_ranges,
initial_value=self.params.get(["priority_queue", "initial_value"]),
epsilon_start=self.params.get(["priority_queue", "epsilon_start"]),
epsilon_final=self.params.get(["priority_queue", "epsilon_final"]),
epsilon_decay_start=self.params.get(["priority_queue", "epsilon_decay_start"]),
epsilon_decay_rate=self.params.get(["priority_queue", "epsilon_decay_rate"]),
burn_in=self.params.get(["priority_queue", "burn_in"]),
save_path=self.checkpoint_path
)
def _sample_task(self, batch_size, validate=False, step_count=None):
"""
Sample specific task(s) from defined distribution of tasks
E.g. one specific sine function from family of sines
:param batch_size: number of tasks to sample
:param validate: whether or not tasks are being used for validation
:param step_count: step count during training
:return tasks: batch of tasks
:return task_indices: indices of priority queue associated with batch of tasks
:return task_probabilities: probabilities of tasks sampled being chosen a priori
Returns a batch of sine functions shifted in the x direction by a phase parameter sampled randomly between phase_bounds
(set by config) and scaled in the y direction by an amplitude parameter sampled randomly between amplitude_bounds
(also set by config). For the 3d sine option, the function is also squeezed in the x direction by a frequency parameter.
"""
tasks = []
task_probabilities = []
all_max_indices = [] if self.priority_sample else None
for _ in range(batch_size):
# sample a task from task distribution and generate x, y tensors for that task
if self.priority_sample and not validate:
# query queue for next task parameters
max_indices, task_parameters, task_probability = self.priority_queue.query(step=step_count)
all_max_indices.append(max_indices)
task_probabilities.append(task_probability)
# get epsilon value
epsilon = self.priority_queue.get_epsilon()
# get task from parameters returned from query
amplitude = task_parameters[0]
phase = task_parameters[1]
if self.task_type == 'sin3d':
frequency_scaling = task_parameters[2]
else:
frequency_scaling = 1.
parameters = [amplitude, phase, frequency_scaling]
task = self._get_task_from_params(parameters=parameters)
# compute metrics for tb logging
queue_count_loss_correlation = self.priority_queue.compute_count_loss_correlation()
queue_mean = np.mean(self.priority_queue.get_queue())
queue_std = np.std(self.priority_queue.get_queue())
# write to tensorboard
if epsilon:
self.writer.add_scalar('queue_metrics/epsilon', epsilon, step_count)
self.writer.add_scalar('queue_metrics/queue_correlation', queue_count_loss_correlation, step_count)
self.writer.add_scalar('queue_metrics/queue_mean', queue_mean, step_count)
self.writer.add_scalar('queue_metrics/queue_std', queue_std, step_count)
else:
# sample randomly (vanilla MAML)
amplitude = random.uniform(self.amplitude_bounds[0], self.amplitude_bounds[1])
phase = random.uniform(self.phase_bounds[0], self.phase_bounds[1])
if self.task_type == 'sin3d':
frequency_scaling = random.uniform(self.frequency_bounds[0], self.frequency_bounds[1])
else:
frequency_scaling = 1.
parameters = [amplitude, phase, frequency_scaling]
task = self._get_task_from_params(parameters=parameters)
tasks.append(task)
return tasks, all_max_indices, task_probabilities
def | (self, parameters: List) -> Any:
"""
Return sine function defined by parameters given
:param parameters: parameters defining the specific sin task in the distribution
:return modified_sin: sin function
(method differs from _sample_task in that it is not a random sample but
defined by parameters given)
"""
amplitude = parameters[0]
phase = parameters[1]
frequency_scaling = parameters[2]
def modified_sin(x):
return amplitude * np.sin(phase + frequency_scaling * x)
return modified_sin
def _generate_batch(self, tasks: List):
"""
Obtain batch of training examples from a list of tasks
:param tasks: list of tasks for which data points need to be sampled
:return x_batch: x points sampled from data
:return y_batch: y points associated with x_batch
"""
x_batch = np.stack([np.random.uniform(low=self.domain_bounds[0], high=self.domain_bounds[1], size=(self.inner_update_k, 1)) for _ in range(len(tasks))])
y_batch = np.stack([[tasks[t](x) for x in x_batch[t]] for t in range(len(tasks))])
return x_batch, y_batch
def _compute_loss(self, parameters, inputs, ground_truth):
"""
Computes loss of network
:param parameters: current weights of model
:param inputs: x data
:param ground_truth: y_data
:return loss: loss on ground truth vs output of network applied to inputs
"""
predictions = self.network_forward(parameters, inputs)
loss = np.mean((ground_truth - predictions) ** 2)
return loss
def _visualise(self, model_iterations, task, validation_x, validation_y, save_name, visualise_all=True):
"""
Visualise qualitative run.
:param model_iterations: parameters of model after successive fine-tuning steps
:param task: task being evaluated
:param validation_x: k data points fed to model for fine-tuning
:param validation_y: ground truth data associated with validation_x
:param save_name: name of file to be saved
:param visualise_all: whether to visualise all fine-tuning steps or just final
"""
# ground truth
plot_x = np.linspace(self.domain_bounds[0], self.domain_bounds[1], 100)
plot_y_ground_truth = [task(xi) for xi in plot_x]
fig = plt.figure()
plt.plot(plot_x, plot_y_ground_truth, label="Ground Truth")
final_plot_y_prediction = self.network_forward(model_iterations[-1], plot_x.reshape(len(plot_x), 1))
plt.plot(plot_x, final_plot_y_prediction, linestyle='dashed', linewidth=3.0, label='Fine-tuned MAML final update')
no_tuning_y_prediction = self.network_forward(model_iterations[0], plot_x.reshape(len(plot_x), 1))
plt.plot(plot_x, no_tuning_y_prediction, linestyle='dashed', linewidth=3.0, label='Untuned MAML prediction')
if visualise_all:
for i, (model_iteration) in enumerate(model_iterations[1:-1]):
plot_y_prediction = self.network_forward(model_iteration, plot_x.reshape(len(plot_x), 1))
plt.plot(plot_x, plot_y_prediction, linestyle='dashed') #, label='Fine-tuned MAML {} update'.format(i))
plt.scatter(validation_x, validation_y, marker='o', label='K Points')
plt.title("Validation of Sinusoid Meta-Regression")
plt.xlabel(r"x")
plt.ylabel(r"sin(x)")
plt.legend()
# fig.savefig(self.params.get("checkpoint_path") + save_name)
plt.close()
return fig
def _get_fixed_validation_tasks(self):
"""
If using fixed validation, this method returns a set of tasks that are
equally spread across the task distribution space.
"""
# mesh of equally partitioned state space
if self.task_type == 'sin3d':
amplitude_spectrum, phase_spectrum, frequency_spectrum = np.mgrid[
self.amplitude_bounds[0]:self.amplitude_bounds[1]:self.validation_block_sizes[0],
self.phase_bounds[0]:self.phase_bounds[1]:self.validation_block_sizes[1],
self.frequency_bounds[0]:self.frequency_bounds[1]:self.validation_block_sizes[2]
]
parameter_space_tuples = np.vstack((amplitude_spectrum.flatten(), phase_spectrum.flatten(), frequency_spectrum.flatten())).T
else:
amplitude_spectrum, phase_spectrum = np.mgrid[
self.amplitude_bounds[0]:self.amplitude_bounds[1]:self.validation_block_sizes[0],
self.phase_bounds | _get_task_from_params | identifier_name |
wsrest_face_detect_kinect.py | .http_get_token_in_header(web))
# response.
if server_timeout<=0 or client_timeout<=0:
web.ctx.status='401 Unauthorized'
return
t0 = int(time.time() * 1000)
# GET call with no parameter.
if not name:
web.header('Content-Type', 'application/xml')
# Detect faces.
result = self.detect_and_draw()
resource_xml = xml_util.dict_to_rdfxml(result, "face_detect")
# Persist to the DB when faces are detected.
if result['num_faces'] > 0:
# db_util.persist_resource(self.db, 'face_detect', resource_xml)
# log to the stm service
resource_rdfxml=xml_util.dict_to_rdfxml(result,"face_detect")
xml_response=http_util.http_request('post'\
,global_data.host_stm,"/"\
,None,global_data.access_token,resource_rdfxml)
web.debug("GET time: " + str(int(time.time() * 1000) - t0) + "ms")
return resource_xml
# Call with the name of the requested file.
else:
full_image_path = (snapshot_faces_dir + name)
try:
# returning from the saved file.
f=open(full_image_path,"rb")
web.header('Content-Type', 'image/png')
return f.read()
except IOError:
# file does not exist.
web.debug("no file")
web.header('Content-Type', 'application/xml')
return "<error>file not found</error>"
finally:
web.debug("GET time: " + str(int(time.time() * 1000) - t0) + "ms")
def detect_and_draw(self):#, resource_params):
# web.debug('detect draw')
t0 = int(time.time() * 1000)
global init_opencv, cascade, capture, storage
if init_opencv:
init_opencv = False
# the OpenCV API says this function is obsolete, but we can't
# cast the output of cvLoad to a HaarClassifierCascade, so use this anyways
# the size parameter is ignored
cascade = cvLoadHaarClassifierCascade( cascade_name, cvSize(1,1) );
if not cascade:
web.debug("ERROR: Could not load classifier cascade")
raise ValueError('[ERRO] Could not load classifier cascade.')
frame_copy = None
# Fetching the current image from the kinect service (rgb snapshot).
# inserting the token into the header
req=urllib2.Request(self.url_wsr_kinect_rgb)
req.add_header("Authenticate",("token="+global_data.access_token))
file=urllib2.urlopen(req)
# file = urllib.urlopen(self.url_wsr_kinect_rgb)
# close?
# urllib2 module sends HTTP/1.1 requests with Connection:close header included.
# Converting to PIL format
im=cStringIO.StringIO(file.read())
pil_img=Image.open(im)
# Converting from PIL format to IPL-OpenCV.
frame = adaptors.PIL2Ipl(pil_img)
if( not frame ):
web.debug("[INFO] Not a frame")
if( not frame_copy ):
frame_copy = cvCreateImage( cvSize(frame.width,frame.height), \
IPL_DEPTH_8U, frame.nChannels )
if( frame.origin == IPL_ORIGIN_TL ):
cvCopy( frame, frame_copy )
else:
cvFlip( frame, frame_copy, 0 )
img = frame_copy
# Creating a default copy of the resource passed as a parameter.
global resource
resource_result = {}
# Unique identifier of the response.
id_response = int(time.time() * 10000) # tenth of a millisecond.
resource_result['id'] = id_response
# Populating the result resource.
# resource_result['_max_num_faces'] = resource_params['_max_num_faces']
resource_result["image_full"] = str(id_response) + "_full.png"
resource_result['screen_width'] = str(img.width)
resource_result['screen_height'] = str(img.height)
# allocate temporary images
gray = cvCreateImage( cvSize(img.width,img.height), 8, 1 )
small_img = cvCreateImage( cvSize( cvRound (img.width/image_scale), \
cvRound (img.height/image_scale)), 8, 1 )
# convert color input image to grayscale
cvCvtColor( img, gray, CV_BGR2GRAY )
# scale input image for faster processing
cvResize( gray, small_img, CV_INTER_LINEAR )
#
cvEqualizeHist( small_img, small_img )
cvClearMemStorage( storage )
if( cascade ):
# t = cvGetTickCount()
t = int(time.time() * 1000)
# for i in range(1,6):
# cvClearMemStorage( storage )
faces = cvHaarDetectObjects( small_img, cascade, storage,
haar_scale, min_neighbors, haar_flags, min_size )
# t = cvGetTickCount() - t
t = int(time.time() * 1000) - t
# print "detection time = %gms" % (t/(cvGetTickFrequency()*1000.))
web.debug('detection time = ' + str(t) + 'ms image_scale = ' \
+ str(image_scale))
faces_dict={}
num_faces = 0
# loop over detected faces
if faces:
for r in faces:
# pt1 = cvPoint( int(r.x*image_scale), int(r.y*image_scale))
# pt2 = cvPoint( int((r.x+r.width)*image_scale), int((r.y+r.height)*image_scale) )
x1 = int(r.x*image_scale)
x2 = int((r.x+r.width)*image_scale)
dx1 = int(r.width*image_scale*0.12)
dx2 = (-1)*int(r.width*image_scale*0.13)
lx = int(x2+dx2-(x1+dx1))
ly = int(lx * 1.2 )
# ly = int(y2+dy2-(y1+dy1))
y1 = int(r.y*image_scale)
dy1 = int(r.height*image_scale*0.17)
pt1 = cvPoint( x1 + dx1 , y1 + dy1 )
y2 = y1+dy1 + ly
# y2 = int((r.y+r.height)*image_scale)
dy2 = 0 # int(r.height*image_scale*0.05)
pt2 = cvPoint( x2 + dx2 , y2 + dy2 )
# cvRectangle( gray, pt1, pt2, CV_RGB(255,0,0), 2, 8, 0 );
face_name="face"+str(num_faces)
faces_dict[face_name+"_x"]=str(x1+dx1)
faces_dict[face_name+"_y"]=str(y1+dy1)
faces_dict[face_name+"_sx"]=str(int(x2+dx2-(x1+dx1)))
faces_dict[face_name+"_sy"]=str(int(y2+dy2-(y1+dy1)))
# sub-image
sub_image = cvCreateImage( \
(lx, ly), 8, 1) # Parameters overwritten anyway...
sub_image = cvGetSubRect( img \
, ( x1+dx1, y1+dy1 , lx, ly ) )
sub_gray = cvCreateImage( \
cvSize(sub_image.width,sub_image.height), 8, 1 )
final_gray = cvCreateImage( \
cvSize(125, 150), 8, 1 )
# convert color input image to grayscale
cvCvtColor( sub_image, sub_gray, CV_BGR2GRAY )
# scale input image for faster processing
cvResize( sub_gray, final_gray, CV_INTER_LINEAR )
#
# cvEqualizeHist( final_gray, final_gray )
# Saving detected faces.
#cvSaveImage("/home/marcus/Desktop/faces/"
# cvSaveImage("/var/www/portal/images/faces/"
filename = snapshot_faces_dir \
+ str(id_response) + "_face" + str(num_faces) + ".png"
cvSaveImage(filename, final_gray)
faces_dict[face_name+"_filename"]=filename
num_faces = num_faces + 1
web.debug('faces count = ' + str(num_faces))
# web.debug('details of faces: ' + str(faces_dict))
# save the full image.
if num_faces > 0:
filename = snapshot_faces_dir \
+ str(id_response) + "_full" + ".png"
cvSaveImage(filename, img)
resource_result['num_faces'] = num_faces
resource_result['faces'] = faces_dict
t0 = int(time.time() * 1000) - t0 | # web.debug('detect draw total time: ' + str(t0) + " ms")
resource_result['response_time'] = t0
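# --- Hedged modern-equivalent sketch (assumption): the same grayscale/resize/
# detect pipeline with today's cv2 API instead of the legacy swig bindings above.
import cv2
_img = cv2.imread("frame.png")  # hypothetical input frame
_gray = cv2.cvtColor(_img, cv2.COLOR_BGR2GRAY)
_small = cv2.resize(_gray, (int(_img.shape[1] / 1.3), int(_img.shape[0] / 1.3)))
_small = cv2.equalizeHist(_small)
_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + "haarcascade_frontalface_default.xml")
_faces = _cascade.detectMultiScale(_small, scaleFactor=1.2, minNeighbors=2, minSize=(20, 20))
# each (x, y, w, h) in _faces must be scaled back up by image_scale (1.3)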
| random_line_split | |
wsrest_face_detect_kinect.py | , flags=CV_HAAR_DO_CANNY_PRUNING,
# min_size=<minimum possible face size
min_size = cvSize(20,20)
image_scale = 1.3
haar_scale = 1.2
min_neighbors = 2
haar_flags = CV_HAAR_DO_CANNY_PRUNING #0
init_opencv = True
urls = (
'/(.*)', 'index'
)
web.config.debug = True # Debug true causa problema quando usado junto com sessoes.
snapshot_faces_dir = os.environ['HOME'] + "/robot_/faces/"
resource_name = "FaceDetection"
resource = { \
"status" : "ready", \
"num_faces" : 0, \
"screen_width" : "", \
"screen_height" : "", \
"response_time" : 0, \
"id" : 0 \
}
# ToDo: implement access security using sessions.
#session = web.session.Session(app, web.session.DiskStore('sessions'))
#, initializer={'logged_in': False})
class index:
# ========== Inicializacao unica por classe =================
# Variaveis declaradas aqui sao usadas na classe usando self.
url_wsr_kinect_rgb = "http://localhost:8094/rgb_image/"
# # Conectando com o BD.
# db = db_util.connect()
# ============ Final da Inicializacao unica por classe =================
def GET(self, name):
# web.header('Content-Type', 'application/xml')
global resource
# authorization.
server_timeout,client_timeout,client_username\
=http_util.http_authorization(global_data.host_auth\
,global_data.access_token\
,http_util.http_get_token_in_header(web))
# response.
if server_timeout<=0 or client_timeout<=0:
web.ctx.status='401 Unauthorized'
return
t0 = int(time.time() * 1000)
# GET call with no parameter.
if not name:
web.header('Content-Type', 'application/xml')
# Detect faces.
result = self.detect_and_draw()
resource_xml = xml_util.dict_to_rdfxml(result, "face_detect")
# Persist to the DB when faces are detected.
if result['num_faces'] > 0:
# db_util.persist_resource(self.db, 'face_detect', resource_xml)
# log to the stm service
resource_rdfxml=xml_util.dict_to_rdfxml(result,"face_detect")
xml_response=http_util.http_request('post'\
,global_data.host_stm,"/"\
,None,global_data.access_token,resource_rdfxml)
web.debug("GET time: " + str(int(time.time() * 1000) - t0) + "ms")
return resource_xml
# Call with the name of the requested file.
else:
full_image_path = (snapshot_faces_dir + name)
try:
# returning from the saved file.
f=open(full_image_path,"rb")
web.header('Content-Type', 'image/png')
return f.read()
except IOError:
# file does not exist.
web.debug("no file")
web.header('Content-Type', 'application/xml')
return "<error>file not found</error>"
finally:
web.debug("GET time: " + str(int(time.time() * 1000) - t0) + "ms")
def detect_and_draw(self):#, resource_params):
# web.debug('detect draw')
| req.add_header("Authenticate",("token="+global_data.access_token))
file=urllib2.urlopen(req)
# file = urllib.urlopen(self.url_wsr_kinect_rgb)
# close?
# urllib2 module sends HTTP/1.1 requests with Connection:close header included.
# Converting to PIL format
im=cStringIO.StringIO(file.read())
pil_img=Image.open(im)
# Converting from PIL format to IPL-OpenCV.
frame = adaptors.PIL2Ipl(pil_img)
if( not frame ):
web.debug("[INFO] Not a frame")
if( not frame_copy ):
frame_copy = cvCreateImage( cvSize(frame.width,frame.height), \
IPL_DEPTH_8U, frame.nChannels )
if( frame.origin == IPL_ORIGIN_TL ):
cvCopy( frame, frame_copy )
else:
cvFlip( frame, frame_copy, 0 )
img = frame_copy
# Creating a default copy of the resource passed as a parameter.
global resource
resource_result = {}
# Unique identifier of the response.
id_response = int(time.time() * 10000) # tenth of a millisecond.
resource_result['id'] = id_response
# Populating the result resource.
# resource_result['_max_num_faces'] = resource_params['_max_num_faces']
resource_result["image_full"] = str(id_response) + "_full.png"
resource_result['screen_width'] = str(img.width)
resource_result['screen_height'] = str(img.height)
# allocate temporary images
gray = cvCreateImage( cvSize(img.width,img.height), 8, 1 )
small_img = cvCreateImage( cvSize( cvRound (img.width/image_scale), \
cvRound (img.height/image_scale)), 8, 1 )
# convert color input image to grayscale
cvCvtColor( img, gray, CV_BGR2GRAY )
# scale input image for faster processing
cvResize( gray, small_img, CV_INTER_LINEAR )
#
cvEqualizeHist( small_img, small_img )
cvClearMemStorage( storage )
if( cascade ):
# t = cvGetTickCount()
t = int(time.time() * 1000)
# for i in range(1,6):
# cvClearMemStorage( storage )
faces = cvHaarDetectObjects( small_img, cascade, storage,
haar_scale, min_neighbors, haar_flags, min_size )
# t = cvGetTickCount() - t
t = int(time.time() * 1000) - t
# print "detection time = %gms" % (t/(cvGetTickFrequency()*1000.))
web.debug('detection time = ' + str(t) + 'ms image_scale = ' \
+ str(image_scale))
faces_dict={}
num_faces = 0
# loop over detected faces
if faces:
for r in faces:
# pt1 = cvPoint( int(r.x*image_scale), int(r.y*image_scale))
# pt2 = cvPoint( int((r.x+r.width)*image_scale), int((r.y+r.height)*image_scale) )
x1 = int(r.x*image_scale)
x2 = int((r.x+r.width)*image_scale)
dx1 = int(r.width*image_scale*0.12)
dx2 = (-1)*int(r.width*image_scale*0.13)
lx = int(x2+dx2-(x1+dx1))
ly = int(lx * 1.2 )
# ly = int(y2+dy2-(y1+dy1))
y1 = int(r.y*image_scale)
dy1 = int(r.height*image_scale*0.17)
pt1 = cvPoint( x1 + dx1 , y1 + dy1 )
y2 = y1+dy1 + ly
# y2 = int((r.y+r.height)*image_scale)
dy2 = 0 # int(r.height*image_scale*0.05)
pt2 = cvPoint( x2 + dx2 , y2 + dy2 )
# cvRectangle( gray, pt1, pt2, CV_RGB(255,0,0), 2, 8, 0 );
face_name="face"+str(num_faces)
faces_dict[face_name+"_x"]=str(x1+dx1)
faces_dict[face_name+"_y"]=str(y1+dy1)
faces_dict[face_name+"_sx"]=str(int(x2+dx2-(x1+dx1)))
faces_dict[face_name+"_sy"]=str(int(y2+dy2-(y1+dy1)))
# sub-image
sub_image = cvCreateImage( \
(lx, ly), 8, 1) # Parameters overwritten anyway...
sub_image | t0 = int(time.time() * 1000)
global init_opencv, cascade, capture, storage
if init_opencv:
init_opencv = False
# the OpenCV API says this function is obsolete, but we can't
# cast the output of cvLoad to a HaarClassifierCascade, so use this anyways
# the size parameter is ignored
cascade = cvLoadHaarClassifierCascade( cascade_name, cvSize(1,1) );
if not cascade:
web.debug("ERROR: Could not load classifier cascade")
raise ValueError('[ERRO] Could not load classifier cascade.')
frame_copy = None
# Fetching the current image from the kinect service (rgb snapshot).
# inserting the token into the header
req=urllib2.Request(self.url_wsr_kinect_rgb) | identifier_body |
wsrest_face_detect_kinect.py | , flags=CV_HAAR_DO_CANNY_PRUNING,
# min_size=<minimum possible face size
min_size = cvSize(20,20)
image_scale = 1.3
haar_scale = 1.2
min_neighbors = 2
haar_flags = CV_HAAR_DO_CANNY_PRUNING #0
init_opencv = True
urls = (
'/(.*)', 'index'
)
web.config.debug = True # Debug true causa problema quando usado junto com sessoes.
snapshot_faces_dir = os.environ['HOME'] + "/robot_/faces/"
resource_name = "FaceDetection"
resource = { \
"status" : "ready", \
"num_faces" : 0, \
"screen_width" : "", \
"screen_height" : "", \
"response_time" : 0, \
"id" : 0 \
}
# ToDo: implement access security using sessions.
#session = web.session.Session(app, web.session.DiskStore('sessions'))
#, initializer={'logged_in': False})
class index:
# ========== Inicializacao unica por classe =================
# Variaveis declaradas aqui sao usadas na classe usando self.
url_wsr_kinect_rgb = "http://localhost:8094/rgb_image/"
# # Conectando com o BD.
# db = db_util.connect()
# ============ Final da Inicializacao unica por classe =================
def GET(self, name):
# web.header('Content-Type', 'application/xml')
global resource
# authorization.
server_timeout,client_timeout,client_username\
=http_util.http_authorization(global_data.host_auth\
,global_data.access_token\
,http_util.http_get_token_in_header(web))
# response.
if server_timeout<=0 or client_timeout<=0:
web.ctx.status='401 Unauthorized'
return
t0 = int(time.time() * 1000)
# GET call with no parameter.
if not name:
web.header('Content-Type', 'application/xml')
# Detect faces.
result = self.detect_and_draw()
resource_xml = xml_util.dict_to_rdfxml(result, "face_detect")
# Persist to the DB when faces are detected.
if result['num_faces'] > 0:
# db_util.persist_resource(self.db, 'face_detect', resource_xml)
# log to the stm service
resource_rdfxml=xml_util.dict_to_rdfxml(result,"face_detect")
xml_response=http_util.http_request('post'\
,global_data.host_stm,"/"\
,None,global_data.access_token,resource_rdfxml)
web.debug("GET time: " + str(int(time.time() * 1000) - t0) + "ms")
return resource_xml
# Call with the name of the requested file.
else:
full_image_path = (snapshot_faces_dir + name)
try:
# returning from the saved file.
f=open(full_image_path,"rb")
web.header('Content-Type', 'image/png')
return f.read()
except IOError:
# file does not exist.
web.debug("no file")
web.header('Content-Type', 'application/xml')
return "<error>file not found</error>"
finally:
web.debug("GET time: " + str(int(time.time() * 1000) - t0) + "ms")
def detect_and_draw(self):#, resource_params):
# web.debug('detect draw')
t0 = int(time.time() * 1000)
global init_opencv, cascade, capture, storage
if init_opencv:
init_opencv = False
# the OpenCV API says this function is obsolete, but we can't
# cast the output of cvLoad to a HaarClassifierCascade, so use this anyways
# the size parameter is ignored
cascade = cvLoadHaarClassifierCascade( cascade_name, cvSize(1,1) );
if not cascade:
web.debug("ERROR: Could not load classifier cascade")
raise ValueError('[ERRO] Could not load classifier cascade.')
frame_copy = None
# Fetching the current image from the kinect service (rgb snapshot).
# inserting the token into the header
req=urllib2.Request(self.url_wsr_kinect_rgb)
req.add_header("Authenticate",("token="+global_data.access_token))
file=urllib2.urlopen(req)
# file = urllib.urlopen(self.url_wsr_kinect_rgb)
# close?
# urllib2 module sends HTTP/1.1 requests with Connection:close header included.
# Converting to PIL format
im=cStringIO.StringIO(file.read())
pil_img=Image.open(im)
# Converting from PIL format to IPL-OpenCV.
frame = adaptors.PIL2Ipl(pil_img)
if( not frame ):
web.debug("[INFO] Not a frame")
if( not frame_copy ):
frame_copy = cvCreateImage( cvSize(frame.width,frame.height), \
IPL_DEPTH_8U, frame.nChannels )
if( frame.origin == IPL_ORIGIN_TL ):
cvCopy( frame, frame_copy )
else:
cvFlip( frame, frame_copy, 0 )
img = frame_copy
# Creating a default copy of the resource passed as a parameter.
global resource
resource_result = {}
# Unique identifier of the response.
id_response = int(time.time() * 10000) # tenth of a millisecond.
resource_result['id'] = id_response
# Populating the result resource.
# resource_result['_max_num_faces'] = resource_params['_max_num_faces']
resource_result["image_full"] = str(id_response) + "_full.png"
resource_result['screen_width'] = str(img.width)
resource_result['screen_height'] = str(img.height)
# allocate temporary images
gray = cvCreateImage( cvSize(img.width,img.height), 8, 1 )
small_img = cvCreateImage( cvSize( cvRound (img.width/image_scale), \
cvRound (img.height/image_scale)), 8, 1 )
# convert color input image to grayscale
cvCvtColor( img, gray, CV_BGR2GRAY )
# scale input image for faster processing
cvResize( gray, small_img, CV_INTER_LINEAR )
#
cvEqualizeHist( small_img, small_img )
cvClearMemStorage( storage )
if( cascade ):
# t = cvGetTickCount()
t = int(time.time() * 1000)
# for i in range(1,6):
# cvClearMemStorage( storage )
faces = cvHaarDetectObjects( small_img, cascade, storage,
haar_scale, min_neighbors, haar_flags, min_size )
# t = cvGetTickCount() - t
t = int(time.time() * 1000) - t
# print "detection time = %gms" % (t/(cvGetTickFrequency()*1000.))
web.debug('detection time = ' + str(t) + 'ms image_scale = ' \
+ str(image_scale))
faces_dict={}
num_faces = 0
# loop over detected faces
if faces:
| faces_dict[face_name+"_x"]=str(x1+dx1)
faces_dict[face_name+"_y"]=str(y1+dy1)
faces_dict[face_name+"_sx"]=str(int(x2+dx2-(x1+dx1)))
faces_dict[face_name+"_sy"]=str(int(y2+dy2-(y1+dy1)))
# sub-image
sub_image = cvCreateImage( \
(lx, ly), 8, 1) # Parameters overwritten anyway...
sub_image | for r in faces:
# pt1 = cvPoint( int(r.x*image_scale), int(r.y*image_scale))
# pt2 = cvPoint( int((r.x+r.width)*image_scale), int((r.y+r.height)*image_scale) )
x1 = int(r.x*image_scale)
x2 = int((r.x+r.width)*image_scale)
dx1 = int(r.width*image_scale*0.12)
dx2 = (-1)*int(r.width*image_scale*0.13)
lx = int(x2+dx2-(x1+dx1))
ly = int(lx * 1.2 )
# ly = int(y2+dy2-(y1+dy1))
y1 = int(r.y*image_scale)
dy1 = int(r.height*image_scale*0.17)
pt1 = cvPoint( x1 + dx1 , y1 + dy1 )
y2 = y1+dy1 + ly
# y2 = int((r.y+r.height)*image_scale)
dy2 = 0 # int(r.height*image_scale*0.05)
pt2 = cvPoint( x2 + dx2 , y2 + dy2 )
# cvRectangle( gray, pt1, pt2, CV_RGB(255,0,0), 2, 8, 0 );
face_name="face"+str(num_faces) | conditional_block |
wsrest_face_detect_kinect.py | , flags=CV_HAAR_DO_CANNY_PRUNING,
# min_size=<minimum possible face size
min_size = cvSize(20,20)
image_scale = 1.3
haar_scale = 1.2
min_neighbors = 2
haar_flags = CV_HAAR_DO_CANNY_PRUNING #0
init_opencv = True
urls = (
'/(.*)', 'index'
)
web.config.debug = True # Debug true causa problema quando usado junto com sessoes.
snapshot_faces_dir = os.environ['HOME'] + "/robot_/faces/"
resource_name = "FaceDetection"
resource = { \
"status" : "ready", \
"num_faces" : 0, \
"screen_width" : "", \
"screen_height" : "", \
"response_time" : 0, \
"id" : 0 \
}
# ToDo: implement access security using sessions.
#session = web.session.Session(app, web.session.DiskStore('sessions'))
#, initializer={'logged_in': False})
class | :
# ========== Inicializacao unica por classe =================
# Variaveis declaradas aqui sao usadas na classe usando self.
url_wsr_kinect_rgb = "http://localhost:8094/rgb_image/"
# # Conectando com o BD.
# db = db_util.connect()
# ============ Final da Inicializacao unica por classe =================
def GET(self, name):
# web.header('Content-Type', 'application/xml')
global resource
# authorization.
server_timeout,client_timeout,client_username\
=http_util.http_authorization(global_data.host_auth\
,global_data.access_token\
,http_util.http_get_token_in_header(web))
# response.
if server_timeout<=0 or client_timeout<=0:
web.ctx.status='401 Unauthorized'
return
t0 = int(time.time() * 1000)
# GET call with no parameter.
if not name:
web.header('Content-Type', 'application/xml')
# Detect faces.
result = self.detect_and_draw()
resource_xml = xml_util.dict_to_rdfxml(result, "face_detect")
# Persist to the DB when faces are detected.
if result['num_faces'] > 0:
# db_util.persist_resource(self.db, 'face_detect', resource_xml)
# log to the stm service
resource_rdfxml=xml_util.dict_to_rdfxml(result,"face_detect")
xml_response=http_util.http_request('post'\
,global_data.host_stm,"/"\
,None,global_data.access_token,resource_rdfxml)
web.debug("GET time: " + str(int(time.time() * 1000) - t0) + "ms")
return resource_xml
# Call with the name of the requested file.
else:
full_image_path = (snapshot_faces_dir + name)
try:
# returning from the saved file.
f=open(full_image_path,"rb")
web.header('Content-Type', 'image/png')
return f.read()
except IOError:
# file does not exist.
web.debug("no file")
web.header('Content-Type', 'application/xml')
return "<error>file not found</error>"
finally:
web.debug("GET time: " + str(int(time.time() * 1000) - t0) + "ms")
def detect_and_draw(self):#, resource_params):
# web.debug('detect draw')
t0 = int(time.time() * 1000)
global init_opencv, cascade, capture, storage
if init_opencv:
init_opencv = False
# the OpenCV API says this function is obsolete, but we can't
# cast the output of cvLoad to a HaarClassifierCascade, so use this anyways
# the size parameter is ignored
cascade = cvLoadHaarClassifierCascade( cascade_name, cvSize(1,1) );
if not cascade:
web.debug("ERROR: Could not load classifier cascade")
raise ValueError('[ERRO] Could not load classifier cascade.')
frame_copy = None
# Fetching the current image from the kinect service (rgb snapshot).
# inserting the token into the header
req=urllib2.Request(self.url_wsr_kinect_rgb)
req.add_header("Authenticate",("token="+global_data.access_token))
file=urllib2.urlopen(req)
# file = urllib.urlopen(self.url_wsr_kinect_rgb)
# close?
# urllib2 module sends HTTP/1.1 requests with Connection:close header included.
# Converting to PIL format
im=cStringIO.StringIO(file.read())
pil_img=Image.open(im)
# Converting from PIL format to IPL-OpenCV.
frame = adaptors.PIL2Ipl(pil_img)
if( not frame ):
web.debug("[INFO] Not a frame")
if( not frame_copy ):
frame_copy = cvCreateImage( cvSize(frame.width,frame.height), \
IPL_DEPTH_8U, frame.nChannels )
if( frame.origin == IPL_ORIGIN_TL ):
cvCopy( frame, frame_copy )
else:
cvFlip( frame, frame_copy, 0 )
img = frame_copy
# Creating a default copy of the resource passed as a parameter.
global resource
resource_result = {}
# Unique identifier of the response.
id_response = int(time.time() * 10000) # tenth of a millisecond.
resource_result['id'] = id_response
# Populating the result resource.
# resource_result['_max_num_faces'] = resource_params['_max_num_faces']
resource_result["image_full"] = str(id_response) + "_full.png"
resource_result['screen_width'] = str(img.width)
resource_result['screen_height'] = str(img.height)
# allocate temporary images
gray = cvCreateImage( cvSize(img.width,img.height), 8, 1 )
small_img = cvCreateImage( cvSize( cvRound (img.width/image_scale), \
cvRound (img.height/image_scale)), 8, 1 )
# convert color input image to grayscale
cvCvtColor( img, gray, CV_BGR2GRAY )
# scale input image for faster processing
cvResize( gray, small_img, CV_INTER_LINEAR )
#
cvEqualizeHist( small_img, small_img )
cvClearMemStorage( storage )
if( cascade ):
# t = cvGetTickCount()
t = int(time.time() * 1000)
# for i in range(1,6):
# cvClearMemStorage( storage )
faces = cvHaarDetectObjects( small_img, cascade, storage,
haar_scale, min_neighbors, haar_flags, min_size )
# t = cvGetTickCount() - t
t = int(time.time() * 1000) - t
# print "detection time = %gms" % (t/(cvGetTickFrequency()*1000.))
web.debug('detection time = ' + str(t) + 'ms image_scale = ' \
+ str(image_scale))
faces_dict={}
num_faces = 0
# loop over detected faces
if faces:
for r in faces:
# pt1 = cvPoint( int(r.x*image_scale), int(r.y*image_scale))
# pt2 = cvPoint( int((r.x+r.width)*image_scale), int((r.y+r.height)*image_scale) )
x1 = int(r.x*image_scale)
x2 = int((r.x+r.width)*image_scale)
dx1 = int(r.width*image_scale*0.12)
dx2 = (-1)*int(r.width*image_scale*0.13)
lx = int(x2+dx2-(x1+dx1))
ly = int(lx * 1.2 )
# ly = int(y2+dy2-(y1+dy1))
y1 = int(r.y*image_scale)
dy1 = int(r.height*image_scale*0.17)
pt1 = cvPoint( x1 + dx1 , y1 + dy1 )
y2 = y1+dy1 + ly
# y2 = int((r.y+r.height)*image_scale)
dy2 = 0 # int(r.height*image_scale*0.05)
pt2 = cvPoint( x2 + dx2 , y2 + dy2 )
# cvRectangle( gray, pt1, pt2, CV_RGB(255,0,0), 2, 8, 0 );
face_name="face"+str(num_faces)
faces_dict[face_name+"_x"]=str(x1+dx1)
faces_dict[face_name+"_y"]=str(y1+dy1)
faces_dict[face_name+"_sx"]=str(int(x2+dx2-(x1+dx1)))
faces_dict[face_name+"_sy"]=str(int(y2+dy2-(y1+dy1)))
# sub-image
sub_image = cvCreateImage( \
(lx, ly), 8, 1) # Parameters overwritten anyway...
sub | index | identifier_name |
platformAccessory.ts | .config.updateInterval !== undefined) {
this.updateInterval = this.platform.config.updateInterval * 1000;
}
this.accessory.getService(this.platform.Service.AccessoryInformation)!
.setCharacteristic(this.platform.Characteristic.Manufacturer, 'Extron, Inc.')
.setCharacteristic(this.platform.Characteristic.Model, 'Crosspoint ULTRA 88 HVA')
.setCharacteristic(this.platform.Characteristic.SerialNumber, this.platform.config.serialNumber);
this.avService = this.accessory.getService(this.platform.Service.Television)
|| this.accessory.addService(this.platform.Service.Television);
this.setupPresets();
const uuid = this.platform.api.hap.uuid.generate('homebridge:extron-matrix-switch' + accessory.context.device.displayName);
this.accessory.UUID = uuid;
this.accessory.category = this.platform.api.hap.Categories.AUDIO_RECEIVER;
this.avService.setCharacteristic(this.platform.Characteristic.Name, 'Extron');
this.avService.setCharacteristic(this.platform.Characteristic.SleepDiscoveryMode,
this.platform.Characteristic.SleepDiscoveryMode.ALWAYS_DISCOVERABLE);
this.avService.getCharacteristic(this.platform.Characteristic.Active)
.onSet(this.setOnOffState.bind(this))
.onGet(this.getOnOffState.bind(this));
this.getOnOffState();
this.updatePresetStatus();
this.avService.getCharacteristic(this.platform.Characteristic.ActiveIdentifier)
.onSet(async (value) => {
// the value will be the value you set for the Identifier Characteristic
// on the Input Source service that was selected - see input sources below.
await this.changeInput(parseInt(value.toString()));
});
// set the service name, this is what is displayed as the default name on the Home app
// in this example we are using the name we stored in the `accessory.context` in the `discoverDevices` method.
this.avService.setCharacteristic(this.platform.Characteristic.Name, accessory.context.device.displayName);
// This is required to be implemented, so currently it does not do anything useful.
this.avService.getCharacteristic(this.platform.Characteristic.RemoteKey)
.onSet((newValue) => {
switch(newValue) {
default: {
break;
}
}
});
// Setup lock
// NOTE: the result of this ternary is discarded as written; presumably it should
// be stored (e.g. as a lock command) for later use when setting the target lock state.
this.platform.config.lockLevel === 'level1' ? '1X' : '2X';
this.lockService = this.accessory.getService(this.platform.Service.LockMechanism)
|| this.accessory.addService(this.platform.Service.LockMechanism);
this.lockService.setCharacteristic(this.platform.Characteristic.Name, accessory.context.device.displayName);
this.lockService.getCharacteristic(this.platform.Characteristic.LockCurrentState)
.onGet(this.getCurrentLockState.bind(this)); | this.lockService.getCharacteristic(this.platform.Characteristic.LockTargetState)
.onSet(this.setTargetLockState.bind(this))
.onGet(this.getCurrentLockState.bind(this));
this.updateLockStatus();
// Set interval to keep HomeKit updated on the current state of power, input states,
// and lock status.
setInterval(async () => {
await this.getOnOffState();
await this.updatePresetStatus();
await this.updateLockStatus();
}, this.updateInterval);
this.platform.api.publishExternalAccessories(PLUGIN_NAME, [this.accessory]);
}
/**
* Set up all presets from the plugin configuration. Note: currently all presets are set up
* as Component (YPbPr) video inputs, but this does not appear to make a difference in
* Apple's Home.app at this time.
*
* Note that TypeScript/JavaScript and HomeKit use 0-based arrays, but for user convenience
* any labelled inputs are 1-based.
*/
setupPresets() {
if(!this.presetsConfigured) {
const presets = this.platform.config.presets as ExtronPreset[];
presets.forEach(async (preset, i) => {
const inputService = this.accessory.getService('input' + i) ||
this.accessory.addService(this.platform.Service.InputSource, 'input' + i, preset.name);
this.platform.log.info('Adding input %s as number %s', preset.name, i);
inputService
.setCharacteristic(this.platform.Characteristic.Identifier, i)
.setCharacteristic(this.platform.Characteristic.ConfiguredName, preset.name)
.setCharacteristic(this.platform.Characteristic.Name, preset.name)
.setCharacteristic(this.platform.Characteristic.IsConfigured, this.platform.Characteristic.IsConfigured.CONFIGURED)
.setCharacteristic(this.platform.Characteristic.InputSourceType, this.platform.Characteristic.InputSourceType.COMPONENT_VIDEO)
.setCharacteristic(this.platform.Characteristic.CurrentVisibilityState,
this.platform.Characteristic.CurrentVisibilityState.SHOWN);
this.avService.addLinkedService(inputService);
});
this.presetsConfigured = true;
}
}
// #region Power Status
/**
* Returns the current state of the physical Extron unit itself.
* @returns The characteristic status of the Extron unit itself.
*/
async getOnOffState(): Promise<number> {
const muteResponse = await this.telnetCommand('WVM' + String.fromCharCode(13));
this.platform.log.debug('GET: Getting status of unit: %s', muteResponse);
// All zeroes indicate there are no active mutes; otherwise, mutes are in effect.
// Hence, if there are any other values in the response, we assume the unit is off.
const activeFromUnit = !muteResponse.split('').some(x => x === '1' || x === '2' || x === '3');
this.platform.log.debug('GET: Start Update Active State');
this.avService.updateCharacteristic(this.platform.Characteristic.Active, activeFromUnit);
return activeFromUnit ?
this.platform.Characteristic.Active.ACTIVE :
this.platform.Characteristic.Active.INACTIVE;
}
/**
* Sets the user-requested on or off state of the Extron unit.
* @param value The user requested on or off state.
*/
async setOnOffState(value: CharacteristicValue) {
this.platform.log.debug('SET: Setting status from HomeKit to: %s', value);
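// Assumption (not confirmed by this file): per Extron SIS conventions, '1*B'
// mutes all video outputs (treated here as "off") and '0*B' unmutes them ("on");
// the 'Vmt0' response below confirms the unmute case.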
const command = (value === 1) ? '0*B' : '1*B';
const response = await this.telnetCommand(command);
this.platform.log.debug('SET: Received status of %s', response);
let active = this.platform.Characteristic.Active.INACTIVE;
if(response === 'Vmt0') {
active = this.platform.Characteristic.Active.ACTIVE;
this.platform.log.debug('SET: Unit is active');
}
this.avService.updateCharacteristic(this.platform.Characteristic.Active, active);
}
// #endregion Power Status
//#region Input Status
/**
* Updates the HomeKit state of the preset that is currently active on the Extron
* unit itself.
*
* Note that when setting the current preset within HomeKit, the currentPreset value
* is decremented to handle 0- and 1-based arrays appropriately.
*/
async updatePresetStatus() {
// Preset "0" is the last set preset #, so query it to get the current state.
//const extronPreset = await this.getPreset(0);
const currentPreset = await this.telnetCommand('W0*1*1VC' + String.fromCharCode(13));
const currentExtronVideoPreset = parseInt(currentPreset.split(' ')[0]);
if(currentExtronVideoPreset !== this.currentPreset) {
this.currentPreset = currentExtronVideoPreset;
// NOTE: `--this.currentPreset` converts to HomeKit's 0-based identifier but also
// mutates the stored 1-based preset number, which can skew the comparison above
// on the next poll; a non-mutating `this.currentPreset - 1` would avoid that.
this.avService.updateCharacteristic(this.platform.Characteristic.ActiveIdentifier, --this.currentPreset);
}
}
/**
* Handles switching of presets on the Extron unit, implemented using the HomeKit InputService.
* Logs errors if an out-of-bounds number is specified for an index that does
* not exist.
* @param value The HomeKit-specified input number to switch to.
*/
async changeInput(value: number) {
const newValue = value + 1;
this.platform.log.info('set Active Identifier => setNewValue: ' + newValue);
try {
const response = await this.telnetCommand(newValue + '.');
const responseIndex = newValue < 10 ? '0' + newValue : newValue.toString();
if(response === 'Rpr' + responseIndex) {
this.platform.log.info('Switched to preset ' + newValue + ': got response ' + response);
this.currentPreset = newValue;
this.avService.updateCharacteristic(this.platform.Characteristic.ActiveIdentifier, value);
} else {
switch(response) {
case 'E11':
this.platform.log.info('Preset number %s is out of range of this unit', newValue);
break;
default:
this.platform.log.info('Response does not match: %s with a string length of ', response, response.length);
}
}
} catch(error) {
this.platform.log.error('Error: ' + error);
}
}
//#endregion Input Status
//#region Lock Status
/**
* Returns the current panel lock status from the unit itself.
* @returns The current panel lock status.
*/
async updateLockStatus() : Promise<CharacteristicValue> {
const response = await this.telnetCommand('X');
if(response === '0') {
this.lockService.updateCharacteristic(this.platform.Characteristic.LockCurrentState,
this.platform.Characteristic.LockCurrentState.UNSECURED);
this.lockService.updateCharacteristic(this.platform.Characteristic.LockTargetState,
this.platform.Characteristic.LockTargetState.UNSECURED);
return this.platform.Characteristic.LockCurrentState.UNSECURED;
} | random_line_split | |
platformAccessory.ts | .updateInterval !== undefined) {
this.updateInterval = this.platform.config.updateInterval * 1000;
}
this.accessory.getService(this.platform.Service.AccessoryInformation)!
.setCharacteristic(this.platform.Characteristic.Manufacturer, 'Extron, Inc.')
.setCharacteristic(this.platform.Characteristic.Model, 'Crosspoint ULTRA 88 HVA')
.setCharacteristic(this.platform.Characteristic.SerialNumber, this.platform.config.serialNumber);
this.avService = this.accessory.getService(this.platform.Service.Television)
|| this.accessory.addService(this.platform.Service.Television);
this.setupPresets();
const uuid = this.platform.api.hap.uuid.generate('homebridge:extron-matrix-switch' + accessory.context.device.displayName);
this.accessory.UUID = uuid;
this.accessory.category = this.platform.api.hap.Categories.AUDIO_RECEIVER;
this.avService.setCharacteristic(this.platform.Characteristic.Name, 'Extron');
this.avService.setCharacteristic(this.platform.Characteristic.SleepDiscoveryMode,
this.platform.Characteristic.SleepDiscoveryMode.ALWAYS_DISCOVERABLE);
this.avService.getCharacteristic(this.platform.Characteristic.Active)
.onSet(this.setOnOffState.bind(this))
.onGet(this.getOnOffState.bind(this));
this.getOnOffState();
this.updatePresetStatus();
this.avService.getCharacteristic(this.platform.Characteristic.ActiveIdentifier)
.onSet(async (value) => {
// the value will be the value you set for the Identifier Characteristic
// on the Input Source service that was selected - see input sources below.
await this.changeInput(parseInt(value.toString()));
});
// set the service name, this is what is displayed as the default name on the Home app
// in this example we are using the name we stored in the `accessory.context` in the `discoverDevices` method.
this.avService.setCharacteristic(this.platform.Characteristic.Name, accessory.context.device.displayName);
// This is required to be implemented, so currently it does not do anything useful.
this.avService.getCharacteristic(this.platform.Characteristic.RemoteKey)
.onSet((newValue) => {
switch(newValue) {
default: {
break;
}
}
});
// Setup lock
// NOTE: the result of this ternary is discarded as written; presumably it should
// be stored (e.g. as a lock command) for later use when setting the target lock state.
this.platform.config.lockLevel === 'level1' ? '1X' : '2X';
this.lockService = this.accessory.getService(this.platform.Service.LockMechanism)
|| this.accessory.addService(this.platform.Service.LockMechanism);
this.lockService.setCharacteristic(this.platform.Characteristic.Name, accessory.context.device.displayName);
this.lockService.getCharacteristic(this.platform.Characteristic.LockCurrentState)
.onGet(this.getCurrentLockState.bind(this));
this.lockService.getCharacteristic(this.platform.Characteristic.LockTargetState)
.onSet(this.setTargetLockState.bind(this))
.onGet(this.getCurrentLockState.bind(this));
this.updateLockStatus();
// Set interval to keep HomeKit updated on the current state of power, input states,
// and lock status.
setInterval(async () => {
await this.getOnOffState();
await this.updatePresetStatus();
await this.updateLockStatus();
}, this.updateInterval);
this.platform.api.publishExternalAccessories(PLUGIN_NAME, [this.accessory]);
}
/**
* Set up all presets from the plugin configuration. Note: currently all presets are set up
* as Component (YPbPr) video inputs, but this does not appear to make a difference in
* Apple's Home.app at this time.
*
* Note that TypeScript/JavaScript and HomeKit use 0-based arrays, but for user convenience
* any labelled inputs are 1-based.
*/
setupPresets() {
if(!this.presetsConfigured) {
const presets = this.platform.config.presets as ExtronPreset[];
presets.forEach(async (preset, i) => {
const inputService = this.accessory.getService('input' + i) ||
this.accessory.addService(this.platform.Service.InputSource, 'input' + i, preset.name);
this.platform.log.info('Adding input %s as number %s', preset.name, i);
inputService
.setCharacteristic(this.platform.Characteristic.Identifier, i)
.setCharacteristic(this.platform.Characteristic.ConfiguredName, preset.name)
.setCharacteristic(this.platform.Characteristic.Name, preset.name)
.setCharacteristic(this.platform.Characteristic.IsConfigured, this.platform.Characteristic.IsConfigured.CONFIGURED)
.setCharacteristic(this.platform.Characteristic.InputSourceType, this.platform.Characteristic.InputSourceType.COMPONENT_VIDEO)
.setCharacteristic(this.platform.Characteristic.CurrentVisibilityState,
this.platform.Characteristic.CurrentVisibilityState.SHOWN);
this.avService.addLinkedService(inputService);
});
this.presetsConfigured = true;
}
}
// #region Power Status
/**
* Returns the current state of the physical Extron unit itself.
* @returns The characteristic status of the Extron unit itself.
*/
async | (): Promise<number> {
const muteResponse = await this.telnetCommand('WVM' + String.fromCharCode(13));
this.platform.log.debug('GET: Getting status of unit: %s', muteResponse);
// All zeroes indicate there are no active mutes; otherwise, mutes are in effect.
// Hence, if there are any other values in the response, we assume the unit is off.
const activeFromUnit = !muteResponse.split('').some(x => x === '1' || x === '2' || x === '3');
this.platform.log.debug('GET: Start Update Active State');
this.avService.updateCharacteristic(this.platform.Characteristic.Active, activeFromUnit);
return activeFromUnit ?
this.platform.Characteristic.Active.ACTIVE :
this.platform.Characteristic.Active.INACTIVE;
}
/**
* Sets the user-requested on or off state of the Extron unit.
* @param value The user requested on or off state.
*/
async setOnOffState(value: CharacteristicValue) {
this.platform.log.debug('SET: Setting status from HomeKit to: %s', value);
const command = (value === 1) ? '0*B' : '1*B';
const response = await this.telnetCommand(command);
this.platform.log.debug('SET: Received status of %s', response);
let active = this.platform.Characteristic.Active.INACTIVE;
if(response === 'Vmt0') {
active = this.platform.Characteristic.Active.ACTIVE;
this.platform.log.debug('SET: Unit is active');
}
this.avService.updateCharacteristic(this.platform.Characteristic.Active, active);
}
// #endregion Power Status
//#region Input Status
/**
* Updates the HomeKit state of the preset that is currently active on the Extron
* unit itself.
*
* Note that when setting the current preset within HomeKit, the currentPreset value
* is decremented to handle 0- and 1-based arrays appropriately.
*/
async updatePresetStatus() {
// Preset "0" is the last set preset #, so query it to get the current state.
//const extronPreset = await this.getPreset(0);
const currentPreset = await this.telnetCommand('W0*1*1VC' + String.fromCharCode(13));
const currentExtronVideoPreset = parseInt(currentPreset.split(' ')[0]);
if(currentExtronVideoPreset !== this.currentPreset) {
this.currentPreset = currentExtronVideoPreset;
// NOTE: `--this.currentPreset` converts to HomeKit's 0-based identifier but also
// mutates the stored 1-based preset number, which can skew the comparison above
// on the next poll; a non-mutating `this.currentPreset - 1` would avoid that.
this.avService.updateCharacteristic(this.platform.Characteristic.ActiveIdentifier, --this.currentPreset);
}
}
/**
* Handles switching of presets on the Extron unit, implemented using the HomeKit InputService.
* Logs an error if an out-of-bounds number is specified for an index that does
* not exist.
* @param value The HomeKit-specified input number to switch to.
*/
async changeInput(value: number) {
const newValue = value + 1;
this.platform.log.info('set Active Identifier => setNewValue: ' + newValue);
try {
const response = await this.telnetCommand(newValue + '.');
const responseIndex = newValue < 10 ? '0' + newValue : newValue.toString();
if(response === 'Rpr' + responseIndex) {
this.platform.log.info('Switched to preset ' + newValue + ': got response ' + response);
this.currentPreset = newValue;
this.avService.updateCharacteristic(this.platform.Characteristic.ActiveIdentifier, value);
} else {
switch(response) {
case 'E11':
this.platform.log.info('Preset number %s is out of range of this unit', newValue);
break;
default:
this.platform.log.info('Response does not match: %s with a string length of %s', response, response.length);
}
}
} catch(error) {
this.platform.log.error('Error: ' + error);
}
}
//#endregion Input Status
//#region Lock Status
/**
* Returns the current panel lock status from the unit itself.
* @returns The current panel lock status.
*/
async updateLockStatus() : Promise<CharacteristicValue> {
const response = await this.telnetCommand('X');
if(response === '0') {
this.lockService.updateCharacteristic(this.platform.Characteristic.LockCurrentState,
this.platform.Characteristic.LockCurrentState.UNSECURED);
this.lockService.updateCharacteristic(this.platform.Characteristic.LockTargetState,
this.platform.Characteristic.LockTargetState.UNSECURED);
return this.platform.Characteristic.LockCurrentState.UNSECURED;
} | getOnOffState | identifier_name |
platformAccessory.ts | this.getOnOffState();
this.updatePresetStatus();
this.avService.getCharacteristic(this.platform.Characteristic.ActiveIdentifier)
.onSet(async (value) => {
// the value will be the value you set for the Identifier Characteristic
// on the Input Source service that was selected - see input sources below.
await this.changeInput(parseInt(value.toString()));
});
// set the service name, this is what is displayed as the default name on the Home app
// in this example we are using the name we stored in the `accessory.context` in the `discoverDevices` method.
this.avService.setCharacteristic(this.platform.Characteristic.Name, accessory.context.device.displayName);
// RemoteKey is required to be implemented, but currently it does not do anything useful.
this.avService.getCharacteristic(this.platform.Characteristic.RemoteKey)
.onSet((newValue) => {
switch(newValue) {
default: {
break;
}
}
});
// Setup lock
this.lockingCode = this.platform.config.lockLevel === 'level1' ? '1X' : '2X';
this.lockService = this.accessory.getService(this.platform.Service.LockMechanism)
|| this.accessory.addService(this.platform.Service.LockMechanism);
this.lockService.setCharacteristic(this.platform.Characteristic.Name, accessory.context.device.displayName);
this.lockService.getCharacteristic(this.platform.Characteristic.LockCurrentState)
.onGet(this.getCurrentLockState.bind(this));
this.lockService.getCharacteristic(this.platform.Characteristic.LockTargetState)
.onSet(this.setTargetLockState.bind(this))
.onGet(this.getCurrentLockState.bind(this));
this.updateLockStatus();
// Set interval to keep HomeKit updated on the current state of power, input states,
// and lock status.
setInterval(async () => {
await this.getOnOffState();
await this.updatePresetStatus();
await this.updateLockStatus();
}, this.updateInterval);
this.platform.api.publishExternalAccessories(PLUGIN_NAME, [this.accessory]);
}
/**
* Set up all presets from the plugin configuration. Note that currently all presets are set up
* as Component (YPbPr) video inputs, but this does not appear to make a difference in
* Apple's Home.app at this time.
*
* Note that TypeScript/JavaScript and HomeKit use 0-based arrays, but for user convenience
* any labelled inputs are 1-based.
*/
setupPresets() {
if(!this.presetsConfigured) {
const presets = this.platform.config.presets as ExtronPreset[];
presets.forEach(async (name, i) => {
const inputService = this.accessory.getService('input' + i) ||
this.accessory.addService(this.platform.Service.InputSource, 'input' + i, name.name);
this.platform.log.info('Adding input %s as number %s', name.name, i);
inputService
.setCharacteristic(this.platform.Characteristic.Identifier, i)
.setCharacteristic(this.platform.Characteristic.ConfiguredName, name.name)
.setCharacteristic(this.platform.Characteristic.Name, name.name)
.setCharacteristic(this.platform.Characteristic.IsConfigured, this.platform.Characteristic.IsConfigured.CONFIGURED)
.setCharacteristic(this.platform.Characteristic.InputSourceType, this.platform.Characteristic.InputSourceType.COMPONENT_VIDEO)
.setCharacteristic(this.platform.Characteristic.CurrentVisibilityState,
this.platform.Characteristic.CurrentVisibilityState.SHOWN);
this.avService.addLinkedService(inputService);
});
this.presetsConfigured = true;
}
}
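// --- Illustrative sketch, not part of the original plugin ---
// The 0-based/1-based mapping described in the comment above: HomeKit
// Identifiers count from 0, while user-facing Extron preset numbers count from 1.
const toExtronPreset = (homeKitIdentifier: number): number => homeKitIdentifier + 1; // 0 -> preset 1
const toHomeKitIdentifier = (extronPreset: number): number => extronPreset - 1;      // preset 1 -> 0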
// #region Power Status
/**
* Returns the current state of the physical Extron unit itself.
* @returns The characteristic status of the Extron unit itself.
*/
async getOnOffState(): Promise<number> {
const muteResponse = await this.telnetCommand('WVM' + String.fromCharCode(13));
this.platform.log.debug('GET: Getting status of unit: %s', muteResponse);
// All zeroes indicate there are no active mutes; otherwise, mutes are in effect.
// Hence, if the response contains any other values, we assume the unit is off.
const activeFromUnit = !muteResponse.split('').some(x => x === '1' || x === '2' || x === '3');
this.platform.log.debug('GET: Start Update Active State');
this.avService.updateCharacteristic(this.platform.Characteristic.Active, activeFromUnit);
return activeFromUnit ?
this.platform.Characteristic.Active.ACTIVE :
this.platform.Characteristic.Active.INACTIVE;
}
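// --- Illustrative sketch, not part of the original plugin ---
// How the mute scan above classifies sample 'WVM' responses (the response
// format is an assumption inferred from the code, not Extron documentation):
const isUnitActive = (muteResponse: string): boolean =>
  !muteResponse.split('').some(x => x === '1' || x === '2' || x === '3');
// isUnitActive('0000') === true  -> no active mutes, unit reported as on
// isUnitActive('0100') === false -> at least one mute, unit reported as off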
/**
* Sets the user-requested on or off state of the Extron unit.
* @param value The user requested on or off state.
*/
async setOnOffState(value: CharacteristicValue) {
this.platform.log.debug('SET: Setting status from HomeKit to: %s', value);
const command = (value === 1) ? '0*B' : '1*B';
const response = await this.telnetCommand(command);
this.platform.log.debug('SET: Received status of %s', response);
let active = this.platform.Characteristic.Active.INACTIVE;
if(response === 'Vmt0') {
active = this.platform.Characteristic.Active.ACTIVE;
this.platform.log.debug('SET: Unit is active');
}
this.avService.updateCharacteristic(this.platform.Characteristic.Active, active);
}
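// --- Illustrative sketch, not part of the original plugin ---
// The command mapping used by setOnOffState(): turning the unit "on" in
// HomeKit unmutes video ('0*B'), turning it "off" mutes video ('1*B').
const powerCommand = (homeKitActive: number): string =>
  homeKitActive === 1 ? '0*B' : '1*B';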
// #endregion Power Status
//#region Input Status
/**
* Updates the HomeKit state of the preset that is currently active on the Extron
* unit itself.
*
* Note that when setting the current preset within HomeKit, the currentPreset value
* is decremented to handle 0- and 1-based arrays appropriately.
*/
async updatePresetStatus() {
// Preset "0" is the last set preset #, so query it to get the current state.
//const extronPreset = await this.getPreset(0);
const currentPreset = await this.telnetCommand('W0*1*1VC' + String.fromCharCode(13));
const currentExtronVideoPreset = parseInt(currentPreset.split(' ')[0]);
if(currentExtronVideoPreset !== this.currentPreset) {
this.currentPreset = currentExtronVideoPreset;
this.avService.updateCharacteristic(this.platform.Characteristic.ActiveIdentifier, --this.currentPreset);
}
}
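// --- Illustrative sketch, not part of the original plugin ---
// Parsing the preset query reply as done above, assuming a space-separated
// "<video> <audio>" response such as '2 2':
const parseVideoPreset = (raw: string): number => parseInt(raw.split(' ')[0], 10);
// parseVideoPreset('2 2') === 2; HomeKit's ActiveIdentifier then becomes 2 - 1 === 1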
/**
* Handles switching of presets on the Extron unit, implemented using the HomeKit InputService.
* Logs an error if an out-of-bounds number is specified for an index that does
* not exist.
* @param value The HomeKit-specified input number to switch to.
*/
async changeInput(value: number) {
const newValue = value + 1;
this.platform.log.info('set Active Identifier => setNewValue: ' + newValue);
try {
const response = await this.telnetCommand(newValue + '.');
const responseIndex = newValue < 10 ? '0' + newValue : newValue.toString();
if(response === 'Rpr' + responseIndex) {
this.platform.log.info('Switched to preset ' + newValue + ': got response ' + response);
this.currentPreset = newValue;
this.avService.updateCharacteristic(this.platform.Characteristic.ActiveIdentifier, value);
} else {
switch(response) {
case 'E11':
this.platform.log.info('Preset number %s is out of range of this unit', newValue);
break;
default:
this.platform.log.info('Response does not match: %s with a string length of %s', response, response.length);
}
}
} catch(error) {
this.platform.log.error('Error: ' + error);
}
}
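// --- Illustrative sketch, not part of the original plugin ---
// The zero-padded acknowledgement changeInput() expects (the 'RprNN' format
// is inferred from the comparison above):
const expectedAck = (preset: number): string =>
  'Rpr' + (preset < 10 ? '0' + preset : preset.toString());
// expectedAck(3) === 'Rpr03', expectedAck(12) === 'Rpr12'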
//#endregion Input Status
//#region Lock Status
/**
* Returns the current panel lock status from the unit itself.
* @returns The current panel lock status.
*/
async updateLockStatus() : Promise<CharacteristicValue> {
const response = await this.telnetCommand('X');
if(response === '0') {
this.lockService.updateCharacteristic(this.platform.Characteristic.LockCurrentState,
this.platform.Characteristic.LockCurrentState.UNSECURED);
this.lockService.updateCharacteristic(this.platform.Characteristic.LockTargetState,
this.platform.Characteristic.LockTargetState.UNSECURED);
return this.platform.Characteristic.LockCurrentState.UNSECURED;
} else {
this.lockService.updateCharacteristic(this.platform.Characteristic.LockCurrentState,
this.platform.Characteristic.LockCurrentState.SECURED);
this.lockService.updateCharacteristic(this.platform.Characteristic.LockTargetState,
this.platform.Characteristic.LockTargetState.SECURED);
return this.platform.Characteristic.LockCurrentState.SECURED;
}
}
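// --- Illustrative sketch, not part of the original plugin ---
// The response mapping used above: '0' means the front panel is unlocked,
// any other value is treated as locked.
const lockStateFromResponse = (response: string): 'UNSECURED' | 'SECURED' =>
  response === '0' ? 'UNSECURED' : 'SECURED';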
/**
* Sets the user-requested lock status.
* @param value The target value of what the lock should be.
*/
async setTargetLockState(value: CharacteristicValue) {
if(value === this.platform.Characteristic.LockTargetState.SECURED) {
const response = await this.telnetCommand(this.lockingCode);
if(response[3] === this.lockingCode[0]) {
this.lockService.updateCharacteristic(this.platform.Characteristic.LockCurrentState,
this.platform.Characteristic.LockCurrentState.SECURED);
this.platform.log.info('Lock returned a response of %s', response);
} else | {
this.lockService.updateCharacteristic(this.platform.Characteristic.LockCurrentState,
this.platform.Characteristic.LockCurrentState.UNKNOWN);
this.platform.log.debug('Locking response was %s, Expected Exe%s', response, this.lockingCode[0]);
} | conditional_block | |
registry.ts | configuration
*
* Throws an error in case the configuration is incorrect
* FIXME(tonyo): rewrite this with JSON schemas
*
* @param checksums Raw configuration
*/
protected castChecksums(checksums: any): ChecksumEntry[] {
if (!checksums) {
return [];
}
if (!(checksums instanceof Array)) {
throw new ConfigurationError(
'Invalid type of "checksums": should be an array'
);
}
const resultChecksums: ChecksumEntry[] = [];
checksums.forEach(item => {
if (typeof item !== 'object' || !item.algorithm || !item.format) {
throw new ConfigurationError(
`Invalid checksum type: ${JSON.stringify(item)}`
);
}
// FIXME(tonyo): this is ugly as hell :(
// This all has to be replaced with JSON schema
if (
!(Object as any).values(HashAlgorithm).includes(item.algorithm) ||
!(Object as any).values(HashOutputFormat).includes(item.format)
) {
throw new ConfigurationError(
`Invalid checksum attributes: ${JSON.stringify(item)}`
);
}
resultChecksums.push({ algorithm: item.algorithm, format: item.format });
});
return resultChecksums;
}
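// --- Illustrative sketch, not part of the original module ---
// A checksums configuration that would pass the validation above, assuming
// 'sha256', 'hex', and 'base64' are members of HashAlgorithm/HashOutputFormat:
const exampleChecksums = [
  { algorithm: 'sha256', format: 'hex' },
  { algorithm: 'sha256', format: 'base64' },
];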
/**
* Extracts Registry target options from the raw configuration
*/
public getRegistryConfig(): RegistryConfig {
const registryType = this.config.type;
if (
[RegistryPackageType.APP, RegistryPackageType.SDK].indexOf(
registryType
) === -1
) {
throw new ConfigurationError(
`Invalid registry type specified: "${registryType}"`
);
}
let urlTemplate;
if (registryType === RegistryPackageType.APP) {
urlTemplate = this.config.urlTemplate;
if (urlTemplate && typeof urlTemplate !== 'string') {
throw new ConfigurationError(
`Invalid "urlTemplate" specified: ${urlTemplate}`
);
}
}
const releaseConfig = this.config.config;
if (!releaseConfig) {
throw new ConfigurationError(
'Cannot find configuration dictionary for release registry'
);
}
const canonicalName = releaseConfig.canonical;
if (!canonicalName) {
throw new ConfigurationError(
'Canonical name not found in the configuration'
);
}
const linkPrereleases = this.config.linkPrereleases || false;
if (typeof linkPrereleases !== 'boolean') {
throw new ConfigurationError('Invalid type of "linkPrereleases"');
}
const checksums = this.castChecksums(this.config.checksums);
const onlyIfPresentStr = this.config.onlyIfPresent || undefined;
let onlyIfPresent;
if (onlyIfPresentStr) {
if (typeof onlyIfPresentStr !== 'string') {
throw new ConfigurationError('Invalid type of "onlyIfPresent"');
}
onlyIfPresent = stringToRegexp(onlyIfPresentStr);
}
return {
canonicalName,
checksums,
linkPrereleases,
onlyIfPresent,
registryRemote: DEFAULT_REGISTRY_REMOTE,
type: registryType,
urlTemplate,
};
}
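// --- Illustrative sketch, not part of the original module ---
// A raw target configuration that getRegistryConfig() would accept; all
// values are hypothetical, and 'sdk' assumes the string value of
// RegistryPackageType.SDK:
const exampleRawConfig = {
  type: 'sdk',
  config: { canonical: 'npm:@sentry/browser' },
  linkPrereleases: false,
  checksums: [{ algorithm: 'sha256', format: 'hex' }],
  onlyIfPresent: 'sentry-browser-.*\\.tgz',
};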
/**
* Creates a symlink, overwriting the existing one
*
* @param target Target path
* @param newFile Path to the new symlink
*/
public forceSymlink(target: string, newFile: string): void {
if (fs.existsSync(newFile)) {
fs.unlinkSync(newFile);
}
fs.symlinkSync(target, newFile);
}
/**
* Create symbolic links to the new version file
*
* "latest.json" link is not updated if the new version is "older" (e.g., it's
* a patch release for an older major version).
*
* @param versionFilePath Path to the new version file
* @param newVersion The new version
* @param oldVersion The previous latest version
*/
public createSymlinks(
versionFilePath: string,
newVersion: string,
oldVersion?: string
): void {
const parsedNewVersion = parseVersion(newVersion) || undefined;
if (!parsedNewVersion) {
throw new ConfigurationError(
`Cannot parse version: "${newVersion}"`
);
}
const parsedOldVersion =
(oldVersion ? parseVersion(oldVersion) : undefined) || undefined;
const baseVersionName = path.basename(versionFilePath);
const packageDir = path.dirname(versionFilePath);
// link latest, but only if the new version is "newer"
if (
parsedOldVersion &&
!versionGreaterOrEqualThan(parsedNewVersion, parsedOldVersion)
) {
logger.warn(
`Not updating the latest version file: current version is "${oldVersion}", new version is "${newVersion}"`
);
} else {
logger.debug(
`Changing symlink for "latest.json" from version "${oldVersion}" to "${newVersion}"`
);
this.forceSymlink(baseVersionName, path.join(packageDir, 'latest.json'));
}
// link major
const majorVersionLink = `${parsedNewVersion.major}.json`;
this.forceSymlink(baseVersionName, path.join(packageDir, majorVersionLink));
// link minor
const minorVersionLink = `${parsedNewVersion.major}.${parsedNewVersion.minor}.json`;
this.forceSymlink(baseVersionName, path.join(packageDir, minorVersionLink));
}
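// --- Illustrative sketch, not part of the original module ---
// Expected layout after createSymlinks('<dir>/1.2.3.json', '1.2.3', '1.2.2'):
//   <dir>/latest.json -> 1.2.3.json  (only because 1.2.3 >= 1.2.2)
//   <dir>/1.json      -> 1.2.3.json
//   <dir>/1.2.json    -> 1.2.3.json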
/**
* Returns the path to the SDK, given its canonical name
*
* @param registryDir The path to the local registry
* @param canonical The SDK's canonical name
* @returns The SDK path
*/
public getSdkPackagePath(registryDir: string, canonical: string): string {
const packageDirs = parseCanonical(canonical);
return [registryDir, 'packages'].concat(packageDirs).join(path.sep);
}
/**
* Returns the path to the app, given its canonical name
*
* @param registryDir The path to the local registry
* @param canonical The app's canonical name
* @returns The app path
*/
public getAppPackagePath(registryDir: string, canonical: string): string {
const packageDirs = parseCanonical(canonical);
if (packageDirs[0] !== 'app') {
throw new ConfigurationError(
`Invalid canonical entry for an app: ${canonical}`
);
}
return [registryDir, 'apps'].concat(packageDirs.slice(1)).join(path.sep);
}
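// --- Illustrative sketch, not part of the original module ---
// Assuming parseCanonical('npm:@sentry/browser') yields
// ['npm', '@sentry', 'browser'], the helpers above build:
//   getSdkPackagePath('/registry', 'npm:@sentry/browser')
//     === '/registry/packages/npm/@sentry/browser'
//   getAppPackagePath('/registry', 'app:craft') === '/registry/apps/craft'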
/**
* Returns the path to the package from its canonical name
*
* @param registryDir The path to the local registry
* @param canonical The app's canonical name
*/
public getPackageDirPath(registryDir: string, canonical: string): string |
/**
* Adds file URLs to the manifest
*
* URL template is taken from "urlTemplate" configuration argument
*
* FIXME(tonyo): LEGACY function, left for compatibility, replaced by addFilesData
*
* @param manifest Package manifest
* @param version The new version
* @param revision Git commit SHA to be published
*/
public async addFileLinks(
manifest: { [key: string]: any },
version: string,
revision: string
): Promise<void> {
if (!this.registryConfig.urlTemplate) {
return;
}
const artifacts = await this.getArtifactsForRevision(revision);
if (artifacts.length === 0) {
logger.warn('No artifacts found, not adding any links to the manifest');
return;
}
const fileUrls: { [_: string]: string } = {};
for (const artifact of artifacts) {
fileUrls[artifact.filename] = renderTemplateSafe(
this.registryConfig.urlTemplate,
{
file: artifact.filename,
revision,
version,
}
);
}
logger.debug(
`Writing file urls to the manifest, files found: ${artifacts.length}`
);
manifest.file_urls = fileUrls;
}
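// --- Illustrative sketch, not part of the original module ---
// Assuming mustache-style placeholders in renderTemplateSafe and a
// hypothetical template 'https://example.invalid/{{version}}/{{file}}',
// the loop above would produce entries such as:
//   manifest.file_urls['craft-1.2.3.tgz']
//     === 'https://example.invalid/1.2.3/craft-1.2.3.tgz'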
/**
* Extends the artifact entry with additional information
*
* Information on checksums and download URLs is added here
*
* @param artifact Artifact
* @param version The new version
* @param revision Git commit SHA to be published
*
*/
public async getArtifactData(
artifact: RemoteArtifact,
version: string,
revision: string
): Promise<any> {
const artifactData: any = {};
if (this.registryConfig.urlTemplate) {
artifactData.url = renderTemplateSafe(this.registryConfig.urlTemplate, {
file: artifact.filename,
revision,
version,
});
}
if (this.registryConfig.checksums.length > 0) {
const fileChecksums: { [key: string]: string } = {};
for (const checksumType of this.registryConfig.checksums) {
const { algorithm, format } = checksumType;
const checksum = await this.artifactProvider.getChecksum(
artifact,
algorithm,
| {
if (this.registryConfig.type === RegistryPackageType.SDK) {
return this.getSdkPackagePath(registryDir, canonical);
} else if (this.registryConfig.type === RegistryPackageType.APP) {
return this.getAppPackagePath(registryDir, canonical);
} else {
throw new ConfigurationError(
`Unknown registry package type: ${this.registryConfig.type}`
);
}
} | identifier_body |
registry.ts | configuration
*
* Throws an error in case the configuration is incorrect
* FIXME(tonyo): rewrite this with JSON schemas
*
* @param checksums Raw configuration
*/
protected | (checksums: any): ChecksumEntry[] {
if (!checksums) {
return [];
}
if (!(checksums instanceof Array)) {
throw new ConfigurationError(
'Invalid type of "checksums": should be an array'
);
}
const resultChecksums: ChecksumEntry[] = [];
checksums.forEach(item => {
if (typeof item !== 'object' || !item.algorithm || !item.format) {
throw new ConfigurationError(
`Invalid checksum type: ${JSON.stringify(item)}`
);
}
// FIXME(tonyo): this is ugly as hell :(
// This all has to be replaced with JSON schema
if (
!(Object as any).values(HashAlgorithm).includes(item.algorithm) ||
!(Object as any).values(HashOutputFormat).includes(item.format)
) {
throw new ConfigurationError(
`Invalid checksum attributes: ${JSON.stringify(item)}`
);
}
resultChecksums.push({ algorithm: item.algorithm, format: item.format });
});
return resultChecksums;
}
/**
* Extracts Registry target options from the raw configuration
*/
public getRegistryConfig(): RegistryConfig {
const registryType = this.config.type;
if (
[RegistryPackageType.APP, RegistryPackageType.SDK].indexOf(
registryType
) === -1
) {
throw new ConfigurationError(
`Invalid registry type specified: "${registryType}"`
);
}
let urlTemplate;
if (registryType === RegistryPackageType.APP) {
urlTemplate = this.config.urlTemplate;
if (urlTemplate && typeof urlTemplate !== 'string') {
throw new ConfigurationError(
`Invalid "urlTemplate" specified: ${urlTemplate}`
);
}
}
const releaseConfig = this.config.config;
if (!releaseConfig) {
throw new ConfigurationError(
'Cannot find configuration dictionary for release registry'
);
}
const canonicalName = releaseConfig.canonical;
if (!canonicalName) {
throw new ConfigurationError(
'Canonical name not found in the configuration'
);
}
const linkPrereleases = this.config.linkPrereleases || false;
if (typeof linkPrereleases !== 'boolean') {
throw new ConfigurationError('Invalid type of "linkPrereleases"');
}
const checksums = this.castChecksums(this.config.checksums);
const onlyIfPresentStr = this.config.onlyIfPresent || undefined;
let onlyIfPresent;
if (onlyIfPresentStr) {
if (typeof onlyIfPresentStr !== 'string') {
throw new ConfigurationError('Invalid type of "onlyIfPresent"');
}
onlyIfPresent = stringToRegexp(onlyIfPresentStr);
}
return {
canonicalName,
checksums,
linkPrereleases,
onlyIfPresent,
registryRemote: DEFAULT_REGISTRY_REMOTE,
type: registryType,
urlTemplate,
};
}
/**
* Creates a symlink, overwriting the existing one
*
* @param target Target path
* @param newFile Path to the new symlink
*/
public forceSymlink(target: string, newFile: string): void {
if (fs.existsSync(newFile)) {
fs.unlinkSync(newFile);
}
fs.symlinkSync(target, newFile);
}
/**
* Create symbolic links to the new version file
*
* "latest.json" link is not updated if the new version is "older" (e.g., it's
* a patch release for an older major version).
*
* @param versionFilePath Path to the new version file
* @param newVersion The new version
* @param oldVersion The previous latest version
*/
public createSymlinks(
versionFilePath: string,
newVersion: string,
oldVersion?: string
): void {
const parsedNewVersion = parseVersion(newVersion) || undefined;
if (!parsedNewVersion) {
throw new ConfigurationError(
`Cannot parse version: "${newVersion}"`
);
}
const parsedOldVersion =
(oldVersion ? parseVersion(oldVersion) : undefined) || undefined;
const baseVersionName = path.basename(versionFilePath);
const packageDir = path.dirname(versionFilePath);
// link latest, but only if the new version is "newer"
if (
parsedOldVersion &&
!versionGreaterOrEqualThan(parsedNewVersion, parsedOldVersion)
) {
logger.warn(
`Not updating the latest version file: current version is "${oldVersion}", new version is "${newVersion}"`
);
} else {
logger.debug(
`Changing symlink for "latest.json" from version "${oldVersion}" to "${newVersion}"`
);
this.forceSymlink(baseVersionName, path.join(packageDir, 'latest.json'));
}
// link major
const majorVersionLink = `${parsedNewVersion.major}.json`;
this.forceSymlink(baseVersionName, path.join(packageDir, majorVersionLink));
// link minor
const minorVersionLink = `${parsedNewVersion.major}.${parsedNewVersion.minor}.json`;
this.forceSymlink(baseVersionName, path.join(packageDir, minorVersionLink));
}
/**
* Returns the path to the SDK, given its canonical name
*
* @param registryDir The path to the local registry
* @param canonical The SDK's canonical name
* @returns The SDK path
*/
public getSdkPackagePath(registryDir: string, canonical: string): string {
const packageDirs = parseCanonical(canonical);
return [registryDir, 'packages'].concat(packageDirs).join(path.sep);
}
/**
* Returns the path to the app, given its canonical name
*
* @param registryDir The path to the local registry
* @param canonical The app's canonical name
* @returns The app path
*/
public getAppPackagePath(registryDir: string, canonical: string): string {
const packageDirs = parseCanonical(canonical);
if (packageDirs[0] !== 'app') {
throw new ConfigurationError(
`Invalid canonical entry for an app: ${canonical}`
);
}
return [registryDir, 'apps'].concat(packageDirs.slice(1)).join(path.sep);
}
/**
* Returns the path to the package from its canonical name
*
* @param registryDir The path to the local registry
* @param canonical The app's canonical name
*/
public getPackageDirPath(registryDir: string, canonical: string): string {
if (this.registryConfig.type === RegistryPackageType.SDK) {
return this.getSdkPackagePath(registryDir, canonical);
} else if (this.registryConfig.type === RegistryPackageType.APP) {
return this.getAppPackagePath(registryDir, canonical);
} else {
throw new ConfigurationError(
`Unknown registry package type: ${this.registryConfig.type}`
);
}
}
/**
* Adds file URLs to the manifest
*
* URL template is taken from "urlTemplate" configuration argument
*
* FIXME(tonyo): LEGACY function, left for compatibility, replaced by addFilesData
*
* @param manifest Package manifest
* @param version The new version
* @param revision Git commit SHA to be published
*/
public async addFileLinks(
manifest: { [key: string]: any },
version: string,
revision: string
): Promise<void> {
if (!this.registryConfig.urlTemplate) {
return;
}
const artifacts = await this.getArtifactsForRevision(revision);
if (artifacts.length === 0) {
logger.warn('No artifacts found, not adding any links to the manifest');
return;
}
const fileUrls: { [_: string]: string } = {};
for (const artifact of artifacts) {
fileUrls[artifact.filename] = renderTemplateSafe(
this.registryConfig.urlTemplate,
{
file: artifact.filename,
revision,
version,
}
);
}
logger.debug(
`Writing file urls to the manifest, files found: ${artifacts.length}`
);
manifest.file_urls = fileUrls;
}
/**
* Extends the artifact entry with additional information
*
* Information on checksums and download URLs is added here
*
* @param artifact Artifact
* @param version The new version
* @param revision Git commit SHA to be published
*
*/
public async getArtifactData(
artifact: RemoteArtifact,
version: string,
revision: string
): Promise<any> {
const artifactData: any = {};
if (this.registryConfig.urlTemplate) {
artifactData.url = renderTemplateSafe(this.registryConfig.urlTemplate, {
file: artifact.filename,
revision,
version,
});
}
if (this.registryConfig.checksums.length > 0) {
const fileChecksums: { [key: string]: string } = {};
for (const checksumType of this.registryConfig.checksums) {
const { algorithm, format } = checksumType;
const checksum = await this.artifactProvider.getChecksum(
artifact,
algorithm,
| castChecksums | identifier_name |
registry.ts | configuration
*
* Throws an error in case the configuration is incorrect
* FIXME(tonyo): rewrite this with JSON schemas
*
* @param checksums Raw configuration
*/
protected castChecksums(checksums: any): ChecksumEntry[] {
if (!checksums) {
return [];
}
if (!(checksums instanceof Array)) {
throw new ConfigurationError(
'Invalid type of "checksums": should be an array'
);
}
const resultChecksums: ChecksumEntry[] = [];
checksums.forEach(item => {
if (typeof item !== 'object' || !item.algorithm || !item.format) {
throw new ConfigurationError(
`Invalid checksum type: ${JSON.stringify(item)}`
);
}
// FIXME(tonyo): this is ugly as hell :(
// This all has to be replaced with JSON schema
if (
!(Object as any).values(HashAlgorithm).includes(item.algorithm) ||
!(Object as any).values(HashOutputFormat).includes(item.format)
) {
throw new ConfigurationError(
`Invalid checksum attributes: ${JSON.stringify(item)}`
);
}
resultChecksums.push({ algorithm: item.algorithm, format: item.format });
});
return resultChecksums;
}
/**
* Extracts Registry target options from the raw configuration
*/
public getRegistryConfig(): RegistryConfig {
const registryType = this.config.type;
if (
[RegistryPackageType.APP, RegistryPackageType.SDK].indexOf(
registryType
) === -1
) |
let urlTemplate;
if (registryType === RegistryPackageType.APP) {
urlTemplate = this.config.urlTemplate;
if (urlTemplate && typeof urlTemplate !== 'string') {
throw new ConfigurationError(
`Invalid "urlTemplate" specified: ${urlTemplate}`
);
}
}
const releaseConfig = this.config.config;
if (!releaseConfig) {
throw new ConfigurationError(
'Cannot find configuration dictionary for release registry'
);
}
const canonicalName = releaseConfig.canonical;
if (!canonicalName) {
throw new ConfigurationError(
'Canonical name not found in the configuration'
);
}
const linkPrereleases = this.config.linkPrereleases || false;
if (typeof linkPrereleases !== 'boolean') {
throw new ConfigurationError('Invalid type of "linkPrereleases"');
}
const checksums = this.castChecksums(this.config.checksums);
const onlyIfPresentStr = this.config.onlyIfPresent || undefined;
let onlyIfPresent;
if (onlyIfPresentStr) {
if (typeof onlyIfPresentStr !== 'string') {
throw new ConfigurationError('Invalid type of "onlyIfPresent"');
}
onlyIfPresent = stringToRegexp(onlyIfPresentStr);
}
return {
canonicalName,
checksums,
linkPrereleases,
onlyIfPresent,
registryRemote: DEFAULT_REGISTRY_REMOTE,
type: registryType,
urlTemplate,
};
}
/**
* Creates a symlink, overwriting the existing one
*
* @param target Target path
* @param newFile Path to the new symlink
*/
public forceSymlink(target: string, newFile: string): void {
if (fs.existsSync(newFile)) {
fs.unlinkSync(newFile);
}
fs.symlinkSync(target, newFile);
}
/**
* Create symbolic links to the new version file
*
* "latest.json" link is not updated if the new version is "older" (e.g., it's
* a patch release for an older major version).
*
* @param versionFilePath Path to the new version file
* @param newVersion The new version
* @param oldVersion The previous latest version
*/
public createSymlinks(
versionFilePath: string,
newVersion: string,
oldVersion?: string
): void {
const parsedNewVersion = parseVersion(newVersion) || undefined;
if (!parsedNewVersion) {
throw new ConfigurationError(
`Cannot parse version: "${newVersion}"`
);
}
const parsedOldVersion =
(oldVersion ? parseVersion(oldVersion) : undefined) || undefined;
const baseVersionName = path.basename(versionFilePath);
const packageDir = path.dirname(versionFilePath);
// link latest, but only if the new version is "newer"
if (
parsedOldVersion &&
!versionGreaterOrEqualThan(parsedNewVersion, parsedOldVersion)
) {
logger.warn(
`Not updating the latest version file: current version is "${oldVersion}", new version is "${newVersion}"`
);
} else {
logger.debug(
`Changing symlink for "latest.json" from version "${oldVersion}" to "${newVersion}"`
);
this.forceSymlink(baseVersionName, path.join(packageDir, 'latest.json'));
}
// link major
const majorVersionLink = `${parsedNewVersion.major}.json`;
this.forceSymlink(baseVersionName, path.join(packageDir, majorVersionLink));
// link minor
const minorVersionLink = `${parsedNewVersion.major}.${parsedNewVersion.minor}.json`;
this.forceSymlink(baseVersionName, path.join(packageDir, minorVersionLink));
}
/**
* Returns the path to the SDK, given its canonical name
*
* @param registryDir The path to the local registry
* @param canonical The SDK's canonical name
* @returns The SDK path
*/
public getSdkPackagePath(registryDir: string, canonical: string): string {
const packageDirs = parseCanonical(canonical);
return [registryDir, 'packages'].concat(packageDirs).join(path.sep);
}
/**
* Returns the path to the app, given its canonical name
*
* @param registryDir The path to the local registry
* @param canonical The app's canonical name
* @returns The app path
*/
public getAppPackagePath(registryDir: string, canonical: string): string {
const packageDirs = parseCanonical(canonical);
if (packageDirs[0] !== 'app') {
throw new ConfigurationError(
`Invalid canonical entry for an app: ${canonical}`
);
}
return [registryDir, 'apps'].concat(packageDirs.slice(1)).join(path.sep);
}
/**
* Returns the path to the package from its canonical name
*
* @param registryDir The path to the local registry
* @param canonical The app's canonical name
*/
public getPackageDirPath(registryDir: string, canonical: string): string {
if (this.registryConfig.type === RegistryPackageType.SDK) {
return this.getSdkPackagePath(registryDir, canonical);
} else if (this.registryConfig.type === RegistryPackageType.APP) {
return this.getAppPackagePath(registryDir, canonical);
} else {
throw new ConfigurationError(
`Unknown registry package type: ${this.registryConfig.type}`
);
}
}
/**
* Adds file URLs to the manifest
*
* URL template is taken from "urlTemplate" configuration argument
*
* FIXME(tonyo): LEGACY function, left for compatibility, replaced by addFilesData
*
* @param manifest Package manifest
* @param version The new version
* @param revision Git commit SHA to be published
*/
public async addFileLinks(
manifest: { [key: string]: any },
version: string,
revision: string
): Promise<void> {
if (!this.registryConfig.urlTemplate) {
return;
}
const artifacts = await this.getArtifactsForRevision(revision);
if (artifacts.length === 0) {
logger.warn('No artifacts found, not adding any links to the manifest');
return;
}
const fileUrls: { [_: string]: string } = {};
for (const artifact of artifacts) {
fileUrls[artifact.filename] = renderTemplateSafe(
this.registryConfig.urlTemplate,
{
file: artifact.filename,
revision,
version,
}
);
}
logger.debug(
`Writing file urls to the manifest, files found: ${artifacts.length}`
);
manifest.file_urls = fileUrls;
}
/**
* Extends the artifact entry with additional information
*
* Information on checksums and download URLs is added here
*
* @param artifact Artifact
* @param version The new version
* @param revision Git commit SHA to be published
*
*/
public async getArtifactData(
artifact: RemoteArtifact,
version: string,
revision: string
): Promise<any> {
const artifactData: any = {};
if (this.registryConfig.urlTemplate) {
artifactData.url = renderTemplateSafe(this.registryConfig.urlTemplate, {
file: artifact.filename,
revision,
version,
});
}
if (this.registryConfig.checksums.length > 0) {
const fileChecksums: { [key: string]: string } = {};
for (const checksumType of this.registryConfig.checksums) {
const { algorithm, format } = checksumType;
const checksum = await this.artifactProvider.getChecksum(
artifact,
algorithm,
| {
throw new ConfigurationError(
`Invalid registry type specified: "${registryType}"`
);
} | conditional_block |
registry.ts | s configuration
*
* Throws an error in case the configuration is incorrect
* FIXME(tonyo): rewrite this with JSON schemas
*
* @param checksums Raw configuration
*/
protected castChecksums(checksums: any): ChecksumEntry[] {
if (!checksums) {
return [];
}
if (!(checksums instanceof Array)) {
throw new ConfigurationError(
'Invalid type of "checksums": should be an array'
);
}
const resultChecksums: ChecksumEntry[] = [];
checksums.forEach(item => {
if (typeof item !== 'object' || !item.algorithm || !item.format) {
throw new ConfigurationError( | `Invalid checksum type: ${JSON.stringify(item)}`
);
}
// FIXME(tonyo): this is ugly as hell :(
// This all has to be replaced with JSON schema
if (
!(Object as any).values(HashAlgorithm).includes(item.algorithm) ||
!(Object as any).values(HashOutputFormat).includes(item.format)
) {
throw new ConfigurationError(
`Invalid checksum attributes: ${JSON.stringify(item)}`
);
}
resultChecksums.push({ algorithm: item.algorithm, format: item.format });
});
return resultChecksums;
}
/**
* Extracts Registry target options from the raw configuration
*/
public getRegistryConfig(): RegistryConfig {
const registryType = this.config.type;
if (
[RegistryPackageType.APP, RegistryPackageType.SDK].indexOf(
registryType
) === -1
) {
throw new ConfigurationError(
`Invalid registry type specified: "${registryType}"`
);
}
let urlTemplate;
if (registryType === RegistryPackageType.APP) {
urlTemplate = this.config.urlTemplate;
if (urlTemplate && typeof urlTemplate !== 'string') {
throw new ConfigurationError(
`Invalid "urlTemplate" specified: ${urlTemplate}`
);
}
}
const releaseConfig = this.config.config;
if (!releaseConfig) {
throw new ConfigurationError(
'Cannot find configuration dictionary for release registry'
);
}
const canonicalName = releaseConfig.canonical;
if (!canonicalName) {
throw new ConfigurationError(
'Canonical name not found in the configuration'
);
}
const linkPrereleases = this.config.linkPrereleases || false;
if (typeof linkPrereleases !== 'boolean') {
throw new ConfigurationError('Invalid type of "linkPrereleases"');
}
const checksums = this.castChecksums(this.config.checksums);
const onlyIfPresentStr = this.config.onlyIfPresent || undefined;
let onlyIfPresent;
if (onlyIfPresentStr) {
if (typeof onlyIfPresentStr !== 'string') {
throw new ConfigurationError('Invalid type of "onlyIfPresent"');
}
onlyIfPresent = stringToRegexp(onlyIfPresentStr);
}
return {
canonicalName,
checksums,
linkPrereleases,
onlyIfPresent,
registryRemote: DEFAULT_REGISTRY_REMOTE,
type: registryType,
urlTemplate,
};
}
/**
* Creates a symlink, overwriting the existing one
*
* @param target Target path
* @param newFile Path to the new symlink
*/
public forceSymlink(target: string, newFile: string): void {
if (fs.existsSync(newFile)) {
fs.unlinkSync(newFile);
}
fs.symlinkSync(target, newFile);
}
/**
* Create symbolic links to the new version file
*
* "latest.json" link is not updated if the new version is "older" (e.g., it's
* a patch release for an older major version).
*
* @param versionFilePath Path to the new version file
* @param newVersion The new version
* @param oldVersion The previous latest version
*/
public createSymlinks(
versionFilePath: string,
newVersion: string,
oldVersion?: string
): void {
const parsedNewVersion = parseVersion(newVersion) || undefined;
if (!parsedNewVersion) {
throw new ConfigurationError(
`Cannot parse version: "${newVersion}"`
);
}
const parsedOldVersion =
(oldVersion ? parseVersion(oldVersion) : undefined) || undefined;
const baseVersionName = path.basename(versionFilePath);
const packageDir = path.dirname(versionFilePath);
// link latest, but only if the new version is "newer"
if (
parsedOldVersion &&
!versionGreaterOrEqualThan(parsedNewVersion, parsedOldVersion)
) {
logger.warn(
`Not updating the latest version file: current version is "${oldVersion}", new version is "${newVersion}"`
);
} else {
logger.debug(
`Changing symlink for "latest.json" from version "${oldVersion}" to "${newVersion}"`
);
this.forceSymlink(baseVersionName, path.join(packageDir, 'latest.json'));
}
// link major
const majorVersionLink = `${parsedNewVersion.major}.json`;
this.forceSymlink(baseVersionName, path.join(packageDir, majorVersionLink));
// link minor
const minorVersionLink = `${parsedNewVersion.major}.${parsedNewVersion.minor}.json`;
this.forceSymlink(baseVersionName, path.join(packageDir, minorVersionLink));
}
/**
* Returns the path to the SDK, given its canonical name
*
* @param registryDir The path to the local registry
* @param canonical The SDK's canonical name
* @returns The SDK path
*/
public getSdkPackagePath(registryDir: string, canonical: string): string {
const packageDirs = parseCanonical(canonical);
return [registryDir, 'packages'].concat(packageDirs).join(path.sep);
}
/**
* Returns the path to the app, given its canonical name
*
* @param registryDir The path to the local registry
* @param canonical The app's canonical name
* @returns The app path
*/
public getAppPackagePath(registryDir: string, canonical: string): string {
const packageDirs = parseCanonical(canonical);
if (packageDirs[0] !== 'app') {
throw new ConfigurationError(
`Invalid canonical entry for an app: ${canonical}`
);
}
return [registryDir, 'apps'].concat(packageDirs.slice(1)).join(path.sep);
}
/**
* Returns the path to the package from its canonical name
*
* @param registryDir The path to the local registry
* @param canonical The app's canonical name
*/
public getPackageDirPath(registryDir: string, canonical: string): string {
if (this.registryConfig.type === RegistryPackageType.SDK) {
return this.getSdkPackagePath(registryDir, canonical);
} else if (this.registryConfig.type === RegistryPackageType.APP) {
return this.getAppPackagePath(registryDir, canonical);
} else {
throw new ConfigurationError(
`Unknown registry package type: ${this.registryConfig.type}`
);
}
}
/**
* Adds file URLs to the manifest
*
* URL template is taken from "urlTemplate" configuration argument
*
* FIXME(tonyo): LEGACY function, left for compatibility, replaced by addFilesData
*
* @param manifest Package manifest
* @param version The new version
* @param revision Git commit SHA to be published
*/
public async addFileLinks(
manifest: { [key: string]: any },
version: string,
revision: string
): Promise<void> {
if (!this.registryConfig.urlTemplate) {
return;
}
const artifacts = await this.getArtifactsForRevision(revision);
if (artifacts.length === 0) {
logger.warn('No artifacts found, not adding any links to the manifest');
return;
}
const fileUrls: { [_: string]: string } = {};
for (const artifact of artifacts) {
fileUrls[artifact.filename] = renderTemplateSafe(
this.registryConfig.urlTemplate,
{
file: artifact.filename,
revision,
version,
}
);
}
logger.debug(
`Writing file urls to the manifest, files found: ${artifacts.length}`
);
manifest.file_urls = fileUrls;
}
/**
* Extends the artifact entry with additional information
*
* Information on checksums and download URLs is added here
*
* @param artifact Artifact
* @param version The new version
* @param revision Git commit SHA to be published
*
*/
public async getArtifactData(
artifact: RemoteArtifact,
version: string,
revision: string
): Promise<any> {
const artifactData: any = {};
if (this.registryConfig.urlTemplate) {
artifactData.url = renderTemplateSafe(this.registryConfig.urlTemplate, {
file: artifact.filename,
revision,
version,
});
}
if (this.registryConfig.checksums.length > 0) {
const fileChecksums: { [key: string]: string } = {};
for (const checksumType of this.registryConfig.checksums) {
const { algorithm, format } = checksumType;
const checksum = await this.artifactProvider.getChecksum(
artifact,
algorithm,
| random_line_split | |
Ngrams.py | _twitter_rw(question):
import re
twitter_rw = ['rt', 'dm']
cleansed_question = ' ' + question + ' '
for rw in twitter_rw:
cleansed_question = re.sub(r'\W(?:%s)\W' % rw, ' ', cleansed_question, flags=re.IGNORECASE)
return cleansed_question.strip()
def _remove_urls(question):
import re
cleansed_question = re.sub(r'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+', ' ', question, flags=re.MULTILINE)
return cleansed_question.strip()
def _remove_smilies(question):
import re
smileys = [': /+','>:[+', ':-(+', ':(+', ':-c', ':-<+', ':-[+', ':{', '>.>', ':o',
'<.<', '>.<', ':\\+', ':\\\'(+', ':\\\'(+', ':\'(+', ';\'(+', '>:\\', '>:/+',
':-/+', ':-.', ':\\+', '=/+', '=\\+', ':S', ':/+', ':$', '\*cry\*', '-_-',
';-)+', ':D', ';D', ';;)+', ':-x', ':x', '^-^', 'x)', 'è_é','haha','hehe', 'u_u',
'0:-)', ':o)', ':*', ':-*', '8-}', '=P~', '>:D<', '<:-P', ';))',
'xD', '>.<','u_u',
'>:]+', ':3', ':c', ':>', '=]', '8)+', '=)+', ':}+', ':^)+', '>:D', ':-D', '8-D', 'x-D', 'X-D', '=-D', '=D', '=-3', '8-)+',
'>;]+', '*-)+', '*)+', ';-]+', ';]+', ';^)+', '>:P', ':-P', ':P', 'X-P', ':-p', '=p', ':-Þ', ':Þ', ':-b', '>:o', '>:O', ':-O',
'°o°', '°O°', 'o_O', 'o.O', '8-0', 'o_O', 'x)+', '^^+', ':=)', '((?::|;|=)(?:-)?(?:\)|D|P))']
regex = '|'.join(['(\s%s\s)' % i for i in [re.escape(smiley) for smiley in smileys]])
cleansed_question = re.sub(regex, ' ', question, flags=re.MULTILINE|re.I)
return cleansed_question.strip()
def _remove_hashtags(question):
import re
question = question.strip()
hash_tags = [tag for tag in question.split() if tag.startswith('#')]
if not hash_tags:
return question
# check the position of the hash tags within the question and decide whether to clean and keep it
# or to remove it
import sys
last_question_mark_pos = sys.maxint if question.rfind('?') == -1 else question.rfind('?')
# remove all hash tags being after the last question mark
for tag in hash_tags:
tag_pos = question.find(tag)
if tag_pos > last_question_mark_pos:
# completely remove hashtag if they are after the last qm or just at the beginning of the tweet
question = question.replace(tag, ' ')
question = question.strip()
last_question_mark_pos = question.rfind('?')
else:
# just remove the hashtag sign
question = question.replace(tag, tag[1:])
last_question_mark_pos = sys.maxint if question.rfind('?') == -1 else question.rfind('?')
# remove extra spaces and return
pattern = re.compile(r'\s+')
question = re.sub(pattern, ' ', question.strip())
return question
def find_urls(content, scheme):
current = content.find(scheme)
while current != -1:
end = content.find(' ', current)
if end == -1:
end = co | while content[end-1] in [',', '.', ';', ':', '?']:
end -= 1
yield (current, end)
current = end
current = content.find(scheme, current)
class SmileyParser:
def __init__(self):
import re
western_smileys = [
# from http://en.wikipedia.org/wiki/List_of_emoticons
':-)', ':)', ':o)', ':]', ':3', ':c)', ':>', '=]', '8)', '=)', ':}', ':^)',
':-D', ':D', '8-D', '8D', 'x-D', 'xD', 'X-D', 'XD', '=-D', '=D', '=-3', '=3', 'B^D',
':-))',
'>:[', ':-(', ':(', ':-c', ':c', ':-<', ':<', ':-[', ':[', ':{',
':-||', ':@', '>:(',
":'-(", ":'(",
":'-)", ":')",
'D:<', 'D:', 'D8', 'D;', 'D=', 'DX', 'v.v', "D-':",
'>:O', ':-O', ':O', '8-0',
':*', ':^*',
';-)', ';)', '*-)', '*)', ';-]', ';]', ';D', ';^)', ':-,',
'>:P', ':-P', ':P', 'X-P', 'x-p', 'xp', 'XP', ':-p', ':p', '=p', ':-b', ':b',
'>:\\', '>:/', ':-/', ':-.', ':/', ':\\', '=/', '=\\', ':L', '=L', ':S', '>.<',
':|', ':-|',
':$',
':-X', ':X', ':-#', ':#',
'O:-)', '0:-3', '0:3', '0:-)', '0:)', '0;^)',
'>:)', '>;)', '>:-)',
'}:-)', '}:)', '3:-)', '3:)',
'o/\o', '^5', '>_>^', '^<_<',
'|;-)', '|-O',
':-&', ':&',
'#-)',
'%-)', '%)',
':-###..', ':###..',
'\\o/', '/o\\',
'*\\0/*',
# additional
':=)', ';=)', ';))', ':))', ';;)'
]
eastern_smileys = [
# from http://en.wikipedia.org/wiki/List_of_emoticons
]
unicode_smileys = [
# from http://en.wikipedia.org/wiki/List_of_emoticons
u'\u2639', u'\u263a', u'\u263b',
u'\u1f600', u'\u1f601', u'\u1f602', u'\u1f603', u'\u1f604', u'\u1f605', u'\u1f606', u'\u1f607',
u'\u1f608', u'\u1f609', u'\u1f60a', u'\u1f60b', u'\u1f60c', u'\u1f60d', u'\u1f60e', u'\u1f60f',
u'\u1f610', u'\u1f611', u'\u1f612', u'\u1f603', u'\u1f614', u'\u1f615', u'\u1f616', u'\u1f617',
u'\u1f618', u'\u1f619', u'\u1f61a', u'\u1f60b', u'\u1f61c', u'\u1f61d', u'\u1f61e', u'\u1f61f',
u'\u1f620', u'\u1f621', u'\u1f622', u'\u1f603', u'\u1f624', u'\u1f625', u'\u1f626', u'\u1f627',
u'\u1f628', u'\u1f629', u'\u1f62a', u'\u1f60b', u'\u1f62c', u'\u1f62d', u'\u1f62e', u'\u1f62f',
u'\u1f630', u'\u1f631', u'\u1f632', u'\u1f63 | ntent.find('\n', current)
if end == -1:
end = len(content)
| conditional_block |
Ngrams.py | (question):
import re
twitter_rw = ['rt', 'dm']
cleansed_question = ' ' + question + ' '
for rw in twitter_rw:
cleansed_question = re.sub(r'\W(?:%s)\W' % rw, ' ', cleansed_question, flags=re.IGNORECASE)
return cleansed_question.strip()
def _remove_urls(question):
import re
cleansed_question = re.sub(r'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+', ' ', question, flags=re.MULTILINE)
return cleansed_question.strip()
def _remove_smilies(question):
import re
smileys = [': /+','>:[+', ':-(+', ':(+', ':-c', ':-<+', ':-[+', ':{', '>.>', ':o',
'<.<', '>.<', ':\\+', ':\\\'(+', ':\\\'(+', ':\'(+', ';\'(+', '>:\\', '>:/+',
':-/+', ':-.', ':\\+', '=/+', '=\\+', ':S', ':/+', ':$', '\*cry\*', '-_-',
';-)+', ':D', ';D', ';;)+', ':-x', ':x', '^-^', 'x)', 'è_é','haha','hehe', 'u_u',
'0:-)', ':o)', ':*', ':-*', '8-}', '=P~', '>:D<', '<:-P', ';))',
'xD', '>.<','u_u',
'>:]+', ':3', ':c', ':>', '=]', '8)+', '=)+', ':}+', ':^)+', '>:D', ':-D', '8-D', 'x-D', 'X-D', '=-D', '=D', '=-3', '8-)+',
'>;]+', '*-)+', '*)+', ';-]+', ';]+', ';^)+', '>:P', ':-P', ':P', 'X-P', ':-p', '=p', ':-Þ', ':Þ', ':-b', '>:o', '>:O', ':-O',
'°o°', '°O°', 'o_O', 'o.O', '8-0', 'o_O', 'x)+', '^^+', ':=)', '((?::|;|=)(?:-)?(?:\)|D|P))']
regex = '|'.join(['(\s%s\s)' % i for i in [re.escape(smiley) for smiley in smileys]])
cleansed_question = re.sub(regex, ' ', question, flags=re.MULTILINE|re.I)
return cleansed_question.strip()
def _remove_hashtags(question):
import re
question = question.strip()
hash_tags = [tag for tag in question.split() if tag.startswith('#')]
if not hash_tags:
return question
# check the position of the hash tags within the question and decide whether to clean and keep it
# or to remove it
import sys
last_question_mark_pos = sys.maxint if question.rfind('?') == -1 else question.rfind('?')
# remove all hash tags being after the last question mark
for tag in hash_tags:
tag_pos = question.find(tag)
if tag_pos > last_question_mark_pos:
# completely remove hashtag if they are after the last qm or just at the beginning of the tweet
question = question.replace(tag, ' ')
question = question.strip()
last_question_mark_pos = question.rfind('?')
else:
# just remove the hashtag sign
question = question.replace(tag, tag[1:])
last_question_mark_pos = sys.maxint if question.rfind('?') == -1 else question.rfind('?')
# remove extra spaces and return
pattern = re.compile(r'\s+')
question = re.sub(pattern, ' ', question.strip())
return question
def find_urls(content, scheme):
current = content.find(scheme)
while current != -1:
end = content.find(' ', current)
if end == -1:
end = content.find('\n', current)
if end == -1:
end = len(content)
while content[end-1] in [',', '.', ';', ':', '?']:
end -= 1
yield (current, end)
current = end
current = content.find(scheme, current)
class SmileyParser:
def __init__(self):
import re
western_smileys = [
# from http://en.wikipedia.org/wiki/List_of_emoticons
':-)', ':)', ':o)', ':]', ':3', ':c)', ':>', '=]', '8)', '=)', ':}', ':^)',
':-D', ':D', '8-D', '8D', 'x-D', 'xD', 'X-D', 'XD', '=-D', '=D', '=-3', '=3', 'B^D',
':-))',
'>:[', ':-(', ':(', ':-c', ':c', ':-<', ':<', ':-[', ':[', ':{',
':-||', ':@', '>:(',
":'-(", ":'(",
":'-)", ":')",
'D:<', 'D:', 'D8', 'D;', 'D=', 'DX', 'v.v', "D-':",
'>:O', ':-O', ':O', '8-0',
':*', ':^*',
';-)', ';)', '*-)', '*)', ';-]', ';]', ';D', ';^)', ':-,',
'>:P', ':-P', ':P', 'X-P', 'x-p', 'xp', 'XP', ':-p', ':p', '=p', ':-b', ':b',
'>:\\', '>:/', ':-/', ':-.', ':/', ':\\', '=/', '=\\', ':L', '=L', ':S', '>.<',
':|', ':-|',
':$',
':-X', ':X', ':-#', ':#',
'O:-)', '0:-3', '0:3', '0:-)', '0:)', '0;^)',
'>:)', '>;)', '>:-)',
'}:-)', '}:)', '3:-)', '3:)',
'o/\o', '^5', '>_>^', '^<_<',
'|;-)', '|-O',
':-&', ':&',
'#-)',
'%-)', '%)',
':-###..', ':###..',
'\\o/', '/o\\',
'*\\0/*',
# additional
':=)', ';=)', ';))', ':))', ';;)'
]
eastern_smileys = [
# from http://en.wikipedia.org/wiki/List_of_emoticons
]
unicode_smileys = [
# from http://en.wikipedia.org/wiki/List_of_emoticons
u'\u2639', u'\u263a', u'\u263b',
u'\u1f600', u'\u1f601', u'\u1f602', u'\u1f603', u'\u1f604', u'\u1f605', u'\u1f606', u'\u1f607',
u'\u1f608', u'\u1f609', u'\u1f60a', u'\u1f60b', u'\u1f60c', u'\u1f60d', u'\u1f60e', u'\u1f60f',
u'\u1f610', u'\u1f611', u'\u1f612', u'\u1f603', u'\u1f614', u'\u1f615', u'\u1f616', u'\u1f617',
u'\u1f618', u'\u1f619', u'\u1f61a', u'\u1f60b', u'\u1f61c', u'\u1f61d', u'\u1f61e', u'\u1f61f',
u'\u1f620', u'\u1f621', u'\u1f622', u'\u1f603', u'\u1f624', u'\u1f625', u'\u1f626', u'\u1f627',
u'\u1f628', u'\u1f629', u'\u1f62a', u'\u1f60b', u'\u1f62c', u'\u1f62d', u'\u1f62e', u'\u1f62f',
u'\u1f630', u'\u1f631', u'\u1f632', u'\u1f633 | _remove_twitter_rw | identifier_name | |
Ngrams.py | ',
'>;]+', '*-)+', '*)+', ';-]+', ';]+', ';^)+', '>:P', ':-P', ':P', 'X-P', ':-p', '=p', ':-Þ', ':Þ', ':-b', '>:o', '>:O', ':-O',
'°o°', '°O°', 'o_O', 'o.O', '8-0', 'o_O', 'x)+', '^^+', ':=)', '((?::|;|=)(?:-)?(?:\)|D|P))']
regex = '|'.join(['(\s%s\s)' % i for i in [re.escape(smiley) for smiley in smileys]])
cleansed_question = re.sub(regex, ' ', question, flags=re.MULTILINE|re.I)
return cleansed_question.strip()
def _remove_hashtags(question):
import re
question = question.strip()
hash_tags = [tag for tag in question.split() if tag.startswith('#')]
if not hash_tags:
return question
# check the position of the hash tags within the question and decide whether to clean and keep it
# or to remove it
import sys
last_question_mark_pos = sys.maxint if question.rfind('?') == -1 else question.rfind('?')
# remove all hash tags being after the last question mark
for tag in hash_tags:
tag_pos = question.find(tag)
if tag_pos > last_question_mark_pos:
# completely remove hashtag if they are after the last qm or just at the beginning of the tweet
question = question.replace(tag, ' ')
question = question.strip()
last_question_mark_pos = question.rfind('?')
else:
# just remove the hashtag sign
question = question.replace(tag, tag[1:])
last_question_mark_pos = sys.maxint if question.rfind('?') == -1 else question.rfind('?')
# remove extra spaces and return
pattern = re.compile(r'\s+')
question = re.sub(pattern, ' ', question.strip())
return question
def find_urls(content, scheme):
current = content.find(scheme)
while current != -1:
end = content.find(' ', current)
if end == -1:
end = content.find('\n', current)
if end == -1:
end = len(content)
while content[end-1] in [',', '.', ';', ':', '?']:
end -= 1
yield (current, end)
current = end
current = content.find(scheme, current)
class SmileyParser:
def __init__(self):
import re
western_smileys = [
# from http://en.wikipedia.org/wiki/List_of_emoticons
':-)', ':)', ':o)', ':]', ':3', ':c)', ':>', '=]', '8)', '=)', ':}', ':^)',
':-D', ':D', '8-D', '8D', 'x-D', 'xD', 'X-D', 'XD', '=-D', '=D', '=-3', '=3', 'B^D',
':-))',
'>:[', ':-(', ':(', ':-c', ':c', ':-<', ':<', ':-[', ':[', ':{',
':-||', ':@', '>:(',
":'-(", ":'(",
":'-)", ":')",
'D:<', 'D:', 'D8', 'D;', 'D=', 'DX', 'v.v', "D-':",
'>:O', ':-O', ':O', '8-0',
':*', ':^*',
';-)', ';)', '*-)', '*)', ';-]', ';]', ';D', ';^)', ':-,',
'>:P', ':-P', ':P', 'X-P', 'x-p', 'xp', 'XP', ':-p', ':p', '=p', ':-b', ':b',
'>:\\', '>:/', ':-/', ':-.', ':/', ':\\', '=/', '=\\', ':L', '=L', ':S', '>.<',
':|', ':-|',
':$',
':-X', ':X', ':-#', ':#',
'O:-)', '0:-3', '0:3', '0:-)', '0:)', '0;^)',
'>:)', '>;)', '>:-)',
'}:-)', '}:)', '3:-)', '3:)',
'o/\o', '^5', '>_>^', '^<_<',
'|;-)', '|-O',
':-&', ':&',
'#-)',
'%-)', '%)',
':-###..', ':###..',
'\\o/', '/o\\',
'*\\0/*',
# additional
':=)', ';=)', ';))', ':))', ';;)'
]
eastern_smileys = [
# from http://en.wikipedia.org/wiki/List_of_emoticons
]
unicode_smileys = [
# from http://en.wikipedia.org/wiki/List_of_emoticons
u'\u2639', u'\u263a', u'\u263b',
u'\u1f600', u'\u1f601', u'\u1f602', u'\u1f603', u'\u1f604', u'\u1f605', u'\u1f606', u'\u1f607',
u'\u1f608', u'\u1f609', u'\u1f60a', u'\u1f60b', u'\u1f60c', u'\u1f60d', u'\u1f60e', u'\u1f60f',
u'\u1f610', u'\u1f611', u'\u1f612', u'\u1f603', u'\u1f614', u'\u1f615', u'\u1f616', u'\u1f617',
u'\u1f618', u'\u1f619', u'\u1f61a', u'\u1f60b', u'\u1f61c', u'\u1f61d', u'\u1f61e', u'\u1f61f',
u'\u1f620', u'\u1f621', u'\u1f622', u'\u1f603', u'\u1f624', u'\u1f625', u'\u1f626', u'\u1f627',
u'\u1f628', u'\u1f629', u'\u1f62a', u'\u1f60b', u'\u1f62c', u'\u1f62d', u'\u1f62e', u'\u1f62f',
u'\u1f630', u'\u1f631', u'\u1f632', u'\u1f633', u'\u1f634', u'\u1f635', u'\u1f636', u'\u1f637',
u'\u1f638', u'\u1f639', u'\u1f63a', u'\u1f63b', u'\u1f63c', u'\u1f63d', u'\u1f63e', u'\u1f63f',
u'\u1f640', u'\u1f645', u'\u1f646', u'\u1f647',
u'\u1f648', u'\u1f649', u'\u1f64a', u'\u1f64b', u'\u1f64c', u'\u1f64d', u'\u1f64e', u'\u1f64f',
]
# try to generate rotated smileys
invert = {
# ltr to rtl
')' : '(',
'(' : ')',
']' : '[',
'[' : ']',
'{' : '}',
'}' : '{',
'<' : '>',
'>' : '<',
'/' : '\\',
'\\' : '/',
# does not change
'o' : 'o',
'x' : 'x',
'v' : 'v',
'0' : '0',
'8' : '8',
'|' : '|',
':' : ':',
';' : ';',
',' : ',',
"'" : "'",
'*' : '*',
'%' : '%'
}
def reverse_smiley(smiley):
output = []
for c in reversed(smiley):
if c in invert: | output.append(invert[c])
else:
return None
return ''.join(output)
| random_line_split | |
Ngrams.py | regex = '|'.join(['(\s%s\s)' % i for i in [re.escape(smiley) for smiley in smileys]])
cleansed_question = re.sub(regex, ' ', question, flags=re.MULTILINE|re.I)
return cleansed_question.strip()
def _remove_hashtags(question):
import re
question = question.strip()
hash_tags = [tag for tag in question.split() if tag.startswith('#')]
if not hash_tags:
return question
# check the position of the hash tags within the question and decide whether to clean and keep it
# or to remove it
import sys
last_question_mark_pos = sys.maxint if question.rfind('?') == -1 else question.rfind('?')
# remove all hash tags being after the last question mark
for tag in hash_tags:
tag_pos = question.find(tag)
if tag_pos > last_question_mark_pos:
# completely remove hashtag if they are after the last qm or just at the beginning of the tweet
question = question.replace(tag, ' ')
question = question.strip()
last_question_mark_pos = question.rfind('?')
else:
# just remove the hashtag sign
question = question.replace(tag, tag[1:])
last_question_mark_pos = sys.maxint if question.rfind('?') == -1 else question.rfind('?')
# remove extra spaces and return
pattern = re.compile(r'\s+')
question = re.sub(pattern, ' ', question.strip())
return question
def find_urls(content, scheme):
current = content.find(scheme)
while current != -1:
end = content.find(' ', current)
if end == -1:
end = content.find('\n', current)
if end == -1:
end = len(content)
while content[end-1] in [',', '.', ';', ':', '?']:
end -= 1
yield (current, end)
current = end
current = content.find(scheme, current)
class SmileyParser:
def __init__(self):
import re
western_smileys = [
# from http://en.wikipedia.org/wiki/List_of_emoticons
':-)', ':)', ':o)', ':]', ':3', ':c)', ':>', '=]', '8)', '=)', ':}', ':^)',
':-D', ':D', '8-D', '8D', 'x-D', 'xD', 'X-D', 'XD', '=-D', '=D', '=-3', '=3', 'B^D',
':-))',
'>:[', ':-(', ':(', ':-c', ':c', ':-<', ':<', ':-[', ':[', ':{',
':-||', ':@', '>:(',
":'-(", ":'(",
":'-)", ":')",
'D:<', 'D:', 'D8', 'D;', 'D=', 'DX', 'v.v', "D-':",
'>:O', ':-O', ':O', '8-0',
':*', ':^*',
';-)', ';)', '*-)', '*)', ';-]', ';]', ';D', ';^)', ':-,',
'>:P', ':-P', ':P', 'X-P', 'x-p', 'xp', 'XP', ':-p', ':p', '=p', ':-b', ':b',
'>:\\', '>:/', ':-/', ':-.', ':/', ':\\', '=/', '=\\', ':L', '=L', ':S', '>.<',
':|', ':-|',
':$',
':-X', ':X', ':-#', ':#',
'O:-)', '0:-3', '0:3', '0:-)', '0:)', '0;^)',
'>:)', '>;)', '>:-)',
'}:-)', '}:)', '3:-)', '3:)',
'o/\o', '^5', '>_>^', '^<_<',
'|;-)', '|-O',
':-&', ':&',
'#-)',
'%-)', '%)',
':-###..', ':###..',
'\\o/', '/o\\',
'*\\0/*',
# additional
':=)', ';=)', ';))', ':))', ';;)'
]
eastern_smileys = [
# from http://en.wikipedia.org/wiki/List_of_emoticons
]
unicode_smileys = [
# from http://en.wikipedia.org/wiki/List_of_emoticons
u'\u2639', u'\u263a', u'\u263b',
# astral-plane code points require 8-digit \U escapes in Python
u'\U0001f600', u'\U0001f601', u'\U0001f602', u'\U0001f603', u'\U0001f604', u'\U0001f605', u'\U0001f606', u'\U0001f607',
u'\U0001f608', u'\U0001f609', u'\U0001f60a', u'\U0001f60b', u'\U0001f60c', u'\U0001f60d', u'\U0001f60e', u'\U0001f60f',
u'\U0001f610', u'\U0001f611', u'\U0001f612', u'\U0001f613', u'\U0001f614', u'\U0001f615', u'\U0001f616', u'\U0001f617',
u'\U0001f618', u'\U0001f619', u'\U0001f61a', u'\U0001f61b', u'\U0001f61c', u'\U0001f61d', u'\U0001f61e', u'\U0001f61f',
u'\U0001f620', u'\U0001f621', u'\U0001f622', u'\U0001f623', u'\U0001f624', u'\U0001f625', u'\U0001f626', u'\U0001f627',
u'\U0001f628', u'\U0001f629', u'\U0001f62a', u'\U0001f62b', u'\U0001f62c', u'\U0001f62d', u'\U0001f62e', u'\U0001f62f',
u'\U0001f630', u'\U0001f631', u'\U0001f632', u'\U0001f633', u'\U0001f634', u'\U0001f635', u'\U0001f636', u'\U0001f637',
u'\U0001f638', u'\U0001f639', u'\U0001f63a', u'\U0001f63b', u'\U0001f63c', u'\U0001f63d', u'\U0001f63e', u'\U0001f63f',
u'\U0001f640', u'\U0001f645', u'\U0001f646', u'\U0001f647',
u'\U0001f648', u'\U0001f649', u'\U0001f64a', u'\U0001f64b', u'\U0001f64c', u'\U0001f64d', u'\U0001f64e', u'\U0001f64f',
]
# try to generate rotated smileys
invert = {
# ltr to rtl
')' : '(',
'(' : ')',
']' : '[',
'[' : ']',
'{' : '}',
'}' : '{',
'<' : '>',
'>' : '<',
'/' : '\\',
'\\' : '/',
# does not change
'o' : 'o',
'x' : 'x',
'v' : 'v',
'0' : '0',
'8' : '8',
'|' : '|',
':' : ':',
';' : ';',
',' : ',',
"'" : "'",
'*' : '*',
'%' : '%'
}
def reverse_smiley(smiley):
output = []
for c in reversed(smiley):
if c in invert:
output.append(invert[c])
else:
return None
return ''.join(output)
smileys = western_smileys + \
filter(lambda x:not x is None, [reverse_smiley(smiley) for smiley in western_smileys]) + \
eastern_smileys + \
unicode_smileys
self._regex = re.compile('|'.join(['(?:%s)' % i for i in [re.escape(smiley) for smiley in smileys]]), re.I | re.U)
def parse(self, content):
for i in | self._regex.finditer(content):
start, end = i.span()
yield (start, end)
def _ | identifier_body | |
index.d.ts | {
constructor(authTlsOpts: { certificatePath: string; privateKeyPath: string });
certificatePath: string;
privateKeyPath: string;
}
export class AuthenticationToken {
constructor(authTokenOpts: { token: string });
token: string;
}
export interface ClientOpts {
/**
* The connection URL for the Pulsar cluster.
*/
serviceUrl: string;
/**
* Configure the authentication provider.
* Default: No Authentication
*/
authentication?: AuthenticationTls | AuthenticationToken;
/**
* The timeout for Node.js client operations (creating producers, subscribing to and unsubscribing from topics).
* Retries will occur until this threshold is reached, at which point the operation will fail.
* Default: 30
*/
operationTimeoutSeconds?: number;
/**
* The number of threads to use for handling connections to Pulsar brokers.
* Default: 1
*/
ioThreads?: number;
/**
* The number of threads used by message listeners (consumers and readers).
* Default: 1
*/
messageListenerThreads?: number;
/**
* The number of concurrent lookup requests that can be sent on each broker connection.
* Setting a maximum helps to keep from overloading brokers.
* You should set values over the default only if the client needs to produce and/or subscribe to thousands of Pulsar topics.
* Default: 50000
*/
concurrentLookupRequest?: number;
/**
* The file path for the trusted TLS certificate.
*/
tlsTrustCertsFilePath?: string;
/**
* Whether to enable TLS hostname verification.
* Default: false
*/
tlsValidateHostname?: boolean;
/**
* Whether the Pulsar client accepts an untrusted TLS certificate from the broker.
* Default: false
*/
tlsAllowInsecureConnection?: boolean;
/**
* Interval between stats updates. Stats are activated by a positive statsIntervalInSeconds; the value should be >= 1 second.
* Default: 600
*/
statsIntervalInSeconds?: number;
}
export class Client {
constructor(opts: ClientOpts);
createProducer(data: ProducerOpts): Promise<Producer>;
createReader(data: ReaderOpts): Promise<Reader>;
subscribe(data: SubscribeOpts): Promise<Consumer>;
close(): Promise<null>;
}
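// Editor's sketch (not part of the original declarations): a minimal produce
// flow using only the APIs declared in this file. Code like this belongs in a
// consuming module rather than the .d.ts itself; the serviceUrl and topic
// values are placeholders.
async function exampleProduce(): Promise<void> {
  const client = new Client({ serviceUrl: 'pulsar://localhost:6650' });
  const producer = await client.createProducer({ topic: 'persistent://public/default/demo' });
  await producer.send({ data: Buffer.from('hello') }); // resolves once the broker acks
  await producer.flush();
  await producer.close();
  await client.close();
}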
export class MessageId {
/**
* MessageId representing the earliest, or oldest available message stored in the topic.
*/
static earliest(): MessageId;
/**
* MessageId representing the latest, or last published message in the topic.
*/
static latest(): MessageId;
/**
* Deserialize a message id object from a Buffer.
* @param data
*/
static deserialize(data: Buffer): MessageId;
/**
* Serialize the message id into a Buffer for storing.
*/
serialize(): Buffer;
/**
* Get message id as String.
*/
toString(): string;
}
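// Editor's sketch (not part of the original declarations): serialize() and
// deserialize() give a Buffer round-trip, e.g. for persisting a reader
// position between runs.
function messageIdRoundTrip(id: MessageId): MessageId {
  const stored: Buffer = id.serialize(); // could be written to disk here
  return MessageId.deserialize(stored);
}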
export class Message {
/**
* Getter method of topic name.
*/
getTopicName(): string;
/**
* Getter method of properties.
*/
getProperties(): MessageProperties;
/**
* Getter method of message data.
*/
getData(): Buffer;
/**
* Getter method of message id object.
*/
getMessageId(): MessageId;
/**
* Getter method of publish timestamp.
*/
getPublishTimestamp(): number;
/**
* Getter method of event timestamp.
*/
getEventTimestamp(): number;
/**
* Getter method of partition key.
*/
getPartitionKey(): string;
}
export interface ProducerOpts {
/**
* The Pulsar topic to which the producer will publish messages.
*/
topic: string;
/**
* A name for the producer. If you do not explicitly assign a name, Pulsar will automatically generate a globally unique name.
* If you choose to explicitly assign a name, it will need to be unique across all Pulsar clusters, otherwise the creation operation will throw an error.
*/
producerName?: string;
/**
* When publishing a message to a topic, the producer will wait for an acknowledgment from the responsible Pulsar broker.
* If a message is not acknowledged within the threshold set by this parameter, an error will be thrown. If you set sendTimeoutMs to -1,
* the timeout will be set to infinity (and thus removed). Removing the send timeout is recommended when using Pulsar's message de-duplication feature.
* Default: 30000
*/
sendTimeoutMs?: number;
/**
* The initial sequence ID of the message. When the producer sends a message, it attaches a sequence ID that is incremented on each send.
*/
initialSequenceId?: number;
/**
* The maximum size of the queue holding pending messages (i.e. messages waiting to receive an acknowledgment from the broker).
* By default, when the queue is full all calls to the send method will fail unless blockIfQueueFull is set to true.
* Default: 1000
*/
maxPendingMessages?: number;
/**
* The maximum size of the sum of partition's pending queue.
* Default: 50000
*/
maxPendingMessagesAcrossPartitions?: number;
/**
* If set to true, the producer's send method will wait when the outgoing message queue is full rather than failing and throwing an error
* (the size of that queue is dictated by the maxPendingMessages parameter); if set to false (the default), send operations will fail and
* throw an error when the queue is full.
* Default: false
*/
blockIfQueueFull?: boolean;
/**
* The message routing logic (for producers on partitioned topics). This logic is applied only when no key is set on messages. The available
* options are: round robin (RoundRobinDistribution), or publishing all messages to a single partition (UseSinglePartition).
* Default: UseSinglePartition
*/
messageRoutingMode?: MessageRoutingModes;
/**
* The hashing function that determines the partition on which a particular message is published (partitioned topics only).
* The available options are: JavaStringHash (the equivalent of String.hashCode() in Java), Murmur3_32Hash (applies the Murmur3 hashing function),
* or BoostHash (applies the hashing function from C++'s Boost library).
* Default: BoostHash
*/
hashingScheme?: HashingScheme;
/**
* The message data compression type used by the producer. The available options are LZ4 and Zlib.
* Default: No Compression
*/
compressionType?: CompressionType;
/**
* If set to true, the producer sends messages in batches.
* Default: true
*/
batchingEnabled?: boolean;
/**
* The maximum delay before a pending batch of messages is sent.
* Default: 10
*/
batchingMaxPublishDelayMs?: number;
/**
* The maximum number of messages sent in each batch.
* Default: 1000
*/
batchingMaxMessages?: number;
/**
* The metadata of producer.
*/
properties?: MessageProperties;
}
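// Editor's sketch (not part of the original declarations): a ProducerOpts
// value exercising the batching knobs documented above; the values shown are
// arbitrary and simply restate the documented defaults.
const exampleProducerOpts: ProducerOpts = {
  topic: 'persistent://public/default/demo',
  sendTimeoutMs: 30000,
  batchingEnabled: true,
  batchingMaxPublishDelayMs: 10,
  batchingMaxMessages: 1000,
};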
export interface ProducerMessage {
/**
* The actual data payload of the message.
*/
data: Buffer;
/**
* An object for any application-specific metadata attached to the message.
*/
properties?: MessageProperties;
/**
* The timestamp associated with the message.
*/
eventTimestamp?: number;
/**
* The sequence ID of the message.
*/
sequenceId?: number;
/**
* The optional key associated with the message (particularly useful for things like topic compaction).
*/
partitionKey?: string;
/**
* The clusters to which this message will be replicated. Pulsar brokers handle message replication automatically;
* you should only change this setting if you want to override the broker default.
*/
replicationClusters?: string[];
}
export class Producer {
/**
* Publishes a message to the producer's topic. The returned Promise resolves once the message is
* successfully acknowledged by the Pulsar broker, and rejects if an error occurs.
* @param message Message to be published.
*/
send(message: ProducerMessage): Promise<null>;
/**
* Sends the messages in the send queue to the Pulsar broker. The returned Promise resolves once the
* messages are successfully acknowledged by the broker, and rejects if an error occurs.
*/
flush(): Promise<null>;
/**
* Closes the producer and releases all resources allocated to it. Once close() is called, no more messages will be accepted from the publisher.
* The returned Promise resolves after all pending publish requests have been persisted by Pulsar.
* If an error is thrown, no pending writes will be retried.
*/
close(): Promise<null>;
}
export interface SubscribeOpts {
/**
* The Pulsar topic on which the consumer will establish a subscription and listen for messages.
*/
topic: string;
/**
* The subscription name for this consumer.
*/
subscription: string;
/**
* Available options are Exclusive, Shared, and Failover.
* Default: Exclusive
*/
subscriptionType?: SubscriptionType;
/**
* Acknowledge timeout in milliseconds.
* Default: 0
*/
ackTimeoutMs?: number;
| AuthenticationTls | identifier_name | |
index.d.ts | , at which point the operation will fail.
* Default: 30
*/
operationTimeoutSeconds?: number;
/**
* The number of threads to use for handling connections to Pulsar brokers.
* Default: 1
*/
ioThreads?: number;
/**
* The number of threads used by message listeners (consumers and readers).
* Default: 1
*/
messageListenerThreads?: number;
/**
* The number of concurrent lookup requests that can be sent on each broker connection.
* Setting a maximum helps to keep from overloading brokers.
* You should set values over the default only if the client needs to produce and/or subscribe to thousands of Pulsar topics.
* Default: 50000
*/
concurrentLookupRequest?: number;
/**
* The file path for the trusted TLS certificate.
*/
tlsTrustCertsFilePath?: string;
/**
* Whether to enable TLS hostname verification.
* Default: false
*/
tlsValidateHostname?: boolean;
/**
* Whether the Pulsar client accepts an untrusted TLS certificate from the broker.
* Default: false |
/**
* Interval between stats updates. Stats are activated by a positive statsIntervalInSeconds; the value should be >= 1 second.
* Default: 600
*/
statsIntervalInSeconds?: number;
}
export class Client {
constructor(opts: ClientOpts);
createProducer(data: ProducerOpts): Promise<Producer>;
createReader(data: ReaderOpts): Promise<Reader>;
subscribe(data: SubscribeOpts): Promise<Consumer>;
close(): Promise<null>;
}
export class MessageId {
/**
* MessageId representing the earliest, or oldest available message stored in the topic.
*/
static earliest(): MessageId;
/**
* MessageId representing the latest, or last published message in the topic.
*/
static latest(): MessageId;
/**
* Deserialize a message id object from a Buffer.
* @param data
*/
static deserialize(data: Buffer): MessageId;
/**
* Serialize the message id into a Buffer for storing.
*/
serialize(): Buffer;
/**
* Get message id as String.
*/
toString(): string;
}
export class Message {
/**
* Getter method of topic name.
*/
getTopicName(): string;
/**
* Getter method of properties.
*/
getProperties(): MessageProperties;
/**
* Getter method of message data.
*/
getData(): Buffer;
/**
* Getter method of message id object.
*/
getMessageId(): MessageId;
/**
* Getter method of publish timestamp.
*/
getPublishTimestamp(): number;
/**
* Getter method of event timestamp.
*/
getEventTimestamp(): number;
/**
* Getter method of partition key.
*/
getPartitionKey(): string;
}
export interface ProducerOpts {
/**
* The Pulsar topic to which the producer will publish messages.
*/
topic: string;
/**
* A name for the producer. If you do not explicitly assign a name, Pulsar will automatically generate a globally unique name.
* If you choose to explicitly assign a name, it will need to be unique across all Pulsar clusters, otherwise the creation operation will throw an error.
*/
producerName?: string;
/**
* When publishing a message to a topic, the producer will wait for an acknowledgment from the responsible Pulsar broker.
* If a message is not acknowledged within the threshold set by this parameter, an error will be thrown. If you set sendTimeoutMs to -1,
* the timeout will be set to infinity (and thus removed). Removing the send timeout is recommended when using Pulsar's message de-duplication feature.
* Default: 30000
*/
sendTimeoutMs?: number;
/**
* The initial sequence ID of the message. When the producer sends a message, it attaches a sequence ID that is incremented on each send.
*/
initialSequenceId?: number;
/**
* The maximum size of the queue holding pending messages (i.e. messages waiting to receive an acknowledgment from the broker).
* By default, when the queue is full all calls to the send method will fail unless blockIfQueueFull is set to true.
* Default: 1000
*/
maxPendingMessages?: number;
/**
* The maximum size of the sum of partition's pending queue.
* Default: 50000
*/
maxPendingMessagesAcrossPartitions?: number;
/**
* If set to true, the producer's send method will wait when the outgoing message queue is full rather than failing and throwing an error
* (the size of that queue is dictated by the maxPendingMessages parameter); if set to false (the default), send operations will fail and
* throw an error when the queue is full.
* Default: false
*/
blockIfQueueFull?: boolean;
/**
* The message routing logic (for producers on partitioned topics). This logic is applied only when no key is set on messages. The available
* options are: round robin (RoundRobinDistribution), or publishing all messages to a single partition (UseSinglePartition).
* Default: UseSinglePartition
*/
messageRoutingMode?: MessageRoutingModes;
/**
* The hashing function that determines the partition on which a particular message is published (partitioned topics only).
* The available options are: JavaStringHash (the equivalent of String.hashCode() in Java), Murmur3_32Hash (applies the Murmur3 hashing function),
* or BoostHash (applies the hashing function from C++'s Boost library).
* Default: BoostHash
*/
hashingScheme?: HashingScheme;
/**
* The message data compression type used by the producer. The available options are LZ4 and Zlib.
* Default: No Compression
*/
compressionType?: CompressionType;
/**
* If set to true, the producer sends messages in batches.
* Default: true
*/
batchingEnabled?: boolean;
/**
* The maximum delay before a pending batch of messages is sent.
* Default: 10
*/
batchingMaxPublishDelayMs?: number;
/**
* The maximum number of messages sent in each batch.
* Default: 1000
*/
batchingMaxMessages?: number;
/**
* The metadata of producer.
*/
properties?: MessageProperties;
}
export interface ProducerMessage {
/**
* The actual data payload of the message.
*/
data: Buffer;
/**
* An object for any application-specific metadata attached to the message.
*/
properties?: MessageProperties;
/**
* The timestamp associated with the message.
*/
eventTimestamp?: number;
/**
* The sequence ID of the message.
*/
sequenceId?: number;
/**
* The optional key associated with the message (particularly useful for things like topic compaction).
*/
partitionKey?: string;
/**
* The clusters to which this message will be replicated. Pulsar brokers handle message replication automatically;
* you should only change this setting if you want to override the broker default.
*/
replicationClusters?: string[];
}
export class Producer {
/**
* Publishes a message to the producer's topic. The returned Promise resolves once the message is
* successfully acknowledged by the Pulsar broker, and rejects if an error occurs.
* @param message Message to be published.
*/
send(message: ProducerMessage): Promise<null>;
/**
* Sends the messages in the send queue to the Pulsar broker. The returned Promise resolves once the
* messages are successfully acknowledged by the broker, and rejects if an error occurs.
*/
flush(): Promise<null>;
/**
* Closes the producer and releases all resources allocated to it. Once close() is called, no more messages will be accepted from the publisher.
* The returned Promise resolves after all pending publish requests have been persisted by Pulsar.
* If an error is thrown, no pending writes will be retried.
*/
close(): Promise<null>;
}
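// Editor's sketch (not part of the original declarations): subscribing with
// the SubscribeOpts interface declared below. The Consumer type returned by
// subscribe() is not shown in this excerpt; receive() and acknowledge() are
// assumed from the real pulsar-client package.
async function exampleConsume(): Promise<void> {
  const client = new Client({ serviceUrl: 'pulsar://localhost:6650' });
  const consumer = await client.subscribe({
    topic: 'persistent://public/default/demo',
    subscription: 'demo-sub',
  });
  const msg = await consumer.receive(); // assumed Consumer API
  console.log(msg.getData().toString());
  consumer.acknowledge(msg); // assumed Consumer API
  await client.close();
}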
export interface SubscribeOpts {
/**
* The Pulsar topic on which the consumer will establish a subscription and listen for messages.
*/
topic: string;
/**
* The subscription name for this consumer.
*/
subscription: string;
/**
* Available options are Exclusive, Shared, and Failover.
* Default: Exclusive
*/
subscriptionType?: SubscriptionType;
/**
* Acknowledge timeout in milliseconds.
* Default: 0
*/
ackTimeoutMs?: number;
/**
* Sets the size of the consumer's receiver queue, i.e. the number of messages that can be accumulated by the consumer before the application calls receive.
* A value higher than the default could increase consumer throughput, though at the expense of more memory utilization.
* Default: 1000
*/
receiverQueueSize?: number;
/**
* Set the max total receiver queue size across partitions.
* This setting will be used to reduce the receiver queue size for individual partitions if the total exceeds this value.
* Default: 50000
*/
receiverQueueSizeAcrossPartitions?: number;
/**
* The name of consumer. Currently, failover | */
tlsAllowInsecureConnection?: boolean; | random_line_split |
main.rs | calculate_hash_proof(
self.index.clone(),
self.previus_hash.clone(),
self.timestamp.clone(),
self.data.clone(),
PROOF.clone(),
)
.0
}
}
#[derive(Serialize, Deserialize, Debug, Clone)]
struct Transaction {
sender: String,
reciver: String,
amount: u64,
hash: Option<String>,
}
impl Transaction {
fn new(sender: Wallet, reciver: Wallet, amount: u64) -> Transaction {
let sender = sender.pub_key.clone();
let reciver = reciver.pub_key.clone();
let x = Transaction {
sender,
reciver,
amount,
hash: None,
};
calculate_hash_transaction(x)
}
}
fn calculate_hash_transaction(transaction: Transaction) -> Transaction {
use sha3::{Digest, Sha3_512};
let mut hasher = Sha3_512::new();
hasher.update(transaction.sender.clone());
hasher.update(transaction.reciver.clone());
hasher.update(transaction.amount.to_string().as_bytes());
let hash = format!("{:20x}", hasher.finalize());
let x = Transaction {
sender: transaction.sender,
reciver: transaction.reciver,
amount: transaction.amount,
hash: Some(hash),
};
x
}
impl Display for Transaction {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{:?}-{:?}-{}", self.sender, self.reciver, self.amount)
}
}
trait Createblock {
fn new(
index: u64,
previus_hash: String,
timestamp: String,
data: Vec<Transaction>,
hash: String,
proof: u128,
) -> Self;
}
impl Createblock for Block {
fn new(
index: u64,
previus_hash: String,
timestamp: String,
data: Vec<Transaction>,
hash: String,
proof: u128,
) -> Block {
Block {
index,
previus_hash,
timestamp,
data,
hash,
proof: Some(proof),
}
}
}
trait Blockchain {
fn new() -> Self;
}
impl Blockchain for BlockChain {
fn new() -> BlockChain {
BlockChain {
blocks: Vec::new(),
pending_transactions: Vec::new(),
}
}
}
impl BlockChain {
fn add_block_thirst(&mut self, block: Block) {
self.blocks.push(block);
}
fn add_transaction(&mut self, transaction: Transaction) {
self.pending_transactions.push(transaction);
}
fn add_block(&mut self, data: Vec<Transaction>, proof: &str) {
let (calculate_hash, proof) = calculate_hash_proof(
self.blocks
.last()
.expect("Can't get previous block index")
.index
+ 1,
self.blocks
.last()
.expect("Can't get previous block hash")
.hash
.clone(),
chrono::offset::Utc::now()
.timestamp_millis()
.to_string()
.to_string(),
data.clone(),
proof,
);
self.add_block_thirst(Block {
index: self
.blocks
.last()
.expect("Can't get previous block index")
.index
+ 1,
previus_hash: self
.blocks
.last()
.expect("Can't get previous block hash")
.hash
.clone(),
timestamp: chrono::offset::Utc::now().timestamp_millis().to_string(),
data,
hash: calculate_hash,
proof: Some(proof),
})
}
fn get_pendding_transactions(&self) -> Vec<Transaction> {
self.pending_transactions.clone()
}
fn clear_pendding_transactions(&mut self) {
self.pending_transactions.clear();
}
fn is_good(&self) -> bool {
let blocksss = self.blocks.clone();
for x in 1..self.blocks.len() {
// println!("{} || {}", self.blocks[x].test_block(),self.blocks[x + 1].previus_hash);
// block x's stored previus_hash must match the recomputed hash of block x - 1
let test = blocksss[x - 1].test_block();
let prev = blocksss[x].previus_hash.clone();
if test != prev {
// println!("||||||||||||||| {:?} |||||||||||||||||||| {:?} ||||||||||||||||||", self.blocks[x].test_block(), self.blocks[x + 1].previus_hash);
return false;
}
}
return true;
}
// function for mining pending transactions
fn mine(&mut self, proof: &str) {
let data = self.get_pendding_transactions();
if data.len() > 0 {
self.add_block(data, proof);
self.clear_pendding_transactions();
}
}
// how much money the user has using wallet
fn get_balance(&self, wallet: Wallet) -> u128 {
let mut balance: u128 = 0;
for x in self.blocks.clone() {
for y in x.data.clone() {
if y.sender == wallet.pub_key |
if y.reciver == wallet.pub_key {
let amount = y.amount as u128;
balance += amount;
}
}
}
println!("{}", balance);
balance
}
}
impl Display for Block {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(
f,
"{} {:?} {:?} {:?} {:?}",
self.index, self.previus_hash, self.timestamp, self.data, self.hash
)
}
}
fn calculate_hash_proof(
index: u64,
previus_hash: String,
timestamp: String,
data: Vec<Transaction>,
proof: &str,
) -> (String, u128) {
use sha3::{Digest, Sha3_512};
let proof = proof.to_owned();
let mut hasher = Sha3_512::new();
let before = index.to_string().parse::<String>().unwrap()
+ &previus_hash
+ &format!("{:?}", timestamp)
+ &format!("{:?}", data);
hasher.update(before.as_bytes());
let steps: u128 = std::u128::MAX;
let mut i = 0;
for x in 0..steps {
if format!("{:02x}", hasher.clone().finalize())[..proof.len()] == proof {
println!(
"Mined! : {} difficulty: {}",
format!("{:02x}", hasher.clone().finalize()),
x
);
i = x;
break;
} else {
hasher.update(x.to_string().as_bytes());
}
}
(format!("{:02x}", hasher.finalize()), i)
}
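// Editor's sketch (not part of the original source): a quick check that the
// mining loop above really yields a hash carrying the requested prefix.
// Assumes PROOF-style prefixes are short hex strings such as "0" or "00";
// longer prefixes make the search exponentially slower.
#[cfg(test)]
mod pow_tests {
    use super::*;

    #[test]
    fn mined_hash_starts_with_the_proof_prefix() {
        let (hash, steps) = calculate_hash_proof(0, String::new(), "0".to_string(), Vec::new(), "0");
        assert!(hash.starts_with("0"));
        // `steps` records how many extra hasher updates the search needed.
        assert!(steps < std::u128::MAX);
    }
}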
fn main() {
let olek = generate_wallet();
let anna = generate_wallet();
let mut blockchin: BlockChain = Blockchain::new();
let s: Transaction = Transaction::new(olek.clone(), anna, 22);
let time = chrono::offset::Utc::now()
.timestamp_millis()
.to_string()
.to_string();
let calc = calculate_hash_proof(0, "".to_string(), time.clone(), vec![s.clone()], PROOF);
let start: Block = Block::new(
0,
"".to_string(),
time,
vec![s.clone()],
calc.0,
calc.1,
);
blockchin.add_block_thirst(start);
// end of start code
// let mut transactions = vec![];
// for x in 0..=33 {
// let a: Transaction = Transaction::new(x.to_string(), (x + 10).to_string(), x + 100);
// transactions.push(a);
// }
// for x in transactions {
// blockchin.add_transaction(x);
// }
// let mut transaction = vec![];
// for _ in 0..10 {
// std::thread::sleep(std::time::Duration::from_millis(1000));
// let one = generate_wallet();
// // sleep for 1 second
// std::thread::sleep(std::time::Duration::from_millis(1000));
// let two = generate_wallet();
// let s: Transaction = Transaction::new(one, two, 100);
// blockchin.add_transaction(s.clone());
// transaction.push(s);
// }
blockchin.mine(PROOF);
// create_pending(&mut blockchin, PROOF);
let json = serde_json::to_string_pretty(&blockchin).unwrap();
println!("{}", json);
std::fs::write("json.json", json).expect("Unable to write file");
if blockchin.is_good() {
println!("XD")
}
let nic = generate_wallet();
// check user balance
println!(" is {}", blockchin.get_balance(olek));
// blockchin
// let contents =
// std::fs::read_to_string("json.json").expect("Something went wrong reading the file");
// let bc: BlockChain = serde_json::from_str(&contents).unwrap();
// if bc.is_good() | {
let amount = y.amount as u128;
// sending decreases the balance (saturating to avoid u128 underflow)
balance = balance.saturating_sub(amount);
} | conditional_block |
main.rs | calculate_hash_proof(
self.index.clone(),
self.previus_hash.clone(),
self.timestamp.clone(),
self.data.clone(),
PROOF.clone(),
)
.0
}
}
#[derive(Serialize, Deserialize, Debug, Clone)]
struct | {
sender: String,
reciver: String,
amount: u64,
hash: Option<String>,
}
impl Transaction {
fn new(sender: Wallet, reciver: Wallet, amount: u64) -> Transaction {
let sender = sender.pub_key.clone();
let reciver = reciver.pub_key.clone();
let x = Transaction {
sender,
reciver,
amount,
hash: None,
};
calculate_hash_transaction(x)
}
}
fn calculate_hash_transaction(transaction: Transaction) -> Transaction {
use sha3::{Digest, Sha3_512};
let mut hasher = Sha3_512::new();
hasher.update(transaction.sender.clone());
hasher.update(transaction.reciver.clone());
hasher.update(transaction.amount.to_string().as_bytes());
let hash = format!("{:20x}", hasher.finalize());
let x = Transaction {
sender: transaction.sender,
reciver: transaction.reciver,
amount: transaction.amount,
hash: Some(hash),
};
x
}
impl Display for Transaction {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{:?}-{:?}-{}", self.sender, self.reciver, self.amount)
}
}
trait Createblock {
fn new(
index: u64,
previus_hash: String,
timestamp: String,
data: Vec<Transaction>,
hash: String,
proof: u128,
) -> Self;
}
impl Createblock for Block {
fn new(
index: u64,
previus_hash: String,
timestamp: String,
data: Vec<Transaction>,
hash: String,
proof: u128,
) -> Block {
Block {
index,
previus_hash,
timestamp,
data,
hash,
proof: Some(proof),
}
}
}
trait Blockchain {
fn new() -> Self;
}
impl Blockchain for BlockChain {
fn new() -> BlockChain {
BlockChain {
blocks: Vec::new(),
pending_transactions: Vec::new(),
}
}
}
impl BlockChain {
fn add_block_thirst(&mut self, block: Block) {
self.blocks.push(block);
}
fn add_transaction(&mut self, transaction: Transaction) {
self.pending_transactions.push(transaction);
}
fn add_block(&mut self, data: Vec<Transaction>, proof: &str) {
let (calculate_hash, proof) = calculate_hash_proof(
self.blocks
.last()
.expect("Can't get previous block index")
.index
+ 1,
self.blocks
.last()
.expect("Can't get previous block hash")
.hash
.clone(),
chrono::offset::Utc::now()
.timestamp_millis()
.to_string()
.to_string(),
data.clone(),
proof,
);
self.add_block_thirst(Block {
index: self
.blocks
.last()
.expect("Can't get previous block index")
.index
+ 1,
previus_hash: self
.blocks
.last()
.expect("Can't get previous block hash")
.hash
.clone(),
timestamp: chrono::offset::Utc::now().timestamp_millis().to_string(),
data,
hash: calculate_hash,
proof: Some(proof),
})
}
fn get_pendding_transactions(&self) -> Vec<Transaction> {
self.pending_transactions.clone()
}
fn clear_pendding_transactions(&mut self) {
self.pending_transactions.clear();
}
fn is_good(&self) -> bool {
let blocksss = self.blocks.clone();
for x in 1..self.blocks.len() {
// println!("{} || {}", self.blocks[x].test_block(),self.blocks[x + 1].previus_hash);
// block x's stored previus_hash must match the recomputed hash of block x - 1
let test = blocksss[x - 1].test_block();
let prev = blocksss[x].previus_hash.clone();
if test != prev {
// println!("||||||||||||||| {:?} |||||||||||||||||||| {:?} ||||||||||||||||||", self.blocks[x].test_block(), self.blocks[x + 1].previus_hash);
return false;
}
}
return true;
}
// function for mining pending transactions
fn mine(&mut self, proof: &str) {
let data = self.get_pendding_transactions();
if data.len() > 0 {
self.add_block(data, proof);
self.clear_pendding_transactions();
}
}
// how much money the user has using wallet
fn get_balance(&self, wallet: Wallet) -> u128 {
let mut balance: u128 = 0;
for x in self.blocks.clone() {
for y in x.data.clone() {
if y.sender == wallet.pub_key {
let amount = y.amount as u128;
// sending decreases the balance (saturating to avoid u128 underflow)
balance = balance.saturating_sub(amount);
}
if y.reciver == wallet.pub_key {
let amount = y.amount as u128;
balance += amount;
}
}
}
println!("{}", balance);
balance
}
}
impl Display for Block {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(
f,
"{} {:?} {:?} {:?} {:?}",
self.index, self.previus_hash, self.timestamp, self.data, self.hash
)
}
}
fn calculate_hash_proof(
index: u64,
previus_hash: String,
timestamp: String,
data: Vec<Transaction>,
proof: &str,
) -> (String, u128) {
use sha3::{Digest, Sha3_512};
let proof = proof.to_owned();
let mut hasher = Sha3_512::new();
let before = index.to_string().parse::<String>().unwrap()
+ &previus_hash
+ &format!("{:?}", timestamp)
+ &format!("{:?}", data);
hasher.update(before.as_bytes());
let steps: u128 = std::u128::MAX;
let mut i = 0;
for x in 0..steps {
if format!("{:02x}", hasher.clone().finalize())[..proof.len()] == proof {
println!(
"Mined! : {} difficulty: {}",
format!("{:02x}", hasher.clone().finalize()),
x
);
i = x;
break;
} else {
hasher.update(x.to_string().as_bytes());
}
}
(format!("{:02x}", hasher.finalize()), i)
}
fn main() {
let olek = generate_wallet();
let anna = generate_wallet();
let mut blockchin: BlockChain = Blockchain::new();
let s: Transaction = Transaction::new(olek.clone(), anna, 22);
let time = chrono::offset::Utc::now()
.timestamp_millis()
.to_string()
.to_string();
let calc = calculate_hash_proof(0, "".to_string(), time.clone(), vec![s.clone()], PROOF);
let start: Block = Block::new(
0,
"".to_string(),
time,
vec![s.clone()],
calc.0,
calc.1,
);
blockchin.add_block_thirst(start);
// end of start code
// let mut transactions = vec![];
// for x in 0..=33 {
// let a: Transaction = Transaction::new(x.to_string(), (x + 10).to_string(), x + 100);
// transactions.push(a);
// }
// for x in transactions {
// blockchin.add_transaction(x);
// }
// let mut transaction = vec![];
// for _ in 0..10 {
// std::thread::sleep(std::time::Duration::from_millis(1000));
// let one = generate_wallet();
// // sleep for 1 second
// std::thread::sleep(std::time::Duration::from_millis(1000));
// let two = generate_wallet();
// let s: Transaction = Transaction::new(one, two, 100);
// blockchin.add_transaction(s.clone());
// transaction.push(s);
// }
blockchin.mine(PROOF);
// create_pending(&mut blockchin, PROOF);
let json = serde_json::to_string_pretty(&blockchin).unwrap();
println!("{}", json);
std::fs::write("json.json", json).expect("Unable to write file");
if blockchin.is_good() {
println!("XD")
}
let nic = generate_wallet();
// check user balance
println!(" is {}", blockchin.get_balance(olek));
// blockchin
// let contents =
// std::fs::read_to_string("json.json").expect("Something went wrong reading the file");
// let bc: BlockChain = serde_json::from_str(&contents).unwrap();
// if bc.is_good() {
| Transaction | identifier_name |
main.rs | {
calculate_hash_proof(
self.index.clone(),
self.previus_hash.clone(),
self.timestamp.clone(),
self.data.clone(),
PROOF.clone(),
)
.0
}
}
#[derive(Serialize, Deserialize, Debug, Clone)]
struct Transaction {
sender: String,
reciver: String,
amount: u64,
hash: Option<String>,
}
impl Transaction {
fn new(sender: Wallet, reciver: Wallet, amount: u64) -> Transaction {
let sender = sender.pub_key.clone();
let reciver = reciver.pub_key.clone();
let x = Transaction {
sender,
reciver,
amount,
hash: None,
};
calculate_hash_transaction(x)
}
}
fn calculate_hash_transaction(transaction: Transaction) -> Transaction {
use sha3::{Digest, Sha3_512};
let mut hasher = Sha3_512::new();
hasher.update(transaction.sender.clone());
hasher.update(transaction.reciver.clone());
hasher.update(transaction.amount.to_string().as_bytes());
let hash = format!("{:20x}", hasher.finalize());
let x = Transaction {
sender: transaction.sender,
reciver: transaction.reciver,
amount: transaction.amount,
hash: Some(hash),
};
x
}
impl Display for Transaction {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{:?}-{:?}-{}", self.sender, self.reciver, self.amount)
}
}
trait Createblock {
fn new(
index: u64,
previus_hash: String,
timestamp: String,
data: Vec<Transaction>,
hash: String,
proof: u128,
) -> Self;
}
impl Createblock for Block {
fn new(
index: u64,
previus_hash: String,
timestamp: String,
data: Vec<Transaction>,
hash: String,
proof: u128,
) -> Block {
Block {
index,
previus_hash,
timestamp,
data,
hash,
proof: Some(proof),
}
}
}
trait Blockchain {
fn new() -> Self;
}
impl Blockchain for BlockChain {
fn new() -> BlockChain {
BlockChain {
blocks: Vec::new(),
pending_transactions: Vec::new(),
}
}
}
impl BlockChain {
fn add_block_thirst(&mut self, block: Block) {
self.blocks.push(block);
}
fn add_transaction(&mut self, transaction: Transaction) {
self.pending_transactions.push(transaction);
}
fn add_block(&mut self, data: Vec<Transaction>, proof: &str) {
let (calculate_hash, proof) = calculate_hash_proof(
self.blocks
.last()
.expect("Can't get previous block index")
.index
+ 1,
self.blocks
.last()
.expect("Can't get previous block hash")
.hash
.clone(),
chrono::offset::Utc::now()
.timestamp_millis()
.to_string()
.to_string(),
data.clone(),
proof,
);
self.add_block_thirst(Block {
index: self
.blocks
.last()
.expect("Can't get previous block index")
.index
+ 1,
previus_hash: self
.blocks
.last()
.expect("Can't get previous block hash")
.hash
.clone(),
timestamp: chrono::offset::Utc::now().timestamp_millis().to_string(),
data,
hash: calculate_hash,
proof: Some(proof),
})
}
fn get_pendding_transactions(&self) -> Vec<Transaction> {
self.pending_transactions.clone()
}
fn clear_pendding_transactions(&mut self) {
self.pending_transactions.clear();
}
fn is_good(&self) -> bool {
let blocksss = self.blocks.clone();
for x in 1..self.blocks.len() {
// println!("{} || {}", self.blocks[x].test_block(),self.blocks[x + 1].previus_hash);
// block x's stored previus_hash must match the recomputed hash of block x - 1
let test = blocksss[x - 1].test_block();
let prev = blocksss[x].previus_hash.clone();
if test != prev {
// println!("||||||||||||||| {:?} |||||||||||||||||||| {:?} ||||||||||||||||||", self.blocks[x].test_block(), self.blocks[x + 1].previus_hash);
return false;
}
}
return true;
}
// function for mining pending transactions
fn mine(&mut self, proof: &str) {
let data = self.get_pendding_transactions();
if data.len() > 0 {
self.add_block(data, proof);
self.clear_pendding_transactions();
}
}
// how much money the user has using wallet
fn get_balance(&self, wallet: Wallet) -> u128 {
let mut balance: u128 = 0;
for x in self.blocks.clone() {
for y in x.data.clone() {
if y.sender == wallet.pub_key {
let amount = y.amount as u128;
// sending decreases the balance (saturating to avoid u128 underflow)
balance = balance.saturating_sub(amount);
}
if y.reciver == wallet.pub_key {
let amount = y.amount as u128;
balance += amount;
}
}
}
println!("{}", balance);
balance
}
}
impl Display for Block { | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(
f,
"{} {:?} {:?} {:?} {:?}",
self.index, self.previus_hash, self.timestamp, self.data, self.hash
)
}
}
fn calculate_hash_proof(
index: u64,
previus_hash: String,
timestamp: String,
data: Vec<Transaction>,
proof: &str,
) -> (String, u128) {
use sha3::{Digest, Sha3_512};
let proof = proof.to_owned();
let mut hasher = Sha3_512::new();
let before = index.to_string().parse::<String>().unwrap()
+ &previus_hash
+ &format!("{:?}", timestamp)
+ &format!("{:?}", data);
hasher.update(before.as_bytes());
let steps: u128 = std::u128::MAX;
let mut i = 0;
for x in 0..steps {
if format!("{:02x}", hasher.clone().finalize())[..proof.len()] == proof {
println!(
"Mined! : {} difficulty: {}",
format!("{:02x}", hasher.clone().finalize()),
x
);
i = x;
break;
} else {
hasher.update(x.to_string().as_bytes());
}
}
(format!("{:02x}", hasher.finalize()), i)
}
fn main() {
let olek = generate_wallet();
let anna = generate_wallet();
let mut blockchin: BlockChain = Blockchain::new();
let s: Transaction = Transaction::new(olek.clone(), anna, 22);
let time = chrono::offset::Utc::now()
.timestamp_millis()
.to_string()
.to_string();
let calc = calculate_hash_proof(0, "".to_string(), time.clone(), vec![s.clone()], PROOF);
let start: Block = Block::new(
0,
"".to_string(),
time,
vec![s.clone()],
calc.0,
calc.1,
);
blockchin.add_block_thirst(start);
// end of start code
// let mut transactions = vec![];
// for x in 0..=33 {
// let a: Transaction = Transaction::new(x.to_string(), (x + 10).to_string(), x + 100);
// transactions.push(a);
// }
// for x in transactions {
// blockchin.add_transaction(x);
// }
// let mut transaction = vec![];
// for _ in 0..10 {
// std::thread::sleep(std::time::Duration::from_millis(1000));
// let one = generate_wallet();
// // sleep for 1 second
// std::thread::sleep(std::time::Duration::from_millis(1000));
// let two = generate_wallet();
// let s: Transaction = Transaction::new(one, two, 100);
// blockchin.add_transaction(s.clone());
// transaction.push(s);
// }
blockchin.mine(PROOF);
// create_pending(&mut blockchin, PROOF);
let json = serde_json::to_string_pretty(&blockchin).unwrap();
println!("{}", json);
std::fs::write("json.json", json).expect("Unable to write file");
if blockchin.is_good() {
println!("XD")
}
let nic = generate_wallet();
// check user balance
println!(" is {}", blockchin.get_balance(olek));
// blockchin
// let contents =
// std::fs::read_to_string("json.json").expect("Something went wrong reading the file");
// let bc: BlockChain = serde_json::from_str(&contents).unwrap();
// if bc.is_good() {
| random_line_split | |
main.rs | calculate_hash_proof(
self.index.clone(),
self.previus_hash.clone(),
self.timestamp.clone(),
self.data.clone(),
PROOF.clone(),
)
.0
}
}
#[derive(Serialize, Deserialize, Debug, Clone)]
struct Transaction {
sender: String,
reciver: String,
amount: u64,
hash: Option<String>,
}
impl Transaction {
fn new(sender: Wallet, reciver: Wallet, amount: u64) -> Transaction {
let sender = sender.pub_key.clone();
let reciver = reciver.pub_key.clone();
let x = Transaction {
sender,
reciver,
amount,
hash: None,
};
calculate_hash_transaction(x)
}
}
fn calculate_hash_transaction(transaction: Transaction) -> Transaction {
use sha3::{Digest, Sha3_512};
let mut hasher = Sha3_512::new();
hasher.update(transaction.sender.clone());
hasher.update(transaction.reciver.clone());
hasher.update(transaction.amount.to_string().as_bytes());
let hash = format!("{:20x}", hasher.finalize());
let x = Transaction {
sender: transaction.sender,
reciver: transaction.reciver,
amount: transaction.amount,
hash: Some(hash),
};
x
}
impl Display for Transaction {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result |
}
trait Createblock {
fn new(
index: u64,
previus_hash: String,
timestamp: String,
data: Vec<Transaction>,
hash: String,
proof: u128,
) -> Self;
}
impl Createblock for Block {
fn new(
index: u64,
previus_hash: String,
timestamp: String,
data: Vec<Transaction>,
hash: String,
proof: u128,
) -> Block {
Block {
index,
previus_hash,
timestamp,
data,
hash,
proof: Some(proof),
}
}
}
trait Blockchain {
fn new() -> Self;
}
impl Blockchain for BlockChain {
fn new() -> BlockChain {
BlockChain {
blocks: Vec::new(),
pending_transactions: Vec::new(),
}
}
}
impl BlockChain {
fn add_block_thirst(&mut self, block: Block) {
self.blocks.push(block);
}
fn add_transaction(&mut self, transaction: Transaction) {
self.pending_transactions.push(transaction);
}
fn add_block(&mut self, data: Vec<Transaction>, proof: &str) {
let (calculate_hash, proof) = calculate_hash_proof(
self.blocks
.last()
.expect("Can't get previous block index")
.index
+ 1,
self.blocks
.last()
.expect("Can't get previous block hash")
.hash
.clone(),
chrono::offset::Utc::now()
.timestamp_millis()
.to_string()
.to_string(),
data.clone(),
proof,
);
self.add_block_thirst(Block {
index: self
.blocks
.last()
.expect("Can't get previous block index")
.index
+ 1,
previus_hash: self
.blocks
.last()
.expect("Can't get previous block hash")
.hash
.clone(),
timestamp: chrono::offset::Utc::now().timestamp_millis().to_string(),
data,
hash: calculate_hash,
proof: Some(proof),
})
}
fn get_pendding_transactions(&self) -> Vec<Transaction> {
self.pending_transactions.clone()
}
fn clear_pendding_transactions(&mut self) {
self.pending_transactions.clear();
}
fn is_good(&self) -> bool {
let blocksss = self.blocks.clone();
for x in 1..self.blocks.len() {
// println!("{} || {}", self.blocks[x].test_block(),self.blocks[x + 1].previus_hash);
// block x's stored previus_hash must match the recomputed hash of block x - 1
let test = blocksss[x - 1].test_block();
let prev = blocksss[x].previus_hash.clone();
if test != prev {
// println!("||||||||||||||| {:?} |||||||||||||||||||| {:?} ||||||||||||||||||", self.blocks[x].test_block(), self.blocks[x + 1].previus_hash);
return false;
}
}
return true;
}
// function for mining pending transactions
fn mine(&mut self, proof: &str) {
let data = self.get_pendding_transactions();
if data.len() > 0 {
self.add_block(data, proof);
self.clear_pendding_transactions();
}
}
// how much money the user has using wallet
fn get_balance(&self, wallet: Wallet) -> u128 {
let mut balance: u128 = 0;
for x in self.blocks.clone() {
for y in x.data.clone() {
if y.sender == wallet.pub_key {
let amount = y.amount as u128;
// sending decreases the balance (saturating to avoid u128 underflow)
balance = balance.saturating_sub(amount);
}
if y.reciver == wallet.pub_key {
let amount = y.amount as u128;
balance += amount;
}
}
}
println!("{}", balance);
balance
}
}
impl Display for Block {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(
f,
"{} {:?} {:?} {:?} {:?}",
self.index, self.previus_hash, self.timestamp, self.data, self.hash
)
}
}
fn calculate_hash_proof(
index: u64,
previus_hash: String,
timestamp: String,
data: Vec<Transaction>,
proof: &str,
) -> (String, u128) {
use sha3::{Digest, Sha3_512};
let proof = proof.to_owned();
let mut hasher = Sha3_512::new();
let before = index.to_string().parse::<String>().unwrap()
+ &previus_hash
+ &format!("{:?}", timestamp)
+ &format!("{:?}", data);
hasher.update(before.as_bytes());
let steps: u128 = std::u128::MAX;
let mut i = 0;
for x in 0..steps {
if format!("{:02x}", hasher.clone().finalize())[..proof.len()] == proof {
println!(
"Mined! : {} difficulty: {}",
format!("{:02x}", hasher.clone().finalize()),
x
);
i = x;
break;
} else {
hasher.update(x.to_string().as_bytes());
}
}
(format!("{:02x}", hasher.finalize()), i)
}
fn main() {
let olek = generate_wallet();
let anna = generate_wallet();
let mut blockchin: BlockChain = Blockchain::new();
let s: Transaction = Transaction::new(olek.clone(), anna, 22);
let time = chrono::offset::Utc::now()
.timestamp_millis()
.to_string()
.to_string();
let calc = calculate_hash_proof(0, "".to_string(), time.clone(), vec![s.clone()], PROOF);
let start: Block = Block::new(
0,
"".to_string(),
time,
vec![s.clone()],
calc.0,
calc.1,
);
blockchin.add_block_thirst(start);
// end of start code
// let mut transactions = vec![];
// for x in 0..=33 {
// let a: Transaction = Transaction::new(x.to_string(), (x + 10).to_string(), x + 100);
// transactions.push(a);
// }
// for x in transactions {
// blockchin.add_transaction(x);
// }
// let mut transaction = vec![];
// for _ in 0..10 {
// std::thread::sleep(std::time::Duration::from_millis(1000));
// let one = generate_wallet();
// // sleep for 1 second
// std::thread::sleep(std::time::Duration::from_millis(1000));
// let two = generate_wallet();
// let s: Transaction = Transaction::new(one, two, 100);
// blockchin.add_transaction(s.clone());
// transaction.push(s);
// }
blockchin.mine(PROOF);
// create_pending(&mut blockchin, PROOF);
let json = serde_json::to_string_pretty(&blockchin).unwrap();
println!("{}", json);
std::fs::write("json.json", json).expect("Unable to write file");
if blockchin.is_good() {
println!("XD")
}
let nic = generate_wallet();
// check user balance
println!(" is {}", blockchin.get_balance(olek));
// blockchin
// let contents =
// std::fs::read_to_string("json.json").expect("Something went wrong reading the file");
// let bc: BlockChain = serde_json::from_str(&contents).unwrap();
// if bc.is_good() | {
write!(f, "{:?}-{:?}-{}", self.sender, self.reciver, self.amount)
} | identifier_body |
workloads.go | Ports represents open ports for a service running on a workload
type OpenServicePorts struct {
Address string `json:"address,omitempty"`
Package string `json:"package,omitempty"`
Port int `json:"port,omitempty"`
ProcessName string `json:"process_name,omitempty"`
Protocol int `json:"protocol,omitempty"`
User string `json:"user,omitempty"`
WinServiceName string `json:"win_service_name,omitempty"`
}
// A Workload represents a workload in the PCE
type Workload struct {
Agent *Agent `json:"agent,omitempty"`
CreatedAt string `json:"created_at,omitempty"`
CreatedBy *CreatedBy `json:"created_by,omitempty"`
DataCenter string `json:"data_center,omitempty"`
DataCenterZone string `json:"data_center_zone,omitempty"`
DeleteType string `json:"delete_type,omitempty"`
Deleted *bool `json:"deleted,omitempty"`
DeletedAt string `json:"deleted_at,omitempty"`
DeletedBy *DeletedBy `json:"deleted_by,omitempty"`
Description string `json:"description,omitempty"`
ExternalDataReference string `json:"external_data_reference,omitempty"`
ExternalDataSet string `json:"external_data_set,omitempty"`
Hostname string `json:"hostname,omitempty"`
Href string `json:"href,omitempty"`
Interfaces []*Interface `json:"interfaces,omitempty"`
Labels []*Label `json:"labels,omitempty"`
Name string `json:"name,omitempty"`
Online bool `json:"online,omitempty"`
OsDetail string `json:"os_detail,omitempty"`
OsID string `json:"os_id,omitempty"`
PublicIP string `json:"public_ip,omitempty"`
ServicePrincipalName string `json:"service_principal_name,omitempty"`
ServiceProvider string `json:"service_provider,omitempty"`
Services []*Services `json:"services,omitempty"`
UpdatedAt string `json:"updated_at,omitempty"`
UpdatedBy *UpdatedBy `json:"updated_by,omitempty"`
}
// SecureConnect represents SecureConnect for an Agent on a Workload
type SecureConnect struct {
MatchingIssuerName string `json:"matching_issuer_name,omitempty"`
}
// Services represent the Services running on a Workload
type Services struct {
CreatedAt string `json:"created_at,omitempty"`
OpenServicePorts []*OpenServicePorts `json:"open_service_ports,omitempty"`
UptimeSeconds int `json:"uptime_seconds,omitempty"`
}
// Status represents the Status of an Agent on a Workload
type Status struct {
AgentHealth []*AgentHealth `json:"agent_health,omitempty"`
AgentHealthErrors *AgentHealthErrors `json:"agent_health_errors,omitempty"`
AgentVersion string `json:"agent_version,omitempty"`
FirewallRuleCount int `json:"firewall_rule_count,omitempty"`
FwConfigCurrent bool `json:"fw_config_current,omitempty"`
LastHeartbeatOn string `json:"last_heartbeat_on,omitempty"`
ManagedSince string `json:"managed_since,omitempty"`
SecurityPolicyAppliedAt string `json:"security_policy_applied_at,omitempty"`
SecurityPolicyReceivedAt string `json:"security_policy_received_at,omitempty"`
SecurityPolicyRefreshAt string `json:"security_policy_refresh_at,omitempty"`
SecurityPolicySyncState string `json:"security_policy_sync_state,omitempty"`
UID string `json:"uid,omitempty"`
UptimeSeconds int `json:"uptime_seconds,omitempty"`
}
// GetAllWorkloads returns a slice of workloads in the Illumio PCE.
// The first API call to the PCE does not use the async option.
// If the array length is >=500, it re-runs with async.
func GetAllWorkloads(pce PCE) ([]Workload, APIResponse, error) {
var workloads []Workload
var api APIResponse
// Build the API URL
apiURL, err := url.Parse("https://" + pceSanitization(pce.FQDN) + ":" + strconv.Itoa(pce.Port) + "/api/v2/orgs/" + strconv.Itoa(pce.Org) + "/workloads")
if err != nil {
return nil, api, fmt.Errorf("get all workloads - %s", err)
}
// Call the API
api, err = apicall("GET", apiURL.String(), pce, nil, false)
if err != nil {
return nil, api, fmt.Errorf("get all workloads - %s", err)
}
json.Unmarshal([]byte(api.RespBody), &workloads)
// If length is >= 500, re-run with async
if len(workloads) >= 500 {
api, err = apicall("GET", apiURL.String(), pce, nil, true)
if err != nil {
return nil, api, fmt.Errorf("get all workloads - %s", err)
}
// Unmarshal response to struct
json.Unmarshal([]byte(api.RespBody), &workloads)
}
// Get all labels and create a map
labels, _, err := GetAllLabels(pce)
if err != nil {
return nil, api, fmt.Errorf("get all workloads - get all labels - %s", err)
}
labelMap := make(map[string]Label)
for _, l := range labels {
labelMap[l.Href] = l
}
// Update the workloads array to label values and keys (not just HREFs)
for _, w := range workloads {
for _, l := range w.Labels {
*l = labelMap[l.Href]
}
}
return workloads, api, nil
}
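// Editor's sketch (not part of the original source): listing workloads with
// GetAllWorkloads. The PCE fields mirror those referenced above (FQDN, Port,
// Org); any credential fields on PCE are assumed to be populated elsewhere,
// and "fmt" and "log" are assumed to be imported.
func exampleListWorkloads() {
	pce := PCE{FQDN: "pce.example.com", Port: 8443, Org: 1}
	wklds, _, err := GetAllWorkloads(pce)
	if err != nil {
		log.Fatal(err)
	}
	for _, w := range wklds {
		fmt.Println(w.Hostname, w.Href)
	}
}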
// CreateWorkload creates a new unmanaged workload in the Illumio PCE
func CreateWorkload(pce PCE, workload Workload) (Workload, APIResponse, error) {
var newWL Workload
var api APIResponse
var err error
// Build the API URL
apiURL, err := url.Parse("https://" + pceSanitization(pce.FQDN) + ":" + strconv.Itoa(pce.Port) + "/api/v2/orgs/" + strconv.Itoa(pce.Org) + "/workloads")
if err != nil {
return newWL, api, fmt.Errorf("create workload - %s", err) |
// Call the API
workloadJSON, err := json.Marshal(workload)
if err != nil {
return newWL, api, fmt.Errorf("create workload - %s", err)
}
api, err = apicall("POST", apiURL.String(), pce, workloadJSON, false)
if err != nil {
return newWL, api, fmt.Errorf("create workload - %s", err)
}
// Marshal JSON
json.Unmarshal([]byte(api.RespBody), &newWL)
return newWL, api, nil
}
// UpdateWorkload updates an existing workload in the Illumio PCE
// The provided workload struct must include an Href.
// Properties that cannot be included in the PUT method will be ignored.
func UpdateWorkload(pce PCE, workload Workload) (APIResponse, error) {
var api APIResponse
var err error
// Build the API URL
apiURL, err := url.Parse("https://" + pceSanitization(pce.FQDN) + ":" + strconv.Itoa(pce.Port) + "/api/v2" + workload.Href)
if err != nil {
return api, fmt.Errorf("update workload - %s", err)
}
workload.SanitizePut()
// Call the API
workloadJSON, err := json.Marshal(workload)
if err != nil {
return api, fmt.Errorf("update workload - %s", err)
}
api, err = apicall("PUT", apiURL.String(), pce, workloadJSON, false)
if err != nil {
return api, fmt.Errorf("update workload - %s", err)
}
return api, nil
}
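// Editor's sketch (not part of the original source): the two-step flow the
// UpdateLabel comment below describes -- change the label on the struct,
// then push it with UpdateWorkload. The "env"/"prod" key/value pair is only
// an example.
func exampleRelabelWorkload(pce PCE, w Workload) error {
	if err := w.UpdateLabel(pce, "env", "prod"); err != nil {
		return err
	}
	_, err := UpdateWorkload(pce, w)
	return err
}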
// UpdateLabel updates a workload struct with a new label href.
// It does not call the Illumio API. To reflect the change in your PCE,
// you'd use the UpdateLabel method on the workload struct and then call the UpdateWorkload function.
func (w *Workload) UpdateLabel(pce PCE, key, value string) error {
var updatedLabels []*Label
for _, l := range w.Labels {
x, _, err := GetLabelbyHref(pce, l.Href)
if err != nil {
return fmt.Errorf("error updating workload - %s", err)
}
if x.Key == key {
// Get our new label's href
newLabel, _, err := GetLabelbyKeyValue(pce, key, value)
if err != nil {
return fmt.Errorf("error updating workload - %s", err)
}
// Create the label if it doesn't exist
if newLabel.Href == "" {
createdLabel, _, err := CreateLabel(pce, Label{Key: key, Value: value})
if err != nil {
return fmt.Errorf("error updating workload - %s", err)
}
updatedLabels = append(updatedLabels, &Label{Href: createdLabel.Href})
// If the new label does exist, add it to the slice
} else {
updatedLabels = append(updatedLabels, &Label{Href: newLabel.Href})
}
} else {
updatedLabels = append(updatedLabels, &Label{Href: l.Href})
}
w.Label | } | random_line_split |
workloads.go | represents open ports for a service running on a workload
type OpenServicePorts struct {
Address string `json:"address,omitempty"`
Package string `json:"package,omitempty"`
Port int `json:"port,omitempty"`
ProcessName string `json:"process_name,omitempty"`
Protocol int `json:"protocol,omitempty"`
User string `json:"user,omitempty"`
WinServiceName string `json:"win_service_name,omitempty"`
}
// A Workload represents a workload in the PCE
type Workload struct {
Agent *Agent `json:"agent,omitempty"`
CreatedAt string `json:"created_at,omitempty"`
CreatedBy *CreatedBy `json:"created_by,omitempty"`
DataCenter string `json:"data_center,omitempty"`
DataCenterZone string `json:"data_center_zone,omitempty"`
DeleteType string `json:"delete_type,omitempty"`
Deleted *bool `json:"deleted,omitempty"`
DeletedAt string `json:"deleted_at,omitempty"`
DeletedBy *DeletedBy `json:"deleted_by,omitempty"`
Description string `json:"description,omitempty"`
ExternalDataReference string `json:"external_data_reference,omitempty"`
ExternalDataSet string `json:"external_data_set,omitempty"`
Hostname string `json:"hostname,omitempty"`
Href string `json:"href,omitempty"`
Interfaces []*Interface `json:"interfaces,omitempty"`
Labels []*Label `json:"labels,omitempty"`
Name string `json:"name,omitempty"`
Online bool `json:"online,omitempty"`
OsDetail string `json:"os_detail,omitempty"`
OsID string `json:"os_id,omitempty"`
PublicIP string `json:"public_ip,omitempty"`
ServicePrincipalName string `json:"service_principal_name,omitempty"`
ServiceProvider string `json:"service_provider,omitempty"`
Services []*Services `json:"services,omitempty"`
UpdatedAt string `json:"updated_at,omitempty"`
UpdatedBy *UpdatedBy `json:"updated_by,omitempty"`
}
// SecureConnect represents SecureConnect for an Agent on a Workload
type SecureConnect struct {
MatchingIssuerName string `json:"matching_issuer_name,omitempty"`
}
// Services represent the Services running on a Workload
type Services struct {
CreatedAt string `json:"created_at,omitempty"`
OpenServicePorts []*OpenServicePorts `json:"open_service_ports,omitempty"`
UptimeSeconds int `json:"uptime_seconds,omitempty"`
}
// Status represents the Status of an Agent on a Workload
type Status struct {
AgentHealth []*AgentHealth `json:"agent_health,omitempty"`
AgentHealthErrors *AgentHealthErrors `json:"agent_health_errors,omitempty"`
AgentVersion string `json:"agent_version,omitempty"`
FirewallRuleCount int `json:"firewall_rule_count,omitempty"`
FwConfigCurrent bool `json:"fw_config_current,omitempty"`
LastHeartbeatOn string `json:"last_heartbeat_on,omitempty"`
ManagedSince string `json:"managed_since,omitempty"`
SecurityPolicyAppliedAt string `json:"security_policy_applied_at,omitempty"`
SecurityPolicyReceivedAt string `json:"security_policy_received_at,omitempty"`
SecurityPolicyRefreshAt string `json:"security_policy_refresh_at,omitempty"`
SecurityPolicySyncState string `json:"security_policy_sync_state,omitempty"`
UID string `json:"uid,omitempty"`
UptimeSeconds int `json:"uptime_seconds,omitempty"`
}
// GetAllWorkloads returns a slice of workloads in the Illumio PCE.
// The first API call to the PCE does not use the async option.
// If the array length is >=500, it re-runs with async.
func GetAllWorkloads(pce PCE) ([]Workload, APIResponse, error) {
var workloads []Workload
var api APIResponse
// Build the API URL
apiURL, err := url.Parse("https://" + pceSanitization(pce.FQDN) + ":" + strconv.Itoa(pce.Port) + "/api/v2/orgs/" + strconv.Itoa(pce.Org) + "/workloads")
if err != nil {
return nil, api, fmt.Errorf("get all workloads - %s", err)
}
// Call the API
api, err = apicall("GET", apiURL.String(), pce, nil, false)
if err != nil {
return nil, api, fmt.Errorf("get all workloads - %s", err)
}
json.Unmarshal([]byte(api.RespBody), &workloads)
// If the length is >= 500, re-run with async
if len(workloads) >= 500 {
api, err = apicall("GET", apiURL.String(), pce, nil, true)
if err != nil {
return nil, api, fmt.Errorf("get all workloads - %s", err)
}
// Unmarshal response to struct
json.Unmarshal([]byte(api.RespBody), &workloads)
}
// Get all labels and create a map
labels, _, err := GetAllLabels(pce)
if err != nil {
return nil, api, fmt.Errorf("get all workloads - get all labels - %s", err)
}
labelMap := make(map[string]Label)
for _, l := range labels {
labelMap[l.Href] = l
}
// Update the workloads array to label values and keys (not just HREFs)
for _, w := range workloads {
for _, l := range w.Labels {
*l = labelMap[l.Href]
}
}
return workloads, api, nil
}
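// ExampleGetAllWorkloads is a minimal usage sketch for the function above.
// The PCE connection values are placeholders, and any authentication fields
// on the PCE struct are assumed to be populated elsewhere.
func ExampleGetAllWorkloads() {
    pce := PCE{FQDN: "pce.example.com", Port: 8443, Org: 1} // hypothetical values
    wklds, _, err := GetAllWorkloads(pce)
    if err != nil {
        fmt.Println(err)
        return
    }
    for _, w := range wklds {
        fmt.Println(w.Hostname, w.Href)
    }
}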
// CreateWorkload creates a new unmanaged workload in the Illumio PCE
func CreateWorkload(pce PCE, workload Workload) (Workload, APIResponse, error) {
var newWL Workload
var api APIResponse
var err error
// Build the API URL
apiURL, err := url.Parse("https://" + pceSanitization(pce.FQDN) + ":" + strconv.Itoa(pce.Port) + "/api/v2/orgs/" + strconv.Itoa(pce.Org) + "/workloads")
if err != nil {
return newWL, api, fmt.Errorf("create workload - %s", err)
}
// Call the API
workloadJSON, err := json.Marshal(workload)
if err != nil {
return newWL, api, fmt.Errorf("create workload - %s", err)
}
api, err = apicall("POST", apiURL.String(), pce, workloadJSON, false)
if err != nil {
return newWL, api, fmt.Errorf("create workload - %s", err)
}
// Unmarshal the JSON response into the new workload struct
json.Unmarshal([]byte(api.RespBody), &newWL)
return newWL, api, nil
}
// UpdateWorkload updates an existing workload in the Illumio PCE
// The provided workload struct must include an Href.
// Properties that cannot be included in the PUT method will be ignored.
func UpdateWorkload(pce PCE, workload Workload) (APIResponse, error) {
var api APIResponse
var err error
// Build the API URL
apiURL, err := url.Parse("https://" + pceSanitization(pce.FQDN) + ":" + strconv.Itoa(pce.Port) + "/api/v2" + workload.Href)
if err != nil |
workload.SanitizePut()
// Call the API
workloadJSON, err := json.Marshal(workload)
if err != nil {
return api, fmt.Errorf("update workload - %s", err)
}
api, err = apicall("PUT", apiURL.String(), pce, workloadJSON, false)
if err != nil {
return api, fmt.Errorf("update workload - %s", err)
}
return api, nil
}
// UpdateLabel updates a workload struct with a new label href.
// It does not call the Illumio API. To reflect the change in the PCE,
// call UpdateLabel on the workload struct and then pass the struct to the UpdateWorkload function.
func (w *Workload) UpdateLabel(pce PCE, key, value string) error {
var updatedLabels []*Label
for _, l := range w.Labels {
x, _, err := GetLabelbyHref(pce, l.Href)
if err != nil {
return fmt.Errorf("error updating workload - %s", err)
}
if x.Key == key {
// Get our new label's href
newLabel, _, err := GetLabelbyKeyValue(pce, key, value)
if err != nil {
return fmt.Errorf("error updating workload - %s", err)
}
// Create the label if it doesn't exist
if newLabel.Href == "" {
createdLabel, _, err := CreateLabel(pce, Label{Key: key, Value: value})
if err != nil {
return fmt.Errorf("error updating workload - %s", err)
}
updatedLabels = append(updatedLabels, &Label{Href: createdLabel.Href})
// If the new label does exist, add it to the slice
} else {
updatedLabels = append(updatedLabels, &Label{Href: newLabel.Href})
}
} else {
updatedLabels = append(updatedLabels, &Label{Href: l.Href})
}
w.Label | {
return api, fmt.Errorf("update workload - %s", err)
} | conditional_block |
workloads.go | err)
}
json.Unmarshal([]byte(api.RespBody), &workloads)
// If the length is >= 500, re-run with async
if len(workloads) >= 500 {
api, err = apicall("GET", apiURL.String(), pce, nil, true)
if err != nil {
return nil, api, fmt.Errorf("get all workloads - %s", err)
}
// Unmarshal response to struct
json.Unmarshal([]byte(api.RespBody), &workloads)
}
// Get all labels and create a map
labels, _, err := GetAllLabels(pce)
if err != nil {
return nil, api, fmt.Errorf("get all workloads - get all labels - %s", err)
}
labelMap := make(map[string]Label)
for _, l := range labels {
labelMap[l.Href] = l
}
// Update the workloads array to label values and keys (not just HREFs)
for _, w := range workloads {
for _, l := range w.Labels {
*l = labelMap[l.Href]
}
}
return workloads, api, nil
}
// CreateWorkload creates a new unmanaged workload in the Illumio PCE
func CreateWorkload(pce PCE, workload Workload) (Workload, APIResponse, error) {
var newWL Workload
var api APIResponse
var err error
// Build the API URL
apiURL, err := url.Parse("https://" + pceSanitization(pce.FQDN) + ":" + strconv.Itoa(pce.Port) + "/api/v2/orgs/" + strconv.Itoa(pce.Org) + "/workloads")
if err != nil {
return newWL, api, fmt.Errorf("create workload - %s", err)
}
// Call the API
workloadJSON, err := json.Marshal(workload)
if err != nil {
return newWL, api, fmt.Errorf("create workload - %s", err)
}
api, err = apicall("POST", apiURL.String(), pce, workloadJSON, false)
if err != nil {
return newWL, api, fmt.Errorf("create workload - %s", err)
}
// Unmarshal the JSON response into the new workload struct
json.Unmarshal([]byte(api.RespBody), &newWL)
return newWL, api, nil
}
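// ExampleCreateWorkload sketches creating an unmanaged workload; the hostname,
// name, and label href are illustrative only, and PCE authentication is
// assumed to be configured elsewhere.
func ExampleCreateWorkload(pce PCE) {
    wl := Workload{
        Name:     "db-server-01",
        Hostname: "db-server-01.example.com",
        Labels:   []*Label{{Href: "/orgs/1/labels/42"}}, // hypothetical label href
    }
    created, _, err := CreateWorkload(pce, wl)
    if err != nil {
        fmt.Println(err)
        return
    }
    fmt.Println("created workload:", created.Href)
}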
// UpdateWorkload updates an existing workload in the Illumio PCE
// The provided workload struct must include an Href.
// Properties that cannot be included in the PUT method will be ignored.
func UpdateWorkload(pce PCE, workload Workload) (APIResponse, error) {
var api APIResponse
var err error
// Build the API URL
apiURL, err := url.Parse("https://" + pceSanitization(pce.FQDN) + ":" + strconv.Itoa(pce.Port) + "/api/v2" + workload.Href)
if err != nil {
return api, fmt.Errorf("update workload - %s", err)
}
workload.SanitizePut()
// Call the API
workloadJSON, err := json.Marshal(workload)
if err != nil {
return api, fmt.Errorf("update workload - %s", err)
}
api, err = apicall("PUT", apiURL.String(), pce, workloadJSON, false)
if err != nil {
return api, fmt.Errorf("update workload - %s", err)
}
return api, nil
}
// UpdateLabel updates a workload struct with a new label href.
// It does not call the Illumio API. To reflect the change in the PCE,
// call UpdateLabel on the workload struct and then pass the struct to the UpdateWorkload function.
func (w *Workload) UpdateLabel(pce PCE, key, value string) error {
var updatedLabels []*Label
for _, l := range w.Labels {
x, _, err := GetLabelbyHref(pce, l.Href)
if err != nil {
return fmt.Errorf("error updating workload - %s", err)
}
if x.Key == key {
// Get our new label's href
newLabel, _, err := GetLabelbyKeyValue(pce, key, value)
if err != nil {
return fmt.Errorf("error updating workload - %s", err)
}
// Create the label if it doesn't exist
if newLabel.Href == "" {
createdLabel, _, err := CreateLabel(pce, Label{Key: key, Value: value})
if err != nil {
return fmt.Errorf("error updating workload - %s", err)
}
updatedLabels = append(updatedLabels, &Label{Href: createdLabel.Href})
// If the new label does exist, add it to the slice
} else {
updatedLabels = append(updatedLabels, &Label{Href: newLabel.Href})
}
} else {
updatedLabels = append(updatedLabels, &Label{Href: l.Href})
}
w.Labels = updatedLabels
}
return nil
}
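// ExampleUpdateWorkloadLabel sketches the two-step flow described above:
// UpdateLabel only mutates the local struct, so UpdateWorkload must be called
// afterwards to persist the change. The key/value pair is illustrative.
func ExampleUpdateWorkloadLabel(pce PCE, wl Workload) {
    if err := wl.UpdateLabel(pce, "env", "prod"); err != nil {
        fmt.Println(err)
        return
    }
    if _, err := UpdateWorkload(pce, wl); err != nil {
        fmt.Println(err)
    }
}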
// BulkWorkload takes a bulk action on an array of workloads.
// Method must be create, update, or delete
func BulkWorkload(pce PCE, workloads []Workload, method string) ([]APIResponse, error) {
var apiResps []APIResponse
var err error
// Check on method
method = strings.ToLower(method)
if method != "create" && method != "update" && method != "delete" {
return apiResps, errors.New("bulk workload error - method must be create, update, or delete")
}
// Sanitize update
if method == "update" {
sanitizedWLs := []Workload{}
for _, workload := range workloads {
workload.SanitizeBulkUpdate()
sanitizedWLs = append(sanitizedWLs, workload)
}
workloads = sanitizedWLs
}
// Build the API URL
apiURL, err := url.Parse("https://" + pceSanitization(pce.FQDN) + ":" + strconv.Itoa(pce.Port) + "/api/v2/orgs/" + strconv.Itoa(pce.Org) + "/workloads/bulk_" + method)
if err != nil {
return apiResps, fmt.Errorf("bulk workload error - %s", err)
}
// If the method is delete, we can only send Hrefs
if method == "delete" {
hrefWorkloads := []Workload{}
for _, workload := range workloads {
hrefWorkloads = append(hrefWorkloads, Workload{Href: workload.Href})
}
// Re-assign workloads to just the HREF
workloads = hrefWorkloads
}
// Figure out how many API calls we need to make
numAPICalls := int(math.Ceil(float64(len(workloads)) / 1000))
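// e.g., 2500 workloads -> ceil(2500/1000) = 3 calls; exactly 1000 -> 1 call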
// Build the array to be passed to the API
apiArrays := [][]Workload{}
for i := 0; i < numAPICalls; i++ {
// Get 1,000 elements if this is not the last array
if (i + 1) != numAPICalls {
apiArrays = append(apiArrays, workloads[i*1000:(1+i)*1000])
// Get the rest on the last array
} else {
apiArrays = append(apiArrays, workloads[i*1000:])
}
}
// Call the API for each array
for _, apiArray := range apiArrays {
workloadsJSON, err := json.Marshal(apiArray)
if err != nil {
return apiResps, fmt.Errorf("bulk workload error - %s", err)
}
// Uncomment this line if you want to print the JSON object
// fmt.Println(string(workloadsJSON))
api, err := apicall("PUT", apiURL.String(), pce, workloadsJSON, false)
if err != nil {
return apiResps, fmt.Errorf("bulk workload error - %s", err)
}
apiResps = append(apiResps, api)
}
return apiResps, nil
}
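// ExampleBulkWorkload sketches a bulk update; the workloads slice would
// typically come from GetAllWorkloads with fields modified locally first.
func ExampleBulkWorkload(pce PCE, workloads []Workload) {
    apiResps, err := BulkWorkload(pce, workloads, "update")
    if err != nil {
        fmt.Println(err)
        return
    }
    fmt.Printf("bulk update completed in %d API call(s)\n", len(apiResps))
}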
// SanitizeBulkUpdate removes the properties that cannot be included in a bulk update
func (w *Workload) SanitizeBulkUpdate() {
// All Workloads
w.CreatedAt = ""
w.CreatedBy = nil
w.DeleteType = ""
w.Deleted = nil
w.DeletedAt = ""
w.DeletedBy = nil
w.UpdatedAt = ""
w.UpdatedBy = nil
// Managed workloads
if w.Agent != nil && w.Agent.Status != nil {
w.Hostname = ""
w.Interfaces = nil
w.Online = false
w.OsDetail = ""
w.OsID = ""
w.PublicIP = ""
w.Agent.Status = nil
w.Services = nil
}
// Replace Labels with Hrefs
newLabels := []*Label{}
for _, l := range w.Labels {
newLabel := Label{Href: l.Href}
newLabels = append(newLabels, &newLabel)
}
w.Labels = newLabels
}
// SanitizePut removes the properties that cannot be included when updating a managed or unmanaged workload
func (w *Workload) SanitizePut() | {
w.SanitizeBulkUpdate()
w.Href = ""
} | identifier_body | |
workloads.go | Ports represents open ports for a service running on a workload
type OpenServicePorts struct {
Address string `json:"address,omitempty"`
Package string `json:"package,omitempty"`
Port int `json:"port,omitempty"`
ProcessName string `json:"process_name,omitempty"`
Protocol int `json:"protocol,omitempty"`
User string `json:"user,omitempty"`
WinServiceName string `json:"win_service_name,omitempty"`
}
// A Workload represents a workload in the PCE
type Workload struct {
Agent *Agent `json:"agent,omitempty"`
CreatedAt string `json:"created_at,omitempty"`
CreatedBy *CreatedBy `json:"created_by,omitempty"`
DataCenter string `json:"data_center,omitempty"`
DataCenterZone string `json:"data_center_zone,omitempty"`
DeleteType string `json:"delete_type,omitempty"`
Deleted *bool `json:"deleted,omitempty"`
DeletedAt string `json:"deleted_at,omitempty"`
DeletedBy *DeletedBy `json:"deleted_by,omitempty"`
Description string `json:"description,omitempty"`
ExternalDataReference string `json:"external_data_reference,omitempty"`
ExternalDataSet string `json:"external_data_set,omitempty"`
Hostname string `json:"hostname,omitempty"`
Href string `json:"href,omitempty"`
Interfaces []*Interface `json:"interfaces,omitempty"`
Labels []*Label `json:"labels,omitempty"`
Name string `json:"name,omitempty"`
Online bool `json:"online,omitempty"`
OsDetail string `json:"os_detail,omitempty"`
OsID string `json:"os_id,omitempty"`
PublicIP string `json:"public_ip,omitempty"`
ServicePrincipalName string `json:"service_principal_name,omitempty"`
ServiceProvider string `json:"service_provider,omitempty"`
Services []*Services `json:"services,omitempty"`
UpdatedAt string `json:"updated_at,omitempty"`
UpdatedBy *UpdatedBy `json:"updated_by,omitempty"`
}
// SecureConnect represents SecureConnect for an Agent on a Workload
type SecureConnect struct {
MatchingIssuerName string `json:"matching_issuer_name,omitempty"`
}
// Services represent the Services running on a Workload
type Services struct {
CreatedAt string `json:"created_at,omitempty"`
OpenServicePorts []*OpenServicePorts `json:"open_service_ports,omitempty"`
UptimeSeconds int `json:"uptime_seconds,omitempty"`
}
// Status represents the Status of an Agent on a Workload
type Status struct {
AgentHealth []*AgentHealth `json:"agent_health,omitempty"`
AgentHealthErrors *AgentHealthErrors `json:"agent_health_errors,omitempty"`
AgentVersion string `json:"agent_version,omitempty"`
FirewallRuleCount int `json:"firewall_rule_count,omitempty"`
FwConfigCurrent bool `json:"fw_config_current,omitempty"`
LastHeartbeatOn string `json:"last_heartbeat_on,omitempty"`
ManagedSince string `json:"managed_since,omitempty"`
SecurityPolicyAppliedAt string `json:"security_policy_applied_at,omitempty"`
SecurityPolicyReceivedAt string `json:"security_policy_received_at,omitempty"`
SecurityPolicyRefreshAt string `json:"security_policy_refresh_at,omitempty"`
SecurityPolicySyncState string `json:"security_policy_sync_state,omitempty"`
UID string `json:"uid,omitempty"`
UptimeSeconds int `json:"uptime_seconds,omitempty"`
}
// GetAllWorkloads returns a slice of workloads in the Illumio PCE.
// The first API call to the PCE does not use the async option.
// If the array length is >=500, it re-runs with async.
func GetAllWorkloads(pce PCE) ([]Workload, APIResponse, error) {
var workloads []Workload
var api APIResponse
// Build the API URL
apiURL, err := url.Parse("https://" + pceSanitization(pce.FQDN) + ":" + strconv.Itoa(pce.Port) + "/api/v2/orgs/" + strconv.Itoa(pce.Org) + "/workloads")
if err != nil {
return nil, api, fmt.Errorf("get all workloads - %s", err)
}
// Call the API
api, err = apicall("GET", apiURL.String(), pce, nil, false)
if err != nil {
return nil, api, fmt.Errorf("get all workloads - %s", err)
}
json.Unmarshal([]byte(api.RespBody), &workloads)
// If the length is >= 500, re-run with async
if len(workloads) >= 500 {
api, err = apicall("GET", apiURL.String(), pce, nil, true)
if err != nil {
return nil, api, fmt.Errorf("get all workloads - %s", err)
}
// Unmarshal response to struct
json.Unmarshal([]byte(api.RespBody), &workloads)
}
// Get all labels and create a map
labels, _, err := GetAllLabels(pce)
if err != nil {
return nil, api, fmt.Errorf("get all workloads - get all labels - %s", err)
}
labelMap := make(map[string]Label)
for _, l := range labels {
labelMap[l.Href] = l
}
// Update the workloads array to label values and keys (not just HREFs)
for _, w := range workloads {
for _, l := range w.Labels {
*l = labelMap[l.Href]
}
}
return workloads, api, nil
}
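// Note on the label handling above: workloads come back from the API carrying
// only label HREFs, and the final loop replaces each *Label in place via
// labelMap, so callers can read keys and values directly, e.g.:
//
//	for _, l := range wklds[0].Labels {
//		fmt.Println(l.Key, "=", l.Value)
//	}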
// CreateWorkload creates a new unmanaged workload in the Illumio PCE
func | (pce PCE, workload Workload) (Workload, APIResponse, error) {
var newWL Workload
var api APIResponse
var err error
// Build the API URL
apiURL, err := url.Parse("https://" + pceSanitization(pce.FQDN) + ":" + strconv.Itoa(pce.Port) + "/api/v2/orgs/" + strconv.Itoa(pce.Org) + "/workloads")
if err != nil {
return newWL, api, fmt.Errorf("create workload - %s", err)
}
// Call the API
workloadJSON, err := json.Marshal(workload)
if err != nil {
return newWL, api, fmt.Errorf("create workload - %s", err)
}
api, err = apicall("POST", apiURL.String(), pce, workloadJSON, false)
if err != nil {
return newWL, api, fmt.Errorf("create workload - %s", err)
}
// Unmarshal the JSON response into the new workload struct
json.Unmarshal([]byte(api.RespBody), &newWL)
return newWL, api, nil
}
// UpdateWorkload updates an existing workload in the Illumio PCE
// The provided workload struct must include an Href.
// Properties that cannot be included in the PUT method will be ignored.
func UpdateWorkload(pce PCE, workload Workload) (APIResponse, error) {
var api APIResponse
var err error
// Build the API URL
apiURL, err := url.Parse("https://" + pceSanitization(pce.FQDN) + ":" + strconv.Itoa(pce.Port) + "/api/v2" + workload.Href)
if err != nil {
return api, fmt.Errorf("update workload - %s", err)
}
workload.SanitizePut()
// Call the API
workloadJSON, err := json.Marshal(workload)
if err != nil {
return api, fmt.Errorf("update workload - %s", err)
}
api, err = apicall("PUT", apiURL.String(), pce, workloadJSON, false)
if err != nil {
return api, fmt.Errorf("update workload - %s", err)
}
return api, nil
}
// UpdateLabel updates a workload struct with a new label href.
// It does not call the Illumio API. To reflect the change in the PCE,
// call UpdateLabel on the workload struct and then pass the struct to the UpdateWorkload function.
func (w *Workload) UpdateLabel(pce PCE, key, value string) error {
var updatedLabels []*Label
for _, l := range w.Labels {
x, _, err := GetLabelbyHref(pce, l.Href)
if err != nil {
return fmt.Errorf("error updating workload - %s", err)
}
if x.Key == key {
// Get our new label's href
newLabel, _, err := GetLabelbyKeyValue(pce, key, value)
if err != nil {
return fmt.Errorf("error updating workload - %s", err)
}
// Create the label if it doesn't exist
if newLabel.Href == "" {
createdLabel, _, err := CreateLabel(pce, Label{Key: key, Value: value})
if err != nil {
return fmt.Errorf("error updating workload - %s", err)
}
updatedLabels = append(updatedLabels, &Label{Href: createdLabel.Href})
// If the new label does exist, add it to the slice
} else {
updatedLabels = append(updatedLabels, &Label{Href: newLabel.Href})
}
} else {
updatedLabels = append(updatedLabels, &Label{Href: l.Href})
}
w.Label | CreateWorkload | identifier_name |
Context.js | extends Component {
state = {
dateToday: getDateToday(),
siteData: SiteData,
catData: CatData,
items: [],
filterIsActive: false,
categoryName: null,
categoryNameDefault: 'Live',
categoryArr: {},
brandArr: [],
sortedItems: [],
featuredItems: [],
featuredItemsArchive: [],
loading: true,
brand: 'all',
price: 0,
minPrice: 0,
maxPrice: 0,
maxPriceInit: 0,
minYear: 0,
minYearInit: 0,
maxYear: 0,
maxYearInit: 0,
priceRangeArr: [],
sortBy: 'DateDesc',
sortByArr: {},
sortRangeArr: []
};
/////////////////////////////////////////////////////////////////////////// GET data (homepage)
// Get data from API to show on the homepage
getData = async (getPageName) => {
if (!getPageName) return;
try {
this.setState({ loading: true });
const data = await fetch(CatData[this.state.categoryNameDefault].apiFeatured).then((data) => data.json());
const dataArchive = await fetch(CatData.Archive.apiFeatured).then((dataArchive) => dataArchive.json());
const dataOther = await fetch(CatData.General.apiFeatured).then((dataOther) => dataOther.json());
let items = this.formatData(data);
let itemsArchive = this.formatData(dataArchive);
let itemsOther = this.formatData(dataOther);
// ConsoleLog("[Context] getData > items..." + items);
//////////////
// FEATURED //
//////////////
// Featured items [Live]
let featuredItems = items.slice(0, SiteData.featuredItems.itemCount); // get first # items from main array
// Featured items [Archive]
let featuredItemsArchive = itemsArchive.slice(0, SiteData.featuredItems.itemCount);
let featuredItemsVideos = itemsOther.slice(2, 4);
let featuredItemsTestimonials = itemsOther.slice(0, 2);
ConsoleLog('[Context] featuredItemsVideos: ' + featuredItemsVideos);
///////////////
// SET STATE //
///////////////
this.setState({
featuredItems,
featuredItemsArchive,
featuredItemsVideos,
featuredItemsTestimonials,
loading: false
});
} catch (error) {
console.log('[Context] getData > error: ' + error);
}
};
// (END) getData
/////////////////////////////////////////////////////////////////////////// GET data (items)
// Get data from API to show on the items page(s)
// accept category and status parameter to determine api call
getDataItems = async (getCategoryName, getBrandSlug) => {
ConsoleLog('[Context] getDataItems() > getCategoryName: ' + getCategoryName);
ConsoleLog('[Context] getDataItems() > getBrandSlug: ' + getBrandSlug);
try {
this.setState({ loading: true });
let apiArr = {
base: CatData[getCategoryName].api,
brandName: getBrandSlug !== "all" ? getBrandSlug : null
}
const data = await fetch(apiGetItems(apiArr), {
method: 'GET'
}).then((data) => data.json());
const categoryName = getCategoryName; // ? getCategoryName : null;
const statusId = categoryName === 'Archive' ? 2 : 1;
const brandSlug = getBrandSlug ? getBrandSlug : null; // status determines Live or Archive
let allItems = this.formatData(data);
let items = allItems;
let sortedItems = [];
let brand = null;
let subcategoryArr = {};
if (brandSlug) {
subcategoryArr = allItems.find((x) => x.subcategoryArr.slug === brandSlug).subcategoryArr;
brand = subcategoryArr.id;
sortedItems = allItems.filter((item) => item.subcategoryArr.slug === brandSlug);
} else {
sortedItems = allItems;
}
////////////
// FILTER // properties based on items
////////////
// Price (2 dropdowns)
let minPrice = 0; //Math.min(...items.map(item => item.price));
let maxPrice = Math.max(...items.map((item) => item.price));
const maxPriceInit = Math.round((maxPrice / 100000).toFixed() * 100000);
const priceRangeArr = SiteData.priceRangeArr; // [0, 5000, 10000, ...]
// Year (2 numeric inputs)
let minYear = Math.min(...items.map((item) => item.year));
let maxYear = Math.max(...items.map((item) => item.year));
const minYearInit = minYear;
const maxYearInit = maxYear;
// Brand (dropdown)
const brandArr = this.setBrandArr(items);
const categoryArr = this.getCategoryArr(categoryName, statusId);
// setDocumentTitle(categoryArr.title);
//////////
// SORT // options based on items page type
//////////
// Sort (dropdown)
let sortRangeArr = [];
sortRangeArr.push(SortFilterRangeData.DateDesc);
sortRangeArr.push(SortFilterRangeData.DateAsc);
// CONDITION: Only show price option for Live pages
if (categoryName === 'Live') {
sortRangeArr.push(SortFilterRangeData.PriceDesc);
sortRangeArr.push(SortFilterRangeData.PriceAsc);
}
// CONDITION: Only show Years option for Live/Archive pages
sortRangeArr.push(SortFilterRangeData.YearDesc);
sortRangeArr.push(SortFilterRangeData.YearAsc);
// ConsoleLog("[Context] sortRangeArr..." + sortRangeArr);
const sortByArr = sortRangeArr[0];
const sortBy = sortByArr.name;
///////////////
// SET STATE //
///////////////
this.setState({
items,
categoryName,
categoryArr,
subcategoryArr,
brand,
brandArr,
sortedItems,
loading: false,
price: maxPrice,
minPrice,
maxPrice: maxPriceInit,
maxPriceInit,
minYear,
minYearInit,
maxYear,
maxYearInit,
priceRangeArr,
sortBy,
sortByArr,
sortRangeArr
});
} catch (error) {
console.log('[Context] getDataItems() > error...');
console.log(error);
}
};
// (END) getDataItems
componentDidMount() {
// ConsoleLog(
// "[Context] componentDidMount()... category = " +
// this.state.categoryName
// );
}
componentDidUpdate() {
ConsoleLog(
'[Context] componentDidUpdate() > categoryName: '
+ this.state.categoryName
+ ' | brand = ' + this.state.brand
);
}
/////////////////////////////////////////////////////////////////////////// SET brand array (items)
setBrandArr = (myObj) => {
// console.log('>>>', myObj[0].categoryArr);
const categorySlugBase = myObj[0].categoryArr.slug ? StripOpeningSlash(myObj[0].categoryArr.slug) : null;//get the base slug from first (any) item
const myArr = { list: myObj }; //put obj array into list for flatMap
const myUniqueBrandList = myArr.list
.flatMap((obj) => obj.subcategoryArr)
.filter((e, i, a) => a.findIndex(({ id, brand }) => id === e.id && brand === e.brand) === i);
// SORT alphabetically [A-Z]
myUniqueBrandList.sort(function(a, b) {
var nameA = a.brand.toLowerCase(),
nameB = b.brand.toLowerCase();
if (nameA < nameB)
//sort string ascending
return -1;
if (nameA > nameB) return 1;
return 0; //default return value (no sorting)
});
// COUNT items in subcategory
let myUniqueBrandListWithCount = this.countItemsInBrand(myUniqueBrandList, myObj);
myUniqueBrandListWithCount = [
{ id: 'all', brand: 'ALL', slug: categorySlugBase, itemCount: myObj.length },
...myUniqueBrandListWithCount
];
// console.log('[Context] myUniqueBrandList...', myUniqueBrandListWithCount);
return myUniqueBrandListWithCount;
};
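// Illustrative return shape of setBrandArr (values are hypothetical):
// [
//   { id: 'all', brand: 'ALL', slug: 'live', itemCount: 12 },
//   { id: 3, brand: 'Aston Martin', slug: 'aston-martin', itemCount: 4 },
//   { id: 7, brand: 'Bentley', slug: 'bentley', itemCount: 8 }
// ]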
/////////////////////////////////////////////////////////////////////////// COUNT items in each brand
countItemsInBrand(getBrandArr, getItemsArr) {
for (let i = 0; i < getBrandArr.length; i++) {
var tmp = getItemsArr.filter((item) => item.subcategoryArr.id === getBrandArr[i].id).length;
getBrandArr[i].itemCount = tmp;
}
return getBrandArr;
}
/////////////////////////////////////////////////////////////////////////// SORT used by filter
// REF: https://stackoverflow.com/questions/6913512/how-to-sort-an-array-of-objects-by-multiple-fields
fieldSorter | ItemProvider | identifier_name | |
Context.js | let itemsArchive = this.formatData(dataArchive);
let itemsOther = this.formatData(dataOther);
// ConsoleLog("[Context] getData > items..." + items);
//////////////
// FEATURED //
//////////////
// Featured items [Live]
let featuredItems = items.slice(0, SiteData.featuredItems.itemCount); // get first # items from main array
// Featured items [Archive]
let featuredItemsArchive = itemsArchive.slice(0, SiteData.featuredItems.itemCount);
let featuredItemsVideos = itemsOther.slice(2, 4);
let featuredItemsTestimonials = itemsOther.slice(0, 2);
ConsoleLog('[Context] featuredItemsVideos: ' + featuredItemsVideos);
///////////////
// SET STATE //
///////////////
this.setState({
featuredItems,
featuredItemsArchive,
featuredItemsVideos,
featuredItemsTestimonials,
loading: false
});
} catch (error) {
console.log('[Context] getData > error: ' + error);
}
};
// (END) getData
/////////////////////////////////////////////////////////////////////////// GET data (items)
// Get data from API to show on the items page(s)
// accept category and status parameter to determine api call
getDataItems = async (getCategoryName, getBrandSlug) => {
ConsoleLog('[Context] getDataItems() > getCategoryName: ' + getCategoryName);
ConsoleLog('[Context] getDataItems() > getBrandSlug: ' + getBrandSlug);
try {
this.setState({ loading: true });
let apiArr = {
base: CatData[getCategoryName].api,
brandName: getBrandSlug !== "all" ? getBrandSlug : null
}
const data = await fetch(apiGetItems(apiArr), {
method: 'GET'
}).then((data) => data.json());
const categoryName = getCategoryName; // ? getCategoryName : null;
const statusId = categoryName === 'Archive' ? 2 : 1;
const brandSlug = getBrandSlug ? getBrandSlug : null; // status determines Live or Archive
let allItems = this.formatData(data);
let items = allItems;
let sortedItems = [];
let brand = null;
let subcategoryArr = {};
if (brandSlug) {
subcategoryArr = allItems.find((x) => x.subcategoryArr.slug === brandSlug).subcategoryArr;
brand = subcategoryArr.id;
sortedItems = allItems.filter((item) => item.subcategoryArr.slug === brandSlug);
} else {
sortedItems = allItems;
}
////////////
// FILTER // properties based on items
////////////
// Price (2 dropdowns)
let minPrice = 0; //Math.min(...items.map(item => item.price));
let maxPrice = Math.max(...items.map((item) => item.price));
const maxPriceInit = Math.round((maxPrice / 100000).toFixed() * 100000);
const priceRangeArr = SiteData.priceRangeArr; // [0, 5000, 10000, ...]
// Year (2 numeric inputs)
let minYear = Math.min(...items.map((item) => item.year));
let maxYear = Math.max(...items.map((item) => item.year));
const minYearInit = minYear;
const maxYearInit = maxYear;
// Brand (dropdown)
const brandArr = this.setBrandArr(items);
const categoryArr = this.getCategoryArr(categoryName, statusId);
// setDocumentTitle(categoryArr.title);
//////////
// SORT // options based on items page type
//////////
// Sort (dropdown)
let sortRangeArr = [];
sortRangeArr.push(SortFilterRangeData.DateDesc);
sortRangeArr.push(SortFilterRangeData.DateAsc);
// CONDITION: Only show price option for Live pages
if (categoryName === 'Live') {
sortRangeArr.push(SortFilterRangeData.PriceDesc);
sortRangeArr.push(SortFilterRangeData.PriceAsc);
}
// CONDITION: Only show Years option for Live/Archive pages
sortRangeArr.push(SortFilterRangeData.YearDesc);
sortRangeArr.push(SortFilterRangeData.YearAsc);
// ConsoleLog("[Context] sortRangeArr..." + sortRangeArr);
const sortByArr = sortRangeArr[0];
const sortBy = sortByArr.name;
///////////////
// SET STATE //
///////////////
this.setState({
items,
categoryName,
categoryArr,
subcategoryArr,
brand,
brandArr,
sortedItems,
loading: false,
price: maxPrice,
minPrice,
maxPrice: maxPriceInit,
maxPriceInit,
minYear,
minYearInit,
maxYear,
maxYearInit,
priceRangeArr,
sortBy,
sortByArr,
sortRangeArr
});
} catch (error) {
console.log('[Context] getDataItems() > error...');
console.log(error);
}
};
// (END) getDataItems
componentDidMount() {
// ConsoleLog(
// "[Context] componentDidMount()... category = " +
// this.state.categoryName
// );
}
componentDidUpdate() {
ConsoleLog(
'[Context] componentDidUpdate() > categoryName: '
+ this.state.categoryName
+ ' | brand = ' + this.state.brand
);
}
/////////////////////////////////////////////////////////////////////////// SET brand array (items)
setBrandArr = (myObj) => {
// console.log('>>>', myObj[0].categoryArr);
const categorySlugBase = myObj[0].categoryArr.slug ? StripOpeningSlash(myObj[0].categoryArr.slug) : null;//get the base slug from first (any) item
const myArr = { list: myObj }; //put obj array into list for flatMap
const myUniqueBrandList = myArr.list
.flatMap((obj) => obj.subcategoryArr)
.filter((e, i, a) => a.findIndex(({ id, brand }) => id === e.id && brand === e.brand) === i);
// SORT alphabetically [A-Z]
myUniqueBrandList.sort(function(a, b) {
var nameA = a.brand.toLowerCase(),
nameB = b.brand.toLowerCase();
if (nameA < nameB)
//sort string ascending
return -1;
if (nameA > nameB) return 1;
return 0; //default return value (no sorting)
});
// COUNT items in subcategory
let myUniqueBrandListWithCount = this.countItemsInBrand(myUniqueBrandList, myObj);
myUniqueBrandListWithCount = [
{ id: 'all', brand: 'ALL', slug: categorySlugBase, itemCount: myObj.length },
...myUniqueBrandListWithCount
];
// console.log('[Context] myUniqueBrandList...', myUniqueBrandListWithCount);
return myUniqueBrandListWithCount;
};
/////////////////////////////////////////////////////////////////////////// COUNT items in each brand
countItemsInBrand(getBrandArr, getItemsArr) {
for (let i = 0; i < getBrandArr.length; i++) {
var tmp = getItemsArr.filter((item) => item.subcategoryArr.id === getBrandArr[i].id).length;
getBrandArr[i].itemCount = tmp;
}
return getBrandArr;
}
/////////////////////////////////////////////////////////////////////////// SORT used by filter
// REF: https://stackoverflow.com/questions/6913512/how-to-sort-an-array-of-objects-by-multiple-fields
fieldSorter = (fields) => {
return function(a, b) {
return fields
.map(function(o) {
var dir = 1;
if (o[0] === '-') {
dir = -1;
o = o.substring(1);
}
if (a[o] > b[o]) return dir;
if (a[o] < b[o]) return -dir;
return 0;
})
.reduce(function firstNonZeroValue(p, n) {
return p ? p : n;
}, 0);
};
};
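// Usage sketch for fieldSorter: a '-' prefix sorts that field descending,
// so the call below orders by year (newest first), then price (lowest first):
//
//   const sorted = [...items].sort(this.fieldSorter(['-year', 'price']));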
/////////////////////////////////////////////////////////////////////////// FORMAT item data
formatData(getItemsData) {
let tempItems = getItemsData.map((dataItem) => {
let id = dataItem.id;
let name = parse(dataItem.name).toString();
let slug = this.generateSlugFromName(name);// 2do - get from API (dataItem.slug)
let status = dataItem.status;
let category = dataItem.category;
let categoryArr = this.getCategoryArr(category, dataItem.status);
let subcategoryArr = dataItem.catalogue_subcat;
if(!dataItem.catalogue_subcat.slug) dataItem.catalogue_subcat.slug = this | const dataArchive = await fetch(CatData.Archive.apiFeatured).then((dataArchive) => dataArchive.json());
const dataOther = await fetch(CatData.General.apiFeatured).then((dataOther) => dataOther.json());
let items = this.formatData(data);
| random_line_split | |
WikitudePlugin.js | /**
* Callbacks that are used during device compatibility checks.
*/
this._onDeviceSupportedCallback = null;
this._onDeviceNotSupportedCallback = null;
/**
* Callbacks that are used if an ARchitect World was launched successfully or not.
*/
this._onARchitectWorldLaunchedCallback = null;
this._onARchitectWorldFailedLaunchingCallback = null;
};
/*
* =============================================================================================================================
*
* PUBLIC API
*
* =============================================================================================================================
*/
/* Managing ARchitect world loading */
/**
* Use this function to check if the current device is capable of running ARchitect Worlds.
*
* @param {function} successCallback A callback which is called if the device is capable of running ARchitect Worlds.
* @param {function} errorCallback A callback which is called if the device is not capable of running ARchitect Worlds.
*/
WikitudePlugin.prototype.isDeviceSupported = function(successCallback, errorCallback) {
// Store a reference to the success and error callback function because we intercept the callbacks ourself but need to call the developer ones afterwards
this._onDeviceSupportedCallback = successCallback;
this._onDeviceNotSupportedCallback = errorCallback;
// Check if the current device is capable of running Architect Worlds
cordova.exec(this.deviceIsARchitectReady, this.deviceIsNotARchitectReady, "WikitudePlugin", "isDeviceSupported", [this._augmentedRealityMode]);
};
/**
* Use this function to load an ARchitect World.
*
* @param {String} worldPath The path to an ARchitect world, ether on the device or on e.g. your Dropbox.
*/
WikitudePlugin.prototype.loadARchitectWorld = function(worldPath) {
// before we actually call load, we check again if the device is able to open the world
if (this._isDeviceSupported) {
// the 'open' function of the Wikitude Plugin requires some parameters
// @param {String} SDKKey (required) The Wikitude SDK license key that you received with your purchase
// @param {String} ARchitectWorldPath (required) The path to a local ARchitect world or to a ARchitect world on a server or your dropbox
// @param {String} AugmentedRealityMode (optional) describes in more detail how the Wikitude SDK should be instantiated
cordova.exec(this.worldLaunched, this.worldFailedLaunching, "WikitudePlugin", "open", [{
"SDKKey": this._sdkKey,
"ARchitectWorldPath": worldPath,
"AugmentedRealityMode": this._augmentedRealityMode
}]);
// We add an event listener on the resume and pause event of the application lifecycle
document.addEventListener("resume", this.onResume, false);
document.addEventListener("pause", this.onPause, false);
} else {
// If the device is not supported, we call the device not supported callback again.
if (this._onDeviceNotSupportedCallback) {
this._onDeviceNotSupportedCallback();
}
}
};
/* Managing the Wikitude SDK Lifecycle */
/**
* Use this function to stop the Wikitude SDK and to remove it from the screen.
*/
WikitudePlugin.prototype.close = function() {
document.removeEventListener("pause", this.onPause, false);
document.removeEventListener("resume", this.onResume, false);
cordova.exec(this.onWikitudeOK, this.onWikitudeError, "WikitudePlugin", "close", [""]);
};
/**
* Use this function to only hide the Wikitude SDK. All location and rendering updates are still active.
*/
WikitudePlugin.prototype.hide = function() {
cordova.exec(this.onWikitudeOK, this.onWikitudeError, "WikitudePlugin", "hide", [""]);
};
/**
* Use this function to show the Wikitude SDK again if it was hidden before.
*/
WikitudePlugin.prototype.show = function() {
cordova.exec(this.onWikitudeOK, this.onWikitudeError, "WikitudePlugin", "show", [""]);
};
/* Interacting with the Wikitude SDK */
/**
* Use this function to call javascript which will be executed in the context of the currently loaded ARchitect World.
*
* @param js The JavaScript that should be evaluated in the ARchitect View.
*/
WikitudePlugin.prototype.callJavaScript = function(js) {
cordova.exec(this.onWikitudeOK, this.onWikitudeError, "WikitudePlugin", "callJavascript", [js]);
};
/**
* Use this function to set a callback which will be invoked when the ARchitect World opens an architectsdk:// URL.
* document.location = "architectsdk://opendetailpage?id=9";
*
* @param onUrlInvokeCallback A function which will be called when the ARchitect World invokes a call to "document.location = architectsdk://"
*/
WikitudePlugin.prototype.setOnUrlInvokeCallback = function(onUrlInvokeCallback) {
cordova.exec(onUrlInvokeCallback, this.onWikitudeError, "WikitudePlugin", "onUrlInvoke", [""]);
};
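/* A minimal usage sketch; the URL scheme value matches the example above and
 * the parsing logic is illustrative only:
 *
 *   wikitudePlugin.setOnUrlInvokeCallback(function(url) {
 *       // e.g. url === "architectsdk://opendetailpage?id=9"
 *       if (url.indexOf("opendetailpage") > -1) {
 *           // extract the id and present a native detail view
 *       }
 *   });
 */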
/**
* Use this function to inject a location into the Wikitude SDK.
*
* @param latitude The latitude which should be simulated
* @param longitude The longitude which should be simulated
* @param altitude The altitude which should be simulated
* @param accuracy The simulated location accuracy
*/
WikitudePlugin.prototype.setLocation = function(latitude, longitude, altitude, accuracy) {
cordova.exec(this.onWikitudeOK, this.onWikitudeError, "WikitudePlugin", "setLocation", [latitude, longitude, altitude, accuracy]);
};
/**
* Use this function to generate a screenshot from the current Wikitude SDK view.
*
* @param includeWebView Indicates if the ARchitect web view should be included in the generated screenshot or not.
* @param imagePathInBundleOrNullForPhotoLibrary If a file path or file name is given, the generated screenshot will be saved in the application bundle. Passing null will save the photo in the device photo library.
*/
WikitudePlugin.prototype.captureScreen = function(includeWebView, imagePathInBundleOrNullForPhotoLibrary, successCallback, errorCallback) {
cordova.exec(successCallback, errorCallback, "WikitudePlugin", "captureScreen", [includeWebView, imagePathInBundleOrNullForPhotoLibrary]);
};
/*
* =============================================================================================================================
*
* Callbacks of public functions
*
* =============================================================================================================================
*/
/**
* This function gets called if the Wikitude Plugin reports that the device is able to start the Wikitude SDK
*/
WikitudePlugin.prototype.deviceIsARchitectReady = function() {
// Keep track of the device status
module.exports._isDeviceSupported = true;
// if the developer passed in a device supported callback, call it
if (module.exports._onDeviceSupportedCallback) {
module.exports._onDeviceSupportedCallback();
}
};
/**
* This function gets called if the Wikitude Plugin reports that the device is not capable of starting the Wikitude SDK.
*/
WikitudePlugin.prototype.deviceIsNotARchitectReady = function() {
// Keep track of the device status
module.exports._isDeviceSupported = false;
// if the developer passed in a device not supported callback, call it
if (module.exports._onDeviceNotSupportedCallback) {
module.exports._onDeviceNotSupportedCallback();
}
};
/**
* Use this callback to get notified when the ARchitect World was loaded successfully.
*/
WikitudePlugin.prototype.worldLaunched = function(url) {
if (module.exports._onARchitectWorldLaunchedCallback) {
module.exports._onARchitectWorldLaunchedCallback(url);
}
};
/**
* Use this callback to get notified when the ARchitect World could not be loaded.
*/
WikitudePlugin.prototype.worldFailedLaunching = function(err) {
if (module.exports._onARchitectWorldFailedLaunchingCallback) {
module.exports._onARchitectWorldFailedLaunchingCallback(err);
}
};
/* Lifecycle updates */
/**
* This function gets called every time the application becomes active.
*/
WikitudePlugin.prototype.onResume = function() {
// Tell the Wikitude SDK that it should resume.
cordova.exec(this.onWikitudeOK, this.onWikitudeError, "WikitudePlugin", "onResume", [""]);
};
/**
* This function gets called every time the application is about to become inactive.
*/
WikitudePlugin.prototype.onPause = function() {
// Tell the Wikitude SDK that the application did become inactive
cordova.exec(this.onWikitudeOK, this.onWikitudeError, "WikitudePlugin", "onPause", [""]);
};
/**
* A generic success callback used inside this wrapper.
*/
WikitudePlugin.prototype.onWikitudeOK = function() {};
/**
* A generic error callback used inside this wrapper.
*/
WikitudePlugin.prototype.onWikitudeError = function() {};
/* Export a new WikitudePlugin instance */
var wikitudePlugin = new WikitudePlugin();
| random_line_split | ||
WikitudePlugin.js | KEY-HERE";
/**
* This variable indicates whether the current device is capable of running ARchitect Worlds.
*/
this._isDeviceSupported = false;
/**
* The Wikitude SDK can run in different modes.
* * Geo means that objects are placed at latitude/longitude positions.
* * IR means that only image recognition is used in the ARchitect World.
* When your ARchitect World uses both geo and IR, then set this value to "IrAndGeo". Otherwise, if the ARchitect World only needs image recognition, setting "IR" requires fewer features from the device and therefore supports a wider range of devices. Keep in mind that image recognition requires a dual-core CPU to work well.
*/
this._augmentedRealityMode = "IrAndGeo"; // "IR" for image recognition worlds only, "Geo" if you want to use Geo AR only
/**
* Callbacks that are used during device compatibility checks.
*/
this._onDeviceSupportedCallback = null;
this._onDeviceNotSupportedCallback = null;
/**
* Callbacks that are used if an ARchitect World was launched successfully or not.
*/
this._onARchitectWorldLaunchedCallback = null;
this._onARchitectWorldFailedLaunchingCallback = null;
};
/*
* =============================================================================================================================
*
* PUBLIC API
*
* =============================================================================================================================
*/
/* Managing ARchitect world loading */
/**
* Use this function to check if the current device is capable of running ARchitect Worlds.
*
* @param {function} successCallback A callback which is called if the device is capable of running ARchitect Worlds.
* @param {function} errorCallback A callback which is called if the device is not capable of running ARchitect Worlds.
*/
WikitudePlugin.prototype.isDeviceSupported = function(successCallback, errorCallback) {
// Store a reference to the success and error callback function because we intercept the callbacks ourself but need to call the developer ones afterwards
this._onDeviceSupportedCallback = successCallback;
this._onDeviceNotSupportedCallback = errorCallback;
// Check if the current device is capable of running Architect Worlds
cordova.exec(this.deviceIsARchitectReady, this.deviceIsNotARchitectReady, "WikitudePlugin", "isDeviceSupported", [this._augmentedRealityMode]);
};
/**
* Use this function to load an ARchitect World.
*
* @param {String} worldPath The path to an ARchitect world, ether on the device or on e.g. your Dropbox.
*/
WikitudePlugin.prototype.loadARchitectWorld = function(worldPath) {
// before we actually call load, we check again if the device is able to open the world
if (this._isDeviceSupported) {
// the 'open' function of the Wikitude Plugin requires some parameters
// @param {String} SDKKey (required) The Wikitude SDK license key that you received with your purchase
// @param {String} ARchitectWorldPath (required) The path to a local ARchitect world or to a ARchitect world on a server or your dropbox
// @param {String} AugmentedRealityMode (optional) describes in more detail how the Wikitude SDK should be instantiated
cordova.exec(this.worldLaunched, this.worldFailedLaunching, "WikitudePlugin", "open", [{
"SDKKey": this._sdkKey,
"ARchitectWorldPath": worldPath,
"AugmentedRealityMode": this._augmentedRealityMode
}]);
// We add an event listener on the resume and pause event of the application lifecycle
document.addEventListener("resume", this.onResume, false);
document.addEventListener("pause", this.onPause, false);
} else {
// If the device is not supported, we call the device not supported callback again.
if (this._onDeviceNotSupportedCallback) {
this._onDeviceNotSupportedCallback();
}
}
};
/* Managing the Wikitude SDK Lifecycle */
/**
* Use this function to stop the Wikitude SDK and to remove it from the screen.
*/
WikitudePlugin.prototype.close = function() {
document.removeEventListener("pause", this.onPause, false);
document.removeEventListener("resume", this.onResume, false);
cordova.exec(this.onWikitudeOK, this.onWikitudeError, "WikitudePlugin", "close", [""]);
};
/**
* Use this function to only hide the Wikitude SDK. All location and rendering updates are still active.
*/
WikitudePlugin.prototype.hide = function() {
cordova.exec(this.onWikitudeOK, this.onWikitudeError, "WikitudePlugin", "hide", [""]);
};
/**
* Use this function to show the Wikitude SDK again if it was hidden before.
*/
WikitudePlugin.prototype.show = function() {
cordova.exec(this.onWikitudeOK, this.onWikitudeError, "WikitudePlugin", "show", [""]);
};
/* Interacting with the Wikitude SDK */
/**
* Use this function to call javascript which will be executed in the context of the currently loaded ARchitect World.
*
* @param js The JavaScript that should be evaluated in the ARchitect View.
*/
WikitudePlugin.prototype.callJavaScript = function(js) {
cordova.exec(this.onWikitudeOK, this.onWikitudeError, "WikitudePlugin", "callJavascript", [js]);
};
/**
* Use this function to set a callback which will be invoked when the ARchitect World opens an architectsdk:// URL.
* document.location = "architectsdk://opendetailpage?id=9";
*
* @param onUrlInvokeCallback A function which will be called when the ARchitect World invokes a call to "document.location = architectsdk://"
*/
WikitudePlugin.prototype.setOnUrlInvokeCallback = function(onUrlInvokeCallback) {
cordova.exec(onUrlInvokeCallback, this.onWikitudeError, "WikitudePlugin", "onUrlInvoke", [""]);
};
/**
* Use this function to inject a location into the Wikitude SDK.
*
* @param latitude The latitude which should be simulated
* @param longitude The longitude which should be simulated
* @param altitude The altitude which should be simulated
* @param accuracy The simulated location accuracy
*/
WikitudePlugin.prototype.setLocation = function(latitude, longitude, altitude, accuracy) {
cordova.exec(this.onWikitudeOK, this.onWikitudeError, "WikitudePlugin", "setLocation", [latitude, longitude, altitude, accuracy]);
};
/**
* Use this function to generate a screenshot from the current Wikitude SDK view.
*
* @param includeWebView Indicates if the ARchitect web view should be included in the generated screenshot or not.
* @param imagePathInBundleOrNullForPhotoLibrary If a file path or file name is given, the generated screenshot will be saved in the application bundle. Passing null will save the photo in the device photo library.
*/
WikitudePlugin.prototype.captureScreen = function(includeWebView, imagePathInBundleOrNullForPhotoLibrary, successCallback, errorCallback) {
cordova.exec(successCallback, errorCallback, "WikitudePlugin", "captureScreen", [includeWebView, imagePathInBundleOrNullForPhotoLibrary]);
};
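/* Usage sketch: include the web view and pass null to save the screenshot to
 * the device photo library; the success callback argument shape is assumed:
 *
 *   wikitudePlugin.captureScreen(true, null,
 *       function(info) { console.log("screenshot captured: " + info); },
 *       function(err) { console.log("screenshot failed: " + err); }
 *   );
 */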
/*
* =============================================================================================================================
*
* Callbacks of public functions
*
* =============================================================================================================================
*/
/**
* This function gets called if the Wikitude Plugin reports that the device is able to start the Wikitude SDK
*/
WikitudePlugin.prototype.deviceIsARchitectReady = function() {
// Keep track of the device status
module.exports._isDeviceSupported = true;
// if the developer passed in a device supported callback, call it
if (module.exports._onDeviceSupportedCallback) |
};
/**
* This function gets called if the Wikitude Plugin reports that the device is not capable of starting the Wikitude SDK.
*/
WikitudePlugin.prototype.deviceIsNotARchitectReady = function() {
// Keep track of the device status
module.exports._isDeviceSupported = false;
// if the developer passed in a device not supported callback, call it
if (module.exports._onDeviceNotSupportedCallback) {
module.exports._onDeviceNotSupportedCallback();
}
};
/**
* Use this callback to get notified when the ARchitect World was loaded successfully.
*/
WikitudePlugin.prototype.worldLaunched = function(url) {
if (module.exports._onARchitectWorldLaunchedCallback) {
module.exports._onARchitectWorldLaunchedCallback(url);
}
};
/**
* Use this callback to get notified when the ARchitect World could not be loaded.
*/
WikitudePlugin.prototype.worldFailedLaunching = function(err) {
if (module.exports._onARchitectWorldFailedLaunchingCallback) {
module.exports._onARchitectWorldFailedLaunchingCallback(err);
}
};
/* Lifecycle updates */
/**
* This function gets called every time the application becomes active.
*/
| {
module.exports._onDeviceSupportedCallback();
} | conditional_block |
actions_linux.go |
func convertImage(filename string, unsquashfsPath string) (string, error) {
img, err := image.Init(filename, false)
if err != nil {
return "", fmt.Errorf("could not open image %s: %s", filename, err)
}
defer img.File.Close()
if !img.HasRootFs() {
return "", fmt.Errorf("no root filesystem found in %s", filename)
}
// squashfs only
if img.Partitions[0].Type != image.SQUASHFS {
return "", fmt.Errorf("not a squashfs root filesystem")
}
// create a reader for rootfs partition
reader, err := image.NewPartitionReader(img, "", 0)
if err != nil {
return "", fmt.Errorf("could not extract root filesystem: %s", err)
}
s := unpacker.NewSquashfs()
if !s.HasUnsquashfs() && unsquashfsPath != "" {
s.UnsquashfsPath = unsquashfsPath
}
// keep compatibility with v2
tmpdir := os.Getenv("SINGULARITY_LOCALCACHEDIR")
if tmpdir == "" {
tmpdir = os.Getenv("SINGULARITY_CACHEDIR")
}
// create temporary sandbox
dir, err := ioutil.TempDir(tmpdir, "rootfs-")
if err != nil {
return "", fmt.Errorf("could not create temporary sandbox: %s", err)
}
// extract root filesystem
if err := s.ExtractAll(reader, dir); err != nil {
os.RemoveAll(dir)
return "", fmt.Errorf("root filesystem extraction failed: %s", err)
}
return dir, err
}
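// Usage sketch for convertImage (paths are illustrative); the returned
// sandbox directory is the caller's responsibility to remove:
//
//	dir, err := convertImage("/tmp/lolcow.sif", "/usr/local/bin/unsquashfs")
//	if err != nil {
//		sylog.Fatalf("While extracting image: %s", err)
//	}
//	defer os.RemoveAll(dir)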
// TODO: Let's stick this in another file so that the CLI is just CLI
func execStarter(cobraCmd *cobra.Command, image string, args []string, name string) {
targetUID := 0
targetGID := make([]int, 0)
procname := ""
uid := uint32(os.Getuid())
gid := uint32(os.Getgid())
// Are we running from a privileged account?
isPrivileged := uid == 0
checkPrivileges := func(cond bool, desc string, fn func()) {
if !cond {
return
}
if !isPrivileged {
sylog.Fatalf("%s requires root privileges", desc)
}
fn()
}
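// e.g. the "uid security feature" check below aborts with
// "uid security feature requires root privileges" when a non-root user
// passes a uid security parameter.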
syscall.Umask(0022)
starter := buildcfg.LIBEXECDIR + "/singularity/bin/starter-suid"
engineConfig := singularityConfig.NewConfig()
configurationFile := buildcfg.SYSCONFDIR + "/singularity/singularity.conf"
if err := config.Parser(configurationFile, engineConfig.File); err != nil {
sylog.Fatalf("Unable to parse singularity.conf file: %s", err)
}
ociConfig := &oci.Config{}
generator := generate.Generator{Config: &ociConfig.Spec}
engineConfig.OciConfig = ociConfig
generator.SetProcessArgs(args)
uidParam := security.GetParam(Security, "uid")
gidParam := security.GetParam(Security, "gid")
// handle target UID/GID for root user
checkPrivileges(uidParam != "", "uid security feature", func() {
u, err := strconv.ParseUint(uidParam, 10, 32)
if err != nil {
sylog.Fatalf("failed to parse provided UID")
}
targetUID = int(u)
uid = uint32(targetUID)
engineConfig.SetTargetUID(targetUID)
})
checkPrivileges(gidParam != "", "gid security feature", func() {
gids := strings.Split(gidParam, ":")
for _, id := range gids {
g, err := strconv.ParseUint(id, 10, 32)
if err != nil {
sylog.Fatalf("failed to parse provided GID")
}
targetGID = append(targetGID, int(g))
}
if len(gids) > 0 {
gid = uint32(targetGID[0])
}
engineConfig.SetTargetGID(targetGID)
})
if strings.HasPrefix(image, "instance://") {
if name != "" {
sylog.Fatalf("Starting an instance from another is not allowed")
}
instanceName := instance.ExtractName(image)
file, err := instance.Get(instanceName, instance.SingSubDir)
if err != nil {
sylog.Fatalf("%s", err)
}
if !file.Privileged {
UserNamespace = true
}
generator.AddProcessEnv("SINGULARITY_CONTAINER", file.Image)
generator.AddProcessEnv("SINGULARITY_NAME", filepath.Base(file.Image))
engineConfig.SetImage(image)
engineConfig.SetInstanceJoin(true)
} else {
abspath, err := filepath.Abs(image)
generator.AddProcessEnv("SINGULARITY_CONTAINER", abspath)
generator.AddProcessEnv("SINGULARITY_NAME", filepath.Base(abspath))
if err != nil {
sylog.Fatalf("Failed to determine image absolute path for %s: %s", image, err)
}
engineConfig.SetImage(abspath)
}
if !NoNvidia && (Nvidia || engineConfig.File.AlwaysUseNv) {
userPath := os.Getenv("USER_PATH")
if engineConfig.File.AlwaysUseNv {
sylog.Verbosef("'always use nv = yes' found in singularity.conf")
sylog.Verbosef("binding nvidia files into container")
}
libs, bins, err := nvidia.Paths(buildcfg.SINGULARITY_CONFDIR, userPath)
if err != nil {
sylog.Warningf("Unable to capture NVIDIA bind points: %v", err)
} else {
if len(bins) == 0 {
sylog.Infof("Could not find any NVIDIA binaries on this host!")
} else {
if IsWritable {
sylog.Warningf("NVIDIA binaries may not be bound with --writable")
}
for _, binary := range bins {
usrBinBinary := filepath.Join("/usr/bin", filepath.Base(binary))
bind := strings.Join([]string{binary, usrBinBinary}, ":")
BindPaths = append(BindPaths, bind)
}
}
if len(libs) == 0 {
sylog.Warningf("Could not find any NVIDIA libraries on this host!")
sylog.Warningf("You may need to edit %v/nvliblist.conf", buildcfg.SINGULARITY_CONFDIR)
} else {
ContainLibsPath = append(ContainLibsPath, libs...)
}
}
}
engineConfig.SetBindPath(BindPaths)
engineConfig.SetNetwork(Network)
engineConfig.SetDNS(DNS)
engineConfig.SetNetworkArgs(NetworkArgs)
engineConfig.SetOverlayImage(OverlayPath)
engineConfig.SetWritableImage(IsWritable)
engineConfig.SetNoHome(NoHome)
engineConfig.SetNv(Nvidia)
engineConfig.SetAddCaps(AddCaps)
engineConfig.SetDropCaps(DropCaps)
checkPrivileges(AllowSUID, "--allow-setuid", func() {
engineConfig.SetAllowSUID(AllowSUID)
})
checkPrivileges(KeepPrivs, "--keep-privs", func() {
engineConfig.SetKeepPrivs(KeepPrivs)
})
engineConfig.SetNoPrivs(NoPrivs)
engineConfig.SetSecurity(Security)
engineConfig.SetShell(ShellPath)
engineConfig.SetLibrariesPath(ContainLibsPath)
if ShellPath != "" {
generator.AddProcessEnv("SINGULARITY_SHELL", ShellPath)
}
checkPrivileges(CgroupsPath != "", "--apply-cgroups", func() {
engineConfig.SetCgroupsPath(CgroupsPath)
})
if IsWritable && IsWritableTmpfs {
sylog.Warningf("Disabling --writable-tmpfs flag, mutually exclusive with --writable")
engineConfig.SetWritableTmpfs(false)
} else {
engineConfig.SetWritableTmpfs(IsWritableTmpfs)
}
homeFlag := cobraCmd.Flag("home")
engineConfig.SetCustomHome(homeFlag.Changed)
// set home directory for the targeted UID if it exists on host system
if !homeFlag.Changed && targetUID != 0 {
if targetUID > 500 {
if pwd, err := user.GetPwUID(uint32(targetUID)); err == nil {
sylog.Debugf("Target UID requested, set home directory to %s", pwd.Dir)
HomePath = pwd.Dir
engineConfig.SetCustomHome(true)
} else {
sylog.Verbosef("Home directory for UID %d not found, home won't be mounted", targetUID)
engineConfig.SetNoHome(true)
HomePath = "/"
}
} else {
sylog.Verbosef("System UID %d requested, home won't be mounted", targetUID)
| {
if os.Geteuid() != 0 {
if len(args) >= 1 && len(args[0]) > 0 {
// The first argument is the context
sylog.Fatalf("command '%s %s' requires root privileges", args[0], cmd.Name())
} else {
sylog.Fatalf("command %s requires root privileges", cmd.Name())
}
}
} | identifier_body | |
actions_linux.go | .AddProcessEnv("SINGULARITY_CONTAINER", abspath)
generator.AddProcessEnv("SINGULARITY_NAME", filepath.Base(abspath))
if err != nil {
sylog.Fatalf("Failed to determine image absolute path for %s: %s", image, err)
}
engineConfig.SetImage(abspath)
}
if !NoNvidia && (Nvidia || engineConfig.File.AlwaysUseNv) {
userPath := os.Getenv("USER_PATH")
if engineConfig.File.AlwaysUseNv {
sylog.Verbosef("'always use nv = yes' found in singularity.conf")
sylog.Verbosef("binding nvidia files into container")
}
libs, bins, err := nvidia.Paths(buildcfg.SINGULARITY_CONFDIR, userPath)
if err != nil {
sylog.Warningf("Unable to capture NVIDIA bind points: %v", err)
} else {
if len(bins) == 0 {
sylog.Infof("Could not find any NVIDIA binaries on this host!")
} else {
if IsWritable {
sylog.Warningf("NVIDIA binaries may not be bound with --writable")
}
for _, binary := range bins {
usrBinBinary := filepath.Join("/usr/bin", filepath.Base(binary))
bind := strings.Join([]string{binary, usrBinBinary}, ":")
BindPaths = append(BindPaths, bind)
}
}
if len(libs) == 0 {
sylog.Warningf("Could not find any NVIDIA libraries on this host!")
sylog.Warningf("You may need to edit %v/nvliblist.conf", buildcfg.SINGULARITY_CONFDIR)
} else {
ContainLibsPath = append(ContainLibsPath, libs...)
}
}
}
engineConfig.SetBindPath(BindPaths)
engineConfig.SetNetwork(Network)
engineConfig.SetDNS(DNS)
engineConfig.SetNetworkArgs(NetworkArgs)
engineConfig.SetOverlayImage(OverlayPath)
engineConfig.SetWritableImage(IsWritable)
engineConfig.SetNoHome(NoHome)
engineConfig.SetNv(Nvidia)
engineConfig.SetAddCaps(AddCaps)
engineConfig.SetDropCaps(DropCaps)
checkPrivileges(AllowSUID, "--allow-setuid", func() {
engineConfig.SetAllowSUID(AllowSUID)
})
checkPrivileges(KeepPrivs, "--keep-privs", func() {
engineConfig.SetKeepPrivs(KeepPrivs)
})
engineConfig.SetNoPrivs(NoPrivs)
engineConfig.SetSecurity(Security)
engineConfig.SetShell(ShellPath)
engineConfig.SetLibrariesPath(ContainLibsPath)
if ShellPath != "" {
generator.AddProcessEnv("SINGULARITY_SHELL", ShellPath)
}
checkPrivileges(CgroupsPath != "", "--apply-cgroups", func() {
engineConfig.SetCgroupsPath(CgroupsPath)
})
if IsWritable && IsWritableTmpfs {
sylog.Warningf("Disabling --writable-tmpfs flag, mutually exclusive with --writable")
engineConfig.SetWritableTmpfs(false)
} else {
engineConfig.SetWritableTmpfs(IsWritableTmpfs)
}
homeFlag := cobraCmd.Flag("home")
engineConfig.SetCustomHome(homeFlag.Changed)
// set home directory for the targeted UID if it exists on host system
if !homeFlag.Changed && targetUID != 0 {
if targetUID > 500 {
if pwd, err := user.GetPwUID(uint32(targetUID)); err == nil {
sylog.Debugf("Target UID requested, set home directory to %s", pwd.Dir)
HomePath = pwd.Dir
engineConfig.SetCustomHome(true)
} else {
sylog.Verbosef("Home directory for UID %d not found, home won't be mounted", targetUID)
engineConfig.SetNoHome(true)
HomePath = "/"
}
} else {
sylog.Verbosef("System UID %d requested, home won't be mounted", targetUID)
engineConfig.SetNoHome(true)
HomePath = "/"
}
}
if Hostname != "" {
UtsNamespace = true
engineConfig.SetHostname(Hostname)
}
checkPrivileges(IsBoot, "--boot", func() {})
if IsContained || IsContainAll || IsBoot {
engineConfig.SetContain(true)
if IsContainAll {
PidNamespace = true
IpcNamespace = true
IsCleanEnv = true
}
}
engineConfig.SetScratchDir(ScratchPath)
engineConfig.SetWorkdir(WorkdirPath)
homeSlice := strings.Split(HomePath, ":")
if len(homeSlice) > 2 || len(homeSlice) == 0 {
sylog.Fatalf("home argument has incorrect number of elements: %v", len(homeSlice))
}
engineConfig.SetHomeSource(homeSlice[0])
if len(homeSlice) == 1 {
engineConfig.SetHomeDest(homeSlice[0])
} else {
engineConfig.SetHomeDest(homeSlice[1])
}
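// Note: HomePath accepts "src:dest" (e.g. --home /data/alice:/home/alice,
// an illustrative value); a single element is used as both the host source
// and the container destination.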
if !engineConfig.File.AllowSetuid || IsFakeroot {
UserNamespace = true
}
/* if name submitted, run as instance */
if name != "" {
PidNamespace = true
IpcNamespace = true
engineConfig.SetInstance(true)
engineConfig.SetBootInstance(IsBoot)
_, err := instance.Get(name, instance.SingSubDir)
if err == nil {
sylog.Fatalf("instance %s already exists", name)
}
if IsBoot {
UtsNamespace = true
NetNamespace = true
if Hostname == "" {
engineConfig.SetHostname(name)
}
if !KeepPrivs {
engineConfig.SetDropCaps("CAP_SYS_BOOT,CAP_SYS_RAWIO")
}
generator.SetProcessArgs([]string{"/sbin/init"})
}
pwd, err := user.GetPwUID(uint32(os.Getuid()))
if err != nil {
sylog.Fatalf("failed to retrieve user information for UID %d: %s", os.Getuid(), err)
}
procname, err = instance.ProcName(name, pwd.Name)
if err != nil {
sylog.Fatalf("%s", err)
}
} else {
generator.SetProcessArgs(args)
procname = "Singularity runtime parent"
}
if NetNamespace {
generator.AddOrReplaceLinuxNamespace("network", "")
}
if UtsNamespace {
generator.AddOrReplaceLinuxNamespace("uts", "")
}
if PidNamespace {
generator.AddOrReplaceLinuxNamespace("pid", "")
engineConfig.SetNoInit(NoInit)
}
if IpcNamespace {
generator.AddOrReplaceLinuxNamespace("ipc", "")
}
if !UserNamespace {
if _, err := os.Stat(starter); os.IsNotExist(err) {
sylog.Verbosef("starter-suid not found, using user namespace")
UserNamespace = true
}
}
if UserNamespace {
generator.AddOrReplaceLinuxNamespace("user", "")
starter = buildcfg.LIBEXECDIR + "/singularity/bin/starter"
if IsFakeroot {
generator.AddLinuxUIDMapping(uid, 0, 1)
generator.AddLinuxGIDMapping(gid, 0, 1)
} else {
generator.AddLinuxUIDMapping(uid, uid, 1)
generator.AddLinuxGIDMapping(gid, gid, 1)
}
}
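// With --fakeroot the single-entry maps above present the invoking user as
// root inside the user namespace (host uid -> container 0); otherwise an
// identity mapping (uid -> uid, gid -> gid) is installed.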
// Copy and cache environment
environment := os.Environ()
// Clean environment
env.SetContainerEnv(&generator, environment, IsCleanEnv, engineConfig.GetHomeDest())
// force to use getwd syscall
os.Unsetenv("PWD")
if pwd, err := os.Getwd(); err == nil {
if PwdPath != "" {
generator.SetProcessCwd(PwdPath)
} else {
if engineConfig.GetContain() {
generator.SetProcessCwd(engineConfig.GetHomeDest())
} else {
generator.SetProcessCwd(pwd)
}
}
} else {
sylog.Warningf("can't determine current working directory: %s", err)
}
Env := []string{sylog.GetEnvVar()}
generator.AddProcessEnv("SINGULARITY_APPNAME", AppName)
// convert image file to sandbox if image contains
// a squashfs filesystem
if UserNamespace && fs.IsFile(image) {
unsquashfsPath := ""
if engineConfig.File.MksquashfsPath != "" {
d := filepath.Dir(engineConfig.File.MksquashfsPath)
unsquashfsPath = filepath.Join(d, "unsquashfs")
}
sylog.Verbosef("User namespace requested, convert image %s to sandbox", image)
sylog.Infof("Convert SIF file to sandbox...")
dir, err := convertImage(image, unsquashfsPath)
if err != nil {
sylog.Fatalf("while extracting %s: %s", image, err)
}
engineConfig.SetImage(dir)
engineConfig.SetDeleteImage(true)
generator.AddProcessEnv("SINGULARITY_CONTAINER", dir)
}
// actions_linux.go
return "", fmt.Errorf("not a squashfs root filesystem")
}
// create a reader for rootfs partition
reader, err := image.NewPartitionReader(img, "", 0)
if err != nil {
return "", fmt.Errorf("could not extract root filesystem: %s", err)
}
s := unpacker.NewSquashfs()
if !s.HasUnsquashfs() && unsquashfsPath != "" {
s.UnsquashfsPath = unsquashfsPath
}
// keep compatibility with v2
tmpdir := os.Getenv("SINGULARITY_LOCALCACHEDIR")
if tmpdir == "" {
tmpdir = os.Getenv("SINGULARITY_CACHEDIR")
}
// create temporary sandbox
dir, err := ioutil.TempDir(tmpdir, "rootfs-")
if err != nil {
return "", fmt.Errorf("could not create temporary sandbox: %s", err)
}
// extract root filesystem
if err := s.ExtractAll(reader, dir); err != nil {
os.RemoveAll(dir)
return "", fmt.Errorf("root filesystem extraction failed: %s", err)
}
return dir, err
}
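// Sketch of how convertImage is driven (exampleConvertImage is a
// hypothetical helper mirroring the call in execStarter below; it is not
// part of the original file):
func exampleConvertImage(imagePath, unsquashfsPath string) string {
	dir, err := convertImage(imagePath, unsquashfsPath)
	if err != nil {
		sylog.Fatalf("while extracting %s: %s", imagePath, err)
	}
	// The caller owns the temporary sandbox and must arrange its removal,
	// as execStarter does via engineConfig.SetDeleteImage(true).
	return dir
}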
// TODO: Let's stick this in another file so that the CLI is just CLI
func execStarter(cobraCmd *cobra.Command, image string, args []string, name string) {
targetUID := 0
targetGID := make([]int, 0)
procname := ""
uid := uint32(os.Getuid())
gid := uint32(os.Getgid())
// Are we running from a privileged account?
isPrivileged := uid == 0
checkPrivileges := func(cond bool, desc string, fn func()) {
if !cond {
return
}
if !isPrivileged {
sylog.Fatalf("%s requires root privileges", desc)
}
fn()
}
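// Illustrative use of the helper above (the same call appears further down
// in this function): a root-only flag is wired as
//
//	checkPrivileges(AllowSUID, "--allow-setuid", func() {
//		engineConfig.SetAllowSUID(AllowSUID)
//	})
//
// The condition is tested first, so unprivileged runs fail only if the
// gated flag was actually requested.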
syscall.Umask(0022)
starter := buildcfg.LIBEXECDIR + "/singularity/bin/starter-suid"
engineConfig := singularityConfig.NewConfig()
configurationFile := buildcfg.SYSCONFDIR + "/singularity/singularity.conf"
if err := config.Parser(configurationFile, engineConfig.File); err != nil {
sylog.Fatalf("Unable to parse singularity.conf file: %s", err)
}
ociConfig := &oci.Config{}
generator := generate.Generator{Config: &ociConfig.Spec}
engineConfig.OciConfig = ociConfig
generator.SetProcessArgs(args)
uidParam := security.GetParam(Security, "uid")
gidParam := security.GetParam(Security, "gid")
// handle target UID/GID for root user
checkPrivileges(uidParam != "", "uid security feature", func() {
u, err := strconv.ParseUint(uidParam, 10, 32)
if err != nil {
sylog.Fatalf("failed to parse provided UID")
}
targetUID = int(u)
uid = uint32(targetUID)
engineConfig.SetTargetUID(targetUID)
})
checkPrivileges(gidParam != "", "gid security feature", func() {
gids := strings.Split(gidParam, ":")
for _, id := range gids {
g, err := strconv.ParseUint(id, 10, 32)
if err != nil {
sylog.Fatalf("failed to parse provided GID")
}
targetGID = append(targetGID, int(g))
}
if len(gids) > 0 {
gid = uint32(targetGID[0])
}
engineConfig.SetTargetGID(targetGID)
})
if strings.HasPrefix(image, "instance://") {
if name != "" {
sylog.Fatalf("Starting an instance from another is not allowed")
}
instanceName := instance.ExtractName(image)
file, err := instance.Get(instanceName, instance.SingSubDir)
if err != nil {
sylog.Fatalf("%s", err)
}
if !file.Privileged {
UserNamespace = true
}
generator.AddProcessEnv("SINGULARITY_CONTAINER", file.Image)
generator.AddProcessEnv("SINGULARITY_NAME", filepath.Base(file.Image))
engineConfig.SetImage(image)
engineConfig.SetInstanceJoin(true)
} else {
abspath, err := filepath.Abs(image)
generator.AddProcessEnv("SINGULARITY_CONTAINER", abspath)
generator.AddProcessEnv("SINGULARITY_NAME", filepath.Base(abspath))
if err != nil {
sylog.Fatalf("Failed to determine image absolute path for %s: %s", image, err)
}
engineConfig.SetImage(abspath)
}
if !NoNvidia && (Nvidia || engineConfig.File.AlwaysUseNv) {
userPath := os.Getenv("USER_PATH")
if engineConfig.File.AlwaysUseNv {
sylog.Verbosef("'always use nv = yes' found in singularity.conf")
sylog.Verbosef("binding nvidia files into container")
}
libs, bins, err := nvidia.Paths(buildcfg.SINGULARITY_CONFDIR, userPath)
if err != nil {
sylog.Warningf("Unable to capture NVIDIA bind points: %v", err)
} else {
if len(bins) == 0 {
sylog.Infof("Could not find any NVIDIA binaries on this host!")
} else {
if IsWritable {
sylog.Warningf("NVIDIA binaries may not be bound with --writable")
}
for _, binary := range bins {
usrBinBinary := filepath.Join("/usr/bin", filepath.Base(binary))
bind := strings.Join([]string{binary, usrBinBinary}, ":")
BindPaths = append(BindPaths, bind)
}
}
if len(libs) == 0 {
sylog.Warningf("Could not find any NVIDIA libraries on this host!")
sylog.Warningf("You may need to edit %v/nvliblist.conf", buildcfg.SINGULARITY_CONFDIR)
} else {
ContainLibsPath = append(ContainLibsPath, libs...)
}
}
}
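// Illustrative effect (hypothetical path): an NVIDIA binary found at
// /usr/local/nvidia/nvidia-smi is appended to BindPaths as
// "/usr/local/nvidia/nvidia-smi:/usr/bin/nvidia-smi", putting it on the
// container's default PATH, while libraries travel via ContainLibsPath
// rather than as individual bind mounts.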
engineConfig.SetBindPath(BindPaths)
engineConfig.SetNetwork(Network)
engineConfig.SetDNS(DNS)
engineConfig.SetNetworkArgs(NetworkArgs)
engineConfig.SetOverlayImage(OverlayPath)
engineConfig.SetWritableImage(IsWritable)
engineConfig.SetNoHome(NoHome)
engineConfig.SetNv(Nvidia)
engineConfig.SetAddCaps(AddCaps)
engineConfig.SetDropCaps(DropCaps)
checkPrivileges(AllowSUID, "--allow-setuid", func() {
engineConfig.SetAllowSUID(AllowSUID)
})
checkPrivileges(KeepPrivs, "--keep-privs", func() {
engineConfig.SetKeepPrivs(KeepPrivs)
})
engineConfig.SetNoPrivs(NoPrivs)
engineConfig.SetSecurity(Security)
engineConfig.SetShell(ShellPath)
engineConfig.SetLibrariesPath(ContainLibsPath)
if ShellPath != "" {
generator.AddProcessEnv("SINGULARITY_SHELL", ShellPath)
}
checkPrivileges(CgroupsPath != "", "--apply-cgroups", func() {
engineConfig.SetCgroupsPath(CgroupsPath)
})
if IsWritable && IsWritableTmpfs {
sylog.Warningf("Disabling --writable-tmpfs flag, mutually exclusive with --writable")
engineConfig.SetWritableTmpfs(false)
} else {
engineConfig.SetWritableTmpfs(IsWritableTmpfs)
}
homeFlag := cobraCmd.Flag("home")
engineConfig.SetCustomHome(homeFlag.Changed)
// set home directory for the targeted UID if it exists on host system
if !homeFlag.Changed && targetUID != 0 {
if targetUID > 500 {
if pwd, err := user.GetPwUID(uint32(targetUID)); err == nil {
sylog.Debugf("Target UID requested, set home directory to %s", pwd.Dir)
HomePath = pwd.Dir
engineConfig.SetCustomHome(true)
} else {
sylog.Verbosef("Home directory for UID %d not found, home won't be mounted", targetUID)
engineConfig.SetNoHome(true)
HomePath = "/"
}
} else {
sylog.Verbosef("System UID %d requested, home won't be mounted", targetUID)
engineConfig.SetNoHome(true)
HomePath = "/"
}
}
if Hostname != "" {
UtsNamespace = true
engineConfig.SetHostname(Hostname)
}
checkPrivileges(IsBoot, "--boot", func() {})
if IsContained || IsContainAll || IsBoot {
engineConfig.SetContain(true)
if IsContainAll {
PidNamespace = true
IpcNamespace = true
IsCleanEnv = true
}
}
engineConfig.SetScratchDir(ScratchPath)
engineConfig.SetWorkdir(WorkdirPath)
homeSlice := strings.Split(HomePath, ":")
if len(homeSlice) > 2 || len(homeSlice) == 0 {
sylog.Fatalf("home argument has incorrect number of elements: %v", len(homeSlice))
}
engineConfig.SetHomeSource(homeSlice[0])
if len(homeSlice) == 1 {
engineConfig.SetHomeDest(homeSlice[0])
} else {
engineConfig.SetHomeDest(homeSlice[1])
}
// actions_linux.go
func EnsureRootPriv(cmd *cobra.Command, args []string) {
if os.Geteuid() != 0 {
if len(args) >= 1 && len(args[0]) > 0 {
// The first argument is the context
sylog.Fatalf("command '%s %s' requires root privileges", args[0], cmd.Name())
} else {
sylog.Fatalf("command %s requires root privileges", cmd.Name())
}
}
}
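// EnsureRootPriv is meant as a cobra pre-run guard (sketch): attached to a
// root-only command, it aborts before any engine setup happens for
// unprivileged callers.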
func convertImage(filename string, unsquashfsPath string) (string, error) {
img, err := image.Init(filename, false)
if err != nil {
return "", fmt.Errorf("could not open image %s: %s", filename, err)
}
defer img.File.Close()
if !img.HasRootFs() {
return "", fmt.Errorf("no root filesystem found in %s", filename)
}
// squashfs only
if img.Partitions[0].Type != image.SQUASHFS {
return "", fmt.Errorf("not a squashfs root filesystem")
}
// create a reader for rootfs partition
reader, err := image.NewPartitionReader(img, "", 0)
if err != nil {
return "", fmt.Errorf("could not extract root filesystem: %s", err)
}
s := unpacker.NewSquashfs()
if !s.HasUnsquashfs() && unsquashfsPath != "" {
s.UnsquashfsPath = unsquashfsPath
}
// keep compatibility with v2
tmpdir := os.Getenv("SINGULARITY_LOCALCACHEDIR")
if tmpdir == "" {
tmpdir = os.Getenv("SINGULARITY_CACHEDIR")
}
// create temporary sandbox
dir, err := ioutil.TempDir(tmpdir, "rootfs-")
if err != nil {
return "", fmt.Errorf("could not create temporary sandbox: %s", err)
}
// extract root filesystem
if err := s.ExtractAll(reader, dir); err != nil {
os.RemoveAll(dir)
return "", fmt.Errorf("root filesystem extraction failed: %s", err)
}
return dir, err
}
// TODO: Let's stick this in another file so that the CLI is just CLI
func execStarter(cobraCmd *cobra.Command, image string, args []string, name string) {
targetUID := 0
targetGID := make([]int, 0)
procname := ""
uid := uint32(os.Getuid())
gid := uint32(os.Getgid())
// Are we running from a privileged account?
isPrivileged := uid == 0
checkPrivileges := func(cond bool, desc string, fn func()) {
if !cond {
return
}
if !isPrivileged {
sylog.Fatalf("%s requires root privileges", desc)
}
fn()
}
syscall.Umask(0022)
starter := buildcfg.LIBEXECDIR + "/singularity/bin/starter-suid"
engineConfig := singularityConfig.NewConfig()
configurationFile := buildcfg.SYSCONFDIR + "/singularity/singularity.conf"
if err := config.Parser(configurationFile, engineConfig.File); err != nil {
sylog.Fatalf("Unable to parse singularity.conf file: %s", err)
}
ociConfig := &oci.Config{}
generator := generate.Generator{Config: &ociConfig.Spec}
engineConfig.OciConfig = ociConfig
generator.SetProcessArgs(args)
uidParam := security.GetParam(Security, "uid")
gidParam := security.GetParam(Security, "gid")
// handle target UID/GID for root user
checkPrivileges(uidParam != "", "uid security feature", func() {
u, err := strconv.ParseUint(uidParam, 10, 32)
if err != nil {
sylog.Fatalf("failed to parse provided UID")
}
targetUID = int(u)
uid = uint32(targetUID)
engineConfig.SetTargetUID(targetUID)
})
checkPrivileges(gidParam != "", "gid security feature", func() {
gids := strings.Split(gidParam, ":")
for _, id := range gids {
g, err := strconv.ParseUint(id, 10, 32)
if err != nil {
sylog.Fatalf("failed to parse provided GID")
}
targetGID = append(targetGID, int(g))
}
if len(gids) > 0 {
gid = uint32(targetGID[0])
}
engineConfig.SetTargetGID(targetGID)
})
if strings.HasPrefix(image, "instance://") {
if name != "" {
sylog.Fatalf("Starting an instance from another is not allowed")
}
instanceName := instance.ExtractName(image)
file, err := instance.Get(instanceName, instance.SingSubDir)
if err != nil {
sylog.Fatalf("%s", err)
}
if !file.Privileged {
UserNamespace = true
}
generator.AddProcessEnv("SINGULARITY_CONTAINER", file.Image)
generator.AddProcessEnv("SINGULARITY_NAME", filepath.Base(file.Image))
engineConfig.SetImage(image)
engineConfig.SetInstanceJoin(true)
} else {
abspath, err := filepath.Abs(image)
generator.AddProcessEnv("SINGULARITY_CONTAINER", abspath)
generator.AddProcessEnv("SINGULARITY_NAME", filepath.Base(abspath))
if err != nil {
sylog.Fatalf("Failed to determine image absolute path for %s: %s", image, err)
}
engineConfig.SetImage(abspath)
}
if !NoNvidia && (Nvidia || engineConfig.File.AlwaysUseNv) {
userPath := os.Getenv("USER_PATH")
if engineConfig.File.AlwaysUseNv {
sylog.Verbosef("'always use nv = yes' found in singularity.conf")
sylog.Verbosef("binding nvidia files into container")
}
libs, bins, err := nvidia.Paths(buildcfg.SINGULARITY_CONFDIR, userPath)
if err != nil {
sylog.Warningf("Unable to capture NVIDIA bind points: %v", err)
} else {
if len(bins) == 0 {
sylog.Infof("Could not find any NVIDIA binaries on this host!")
} else {
if IsWritable {
sylog.Warningf("NVIDIA binaries may not be bound with --writable")
}
for _, binary := range bins {
usrBinBinary := filepath.Join("/usr/bin", filepath.Base(binary))
bind := strings.Join([]string{binary, usrBinBinary}, ":")
BindPaths = append(BindPaths, bind)
}
}
if len(libs) == 0 {
sylog.Warningf("Could not find any NVIDIA libraries on this host!")
sylog.Warningf("You may need to edit %v/nvliblist.conf", buildcfg.SINGULARITY_CONFDIR)
} else {
ContainLibsPath = append(ContainLibsPath, libs...)
}
}
}
engineConfig.SetBindPath(BindPaths)
engineConfig.SetNetwork(Network)
engineConfig.SetDNS(DNS)
engineConfig.SetNetworkArgs(NetworkArgs)
engineConfig.SetOverlayImage(OverlayPath)
engineConfig.SetWritableImage(IsWritable)
engineConfig.SetNoHome(NoHome)
engineConfig.SetNv(Nvidia)
engineConfig.SetAddCaps(AddCaps)
engineConfig.SetDropCaps(DropCaps)
checkPrivileges(AllowSUID, "--allow-setuid", func() {
engineConfig.SetAllowSUID(AllowSUID)
})
checkPrivileges(KeepPrivs, "--keep-privs", func() {
engineConfig.SetKeepPrivs(KeepPrivs)
})
engineConfig.SetNoPrivs(NoPrivs)
engineConfig.SetSecurity(Security)
engineConfig.SetShell(ShellPath)
engineConfig.SetLibrariesPath(ContainLibsPath)
if ShellPath != "" {
generator.AddProcessEnv("SINGULARITY_SHELL", ShellPath)
}
checkPrivileges(CgroupsPath != "", "--apply-cgroups", func() {
engineConfig.SetCgroupsPath(CgroupsPath)
})
if IsWritable && IsWritableTmpfs {
sylog.Warningf("Disabling --writable-tmpfs flag, mutually exclusive with --writable")
engineConfig.SetWritableTmpfs(false)
} else {
engineConfig.SetWritableTmpfs(IsWritableTmpfs)
}
homeFlag := cobraCmd.Flag("home")
engineConfig.SetCustomHome(homeFlag.Changed)
// set home directory for the targeted UID if it exists on host system
if !homeFlag.Changed && targetUID != 0 {
if targetUID > 500 {
if pwd, err := user.GetPwUID(uint32(targetUID)); err == nil {
sylog.Debugf("Target UID requested, set home directory to %s", pwd.Dir)
HomePath = pwd.Dir
engineConfig.SetCustomHome(true)
} else {
sylog.Verbosef("Home directory for UID %d not found, home won't be mounted", targetUID)
engineConfig.SetNoHome(true)
HomePath = "/"
}
} else {
sylog.Verbosef("System UID %d requested | EnsureRootPriv | identifier_name | |
# plugin.py
def logsdir(self):
ldir = self._logsdir
if ldir:
return ldir
logger_logsdir = self._config.getoption('logger_logsdir')
if not logger_logsdir:
logger_logsdir = self._config.getini('logger_logsdir')
if not logger_logsdir:
logger_logsdir = self._config.hook.pytest_logger_logsdir(config=self._config)
if logger_logsdir:
ldir = _make_logsdir_dir(logger_logsdir)
else:
ldir = _make_logsdir_tmpdir(self._config._tmpdirhandler)
self._logsdir = ldir
for link in self._logdirlinks:
_refresh_link(str(ldir), link)
return ldir
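# Resolution order, as implemented above: the --logger-logsdir option, then
# the logger_logsdir ini key, then the pytest_logger_logsdir hook, and
# finally a tmpdir-backed fallback; any registered logdir links are
# refreshed once the directory is known.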
def pytest_runtest_setup(self, item):
loggers = _choose_loggers(self._loggers, _loggers_from_hooks(item))
formatter = self._formatter_class()
item._logger = state = LoggerState(item=item,
stdoutloggers=loggers.stdout,
fileloggers=loggers.file,
formatter=formatter)
state.on_setup()
def pytest_runtest_teardown(self, item, nextitem):
logger = getattr(item, '_logger', None)
if logger:
logger.on_teardown()
@pytest.mark.hookwrapper
def pytest_runtest_makereport(self, item, call):
outcome = yield
tr = outcome.get_result()
logger = getattr(item, '_logger', None)
if logger:
if self._logsdir and self._split_by_outcome_subdir and tr.outcome in self._split_by_outcome_outcomes:
split_by_outcome_logdir = self._logsdir.join(self._split_by_outcome_subdir, tr.outcome)
nodeid = _sanitize_nodeid(item.nodeid)
nodepath = os.path.dirname(nodeid)
split_by_outcome_logdir.join(nodepath).ensure(dir=1)
destdir_relpath = os.path.relpath(str(self._logsdir.join(nodeid)),
str(split_by_outcome_logdir.join(nodepath)))
_refresh_link(destdir_relpath, str(split_by_outcome_logdir.join(nodeid)))
if call.when == 'teardown':
logger.on_makereport()
class LoggerState(object):
def __init__(self, item, stdoutloggers, fileloggers, formatter):
self._put_newlines = bool(item.config.option.capture == 'no' and stdoutloggers)
self.handlers = _make_handlers(stdoutloggers, fileloggers, item, formatter)
self.root_enabler = RootEnabler(bool(stdoutloggers and fileloggers))
def put_newline(self):
if self._put_newlines:
sys.stdout.write('\n')
def on_setup(self):
self.put_newline()
_enable(self.handlers)
self.root_enabler.enable()
def on_teardown(self):
self.put_newline()
def on_makereport(self):
self.root_enabler.disable()
_disable(self.handlers)
class RootEnabler(object):
def __init__(self, enabled):
self._enabled = enabled
self._root_level = logging.root.level
def enable(self):
if self._enabled:
self._root_level = logging.root.level
logging.root.setLevel(logging.NOTSET) # stops root logger from blocking logs
def disable(self):
if self._enabled:
logging.root.setLevel(self._root_level)
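# Rationale: handler-level filtering only works if the root logger passes
# records through, so while both stdout and file handlers are active the
# root level is forced to NOTSET and restored on disable(); with at most
# one kind of handler the root level is left untouched.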
class Loggers(object):
def __init__(self, stdout, file_):
self.stdout = stdout
self.file = file_
def __bool__(self):
return bool(self.stdout) or bool(self.file)
class LoggerConfig(object):
"""Configuration of logging to stdout and filesystem."""
def __init__(self):
self._enabled = False
self._loggers = []
self._formatter_class = None
self._log_option_default = ''
self._split_by_outcome_subdir = None
self._split_by_outcome_outcomes = []
def add_loggers(self, loggers, stdout_level=logging.NOTSET, file_level=logging.NOTSET):
"""Adds loggers for stdout/filesystem handling.
Stdout: loggers will log to stdout only when mentioned in `loggers` option. If they're
mentioned without explicit level, `stdout_level` will be used.
Filesystem: loggers will log to files at `file_level`.
:arg loggers: List of logger names.
:arg stdout_level: Default level at which stdout handlers will pass logs.
By default: `logging.NOTSET`, which means: pass everything.
:arg file_level: Level at which filesystem handlers will pass logs.
By default: `logging.NOTSET`, which means: pass everything.
"""
self._enabled = True
self._loggers.append((loggers, _sanitize_level(stdout_level), _sanitize_level(file_level)))
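# Example configuration via the pytest_logger_config hook (would live in a
# test suite's conftest.py; the logger names are illustrative):
#
#     def pytest_logger_config(logger_config):
#         logger_config.add_loggers(['setup', 'teardown'], stdout_level='info')
#         logger_config.set_log_option_default('setup')
#
# Levels may be ints, logging constants, or names understood by
# _sanitize_level below.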
def set_formatter_class(self, formatter_class):
"""Sets the `logging.Formatter` class to be used by all loggers.
:arg formatter_class: The `logging.Formatter` class
"""
if isinstance(formatter_class, logging.Formatter):
raise ValueError("Got a formatter instance instead of its class !")
if not issubclass(formatter_class, logging.Formatter):
raise ValueError("Formatter should be a class inheriting from logging.Formatter")
self._formatter_class = formatter_class
def set_log_option_default(self, value):
""" Sets default value of `log` option."""
self._log_option_default = value
def split_by_outcome(self, outcomes=None, subdir='by_outcome'):
"""Makes a directory inside main logdir where logs are further split by test outcome
:param outcomes: list of test outcomes to be handled (failed/passed/skipped)
:param subdir: name for the subdirectory in main log directory
"""
if outcomes is not None:
allowed_outcomes = ['passed', 'failed', 'skipped']
unexpected_outcomes = set(outcomes) - set(allowed_outcomes)
if unexpected_outcomes:
raise ValueError('got unexpected_outcomes: <' + str(list(unexpected_outcomes)) + '>')
self._split_by_outcome_outcomes = outcomes
else:
self._split_by_outcome_outcomes = ['failed']
self._split_by_outcome_subdir = subdir
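# Resulting layout (sketch, with the defaults above): a failed test's logs
# at <logsdir>/<nodeid> gain a relative symlink at
# <logsdir>/by_outcome/failed/<nodeid>, created in pytest_runtest_makereport.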
class LoggerHookspec(object):
def pytest_logger_config(self, logger_config):
""" called before cmdline options parsing. Accepts terse configuration
of both stdout and file logging, adds cmdline options to manipulate
stdout logging. Cannot be used together with \\*loggers hooks.
:arg logger_config: instance of :py:class:`LoggerConfig`, allows
setting loggers for stdout and file handling and their levels.
"""
def pytest_logger_stdoutloggers(self, item):
""" called before testcase setup. If implemented, given loggers
will emit their output to terminal output. Cannot be used together with
logger_config hook.
:arg item: test item for which handlers are to be setup.
:return list: List should contain logger name strings
or tuples with logger name string and logging level.
"""
def pytest_logger_fileloggers(self, item):
""" called before testcase setup. If implemented, given loggers
will emit their output to files within logs temporary directory.
Cannot be used together with logger_config hook.
:arg item: test item for which handlers are to be setup.
:return list: List should contain logger name strings
or tuples with logger name string and logging level.
"""
def pytest_logger_logdirlink(self, config):
""" called after cmdline options parsing.
If implemented, symlink to logs directory will be created.
:arg config: pytest config object, holds e.g. options
:return string: Absolute path of requested link to logs directory.
"""
@pytest.hookspec(firstresult=True)
def pytest_logger_logsdir(self, config):
""" called after cmdline options parsing.
If implemented, place logs into the location returned. This is similar
to using --logger-logsdir or the logger_logsdir ini option, but will
only be used if those are not.
Additionally, if multiple implementations of this hook are found, only
the first non-None value will be used.
:arg config: pytest config object, holds e.g. options
:return string: Absolute path of logs directory.
"""
class DefaultFormatter(logging.Formatter):
short_level_names = {
logging.FATAL: 'ftl',
logging.ERROR: 'err',
logging.WARN: 'wrn',
logging.INFO: 'inf',
logging.DEBUG: 'dbg',
}
format_string = '%(asctime)s %(levelshortname)s %(name)s: %(message)s'
def __init__(self):
logging.Formatter.__init__(self, DefaultFormatter.format_string)
self._start = time.time()
def formatTime(self, record, datefmt=None):
ct = record.created - self._start
dt = datetime.datetime.utcfromtimestamp(ct)
return dt.strftime("%M:%S.%f")[:-3] # omit useconds, leave mseconds
def format(self, record):
record.levelshortname = DefaultFormatter.short_level_names.get(record.levelno,
'l%s' % record.levelno)
return logging.Formatter.format(self, record)
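# Example rendering (illustrative record): a message logged 1.234s after the
# formatter was created, at INFO on logger 'setup', comes out roughly as
#     00:01.234 inf setup: starting fixtures
# i.e. minutes:seconds.milliseconds since start, short level name, logger.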
@pytest.fixture
def logdir(request):
return _make_logdir(request._pyfuncitem)
def _sanitize_nodeid(filename):
filename = filename.replace('::()::', '/')
filename = filename.replace('::', '/')
filename = re.sub(r'\[(.+)\]', r'-\1', filename)
return filename
# plugin.py
def logsdir(self):
ldir = self._logsdir
if ldir:
return ldir
logger_logsdir = self._config.getoption('logger_logsdir')
if not logger_logsdir:
logger_logsdir = self._config.getini('logger_logsdir')
if not logger_logsdir:
logger_logsdir = self._config.hook.pytest_logger_logsdir(config=self._config)
if logger_logsdir:
ldir = _make_logsdir_dir(logger_logsdir)
else:
ldir = _make_logsdir_tmpdir(self._config._tmpdirhandler)
self._logsdir = ldir
for link in self._logdirlinks:
_refresh_link(str(ldir), link)
return ldir
def pytest_runtest_setup(self, item):
loggers = _choose_loggers(self._loggers, _loggers_from_hooks(item))
formatter = self._formatter_class()
item._logger = state = LoggerState(item=item,
stdoutloggers=loggers.stdout,
fileloggers=loggers.file,
formatter=formatter)
state.on_setup()
def pytest_runtest_teardown(self, item, nextitem):
logger = getattr(item, '_logger', None)
if logger:
logger.on_teardown()
@pytest.mark.hookwrapper
def pytest_runtest_makereport(self, item, call):
outcome = yield
tr = outcome.get_result()
logger = getattr(item, '_logger', None)
if logger:
if self._logsdir and self._split_by_outcome_subdir and tr.outcome in self._split_by_outcome_outcomes:
split_by_outcome_logdir = self._logsdir.join(self._split_by_outcome_subdir, tr.outcome)
nodeid = _sanitize_nodeid(item.nodeid)
nodepath = os.path.dirname(nodeid)
split_by_outcome_logdir.join(nodepath).ensure(dir=1)
destdir_relpath = os.path.relpath(str(self._logsdir.join(nodeid)),
str(split_by_outcome_logdir.join(nodepath)))
_refresh_link(destdir_relpath, str(split_by_outcome_logdir.join(nodeid)))
if call.when == 'teardown':
logger.on_makereport()
class LoggerState(object):
def __init__(self, item, stdoutloggers, fileloggers, formatter):
self._put_newlines = bool(item.config.option.capture == 'no' and stdoutloggers)
self.handlers = _make_handlers(stdoutloggers, fileloggers, item, formatter)
self.root_enabler = RootEnabler(bool(stdoutloggers and fileloggers))
def put_newline(self):
if self._put_newlines:
sys.stdout.write('\n')
def on_setup(self):
self.put_newline()
_enable(self.handlers)
self.root_enabler.enable()
def on_teardown(self):
self.put_newline()
def on_makereport(self):
self.root_enabler.disable()
_disable(self.handlers)
class RootEnabler(object):
def __init__(self, enabled):
self._enabled = enabled
self._root_level = logging.root.level
def enable(self):
if self._enabled:
self._root_level = logging.root.level
logging.root.setLevel(logging.NOTSET) # stops root logger from blocking logs
def disable(self):
if self._enabled:
logging.root.setLevel(self._root_level)
class Loggers(object):
def __init__(self, stdout, file_):
self.stdout = stdout
self.file = file_
def __bool__(self):
return bool(self.stdout) or bool(self.file)
class LoggerConfig(object):
"""Configuration of logging to stdout and filesystem."""
def __init__(self):
self._enabled = False
self._loggers = []
self._formatter_class = None
self._log_option_default = ''
self._split_by_outcome_subdir = None
self._split_by_outcome_outcomes = []
def add_loggers(self, loggers, stdout_level=logging.NOTSET, file_level=logging.NOTSET):
"""Adds loggers for stdout/filesystem handling.
Stdout: loggers will log to stdout only when mentioned in `loggers` option. If they're
mentioned without explicit level, `stdout_level` will be used.
Filesystem: loggers will log to files at `file_level`.
:arg loggers: List of logger names.
:arg stdout_level: Default level at which stdout handlers will pass logs.
By default: `logging.NOTSET`, which means: pass everything.
:arg file_level: Level at which filesystem handlers will pass logs.
By default: `logging.NOTSET`, which means: pass everything.
"""
self._enabled = True
self._loggers.append((loggers, _sanitize_level(stdout_level), _sanitize_level(file_level)))
def set_formatter_class(self, formatter_class):
"""Sets the `logging.Formatter` class to be used by all loggers.
:arg formatter_class: The `logging.Formatter` class
"""
if isinstance(formatter_class, logging.Formatter):
raise ValueError("Got a formatter instance instead of its class !")
if not issubclass(formatter_class, logging.Formatter):
raise ValueError("Formatter should be a class inheriting from logging.Formatter") | """ Sets default value of `log` option."""
self._log_option_default = value
def split_by_outcome(self, outcomes=None, subdir='by_outcome'):
"""Makes a directory inside main logdir where logs are further split by test outcome
:param outcomes: list of test outcomes to be handled (failed/passed/skipped)
:param subdir: name for the subdirectory in main log directory
"""
if outcomes is not None:
allowed_outcomes = ['passed', 'failed', 'skipped']
unexpected_outcomes = set(outcomes) - set(allowed_outcomes)
if unexpected_outcomes:
raise ValueError('got unexpected_outcomes: <' + str(list(unexpected_outcomes)) + '>')
self._split_by_outcome_outcomes = outcomes
else:
self._split_by_outcome_outcomes = ['failed']
self._split_by_outcome_subdir = subdir
class LoggerHookspec(object):
def pytest_logger_config(self, logger_config):
""" called before cmdline options parsing. Accepts terse configuration
of both stdout and file logging, adds cmdline options to manipulate
stdout logging. Cannot be used together with \\*loggers hooks.
:arg logger_config: instance of :py:class:`LoggerConfig`, allows
setting loggers for stdout and file handling and their levels.
"""
def pytest_logger_stdoutloggers(self, item):
""" called before testcase setup. If implemented, given loggers
will emit their output to terminal output. Cannot be used together with
logger_config hook.
:arg item: test item for which handlers are to be setup.
:return list: List should contain logger name strings
or tuples with logger name string and logging level.
"""
def pytest_logger_fileloggers(self, item):
""" called before testcase setup. If implemented, given loggers
will emit their output to files within logs temporary directory.
Cannot be used together with logger_config hook.
:arg item: test item for which handlers are to be setup.
:return list: List should contain logger name strings
or tuples with logger name string and logging level.
"""
def pytest_logger_logdirlink(self, config):
""" called after cmdline options parsing.
If implemented, symlink to logs directory will be created.
:arg config: pytest config object, holds e.g. options
:return string: Absolute path of requested link to logs directory.
"""
@pytest.hookspec(firstresult=True)
def pytest_logger_logsdir(self, config):
""" called after cmdline options parsing.
If implemented, place logs into the location returned. This is similar
to using --logger-logsdir or the logger_logsdir ini option, but will
only be used if those are not.
Additionally, if multiple implementations of this hook are found, only
the first non-None value will be used.
:arg config: pytest config object, holds e.g. options
:return string: Absolute path of logs directory.
"""
class DefaultFormatter(logging.Formatter):
short_level_names = {
logging.FATAL: 'ftl',
logging.ERROR: 'err',
logging.WARN: 'wrn',
logging.INFO: 'inf',
logging.DEBUG: 'dbg',
}
format_string = '%(asctime)s %(levelshortname)s %(name)s: %(message)s'
def __init__(self):
logging.Formatter.__init__(self, DefaultFormatter.format_string)
self._start = time.time()
def formatTime(self, record, datefmt=None):
ct = record.created - self._start
dt = datetime.datetime.utcfromtimestamp(ct)
return dt.strftime("%M:%S.%f")[:-3] # omit useconds, leave mseconds
def format(self, record):
record.levelshortname = DefaultFormatter.short_level_names.get(record.levelno,
'l%s' % record.levelno)
return logging.Formatter.format(self, record)
@pytest.fixture
def logdir(request):
return _make_logdir(request._pyfuncitem)
def _sanitize_nodeid(filename):
filename = filename.replace('::()::', '/')
filename = filename.replace('::', '/')
filename = re.sub(r'\[(.+)\]', r'-\1', filename)
return filename
# plugin.py
if logger_logsdir:
ldir = _make_logsdir_dir(logger_logsdir)
else:
ldir = _make_logsdir_tmpdir(self._config._tmpdirhandler)
self._logsdir = ldir
for link in self._logdirlinks:
_refresh_link(str(ldir), link)
return ldir
def pytest_runtest_setup(self, item):
loggers = _choose_loggers(self._loggers, _loggers_from_hooks(item))
formatter = self._formatter_class()
item._logger = state = LoggerState(item=item,
stdoutloggers=loggers.stdout,
fileloggers=loggers.file,
formatter=formatter)
state.on_setup()
def pytest_runtest_teardown(self, item, nextitem):
logger = getattr(item, '_logger', None)
if logger:
logger.on_teardown()
@pytest.mark.hookwrapper
def pytest_runtest_makereport(self, item, call):
outcome = yield
tr = outcome.get_result()
logger = getattr(item, '_logger', None)
if logger:
if self._logsdir and self._split_by_outcome_subdir and tr.outcome in self._split_by_outcome_outcomes:
split_by_outcome_logdir = self._logsdir.join(self._split_by_outcome_subdir, tr.outcome)
nodeid = _sanitize_nodeid(item.nodeid)
nodepath = os.path.dirname(nodeid)
split_by_outcome_logdir.join(nodepath).ensure(dir=1)
destdir_relpath = os.path.relpath(str(self._logsdir.join(nodeid)),
str(split_by_outcome_logdir.join(nodepath)))
_refresh_link(destdir_relpath, str(split_by_outcome_logdir.join(nodeid)))
if call.when == 'teardown':
logger.on_makereport()
class LoggerState(object):
def __init__(self, item, stdoutloggers, fileloggers, formatter):
self._put_newlines = bool(item.config.option.capture == 'no' and stdoutloggers)
self.handlers = _make_handlers(stdoutloggers, fileloggers, item, formatter)
self.root_enabler = RootEnabler(bool(stdoutloggers and fileloggers))
def put_newline(self):
if self._put_newlines:
sys.stdout.write('\n')
def on_setup(self):
self.put_newline()
_enable(self.handlers)
self.root_enabler.enable()
def on_teardown(self):
self.put_newline()
def on_makereport(self):
self.root_enabler.disable()
_disable(self.handlers)
class RootEnabler(object):
def __init__(self, enabled):
self._enabled = enabled
self._root_level = logging.root.level
def enable(self):
if self._enabled:
self._root_level = logging.root.level
logging.root.setLevel(logging.NOTSET) # stops root logger from blocking logs
def disable(self):
if self._enabled:
logging.root.setLevel(self._root_level)
class Loggers(object):
def __init__(self, stdout, file_):
self.stdout = stdout
self.file = file_
def __bool__(self):
return bool(self.stdout) or bool(self.file)
class LoggerConfig(object):
"""Configuration of logging to stdout and filesystem."""
def __init__(self):
self._enabled = False
self._loggers = []
self._formatter_class = None
self._log_option_default = ''
self._split_by_outcome_subdir = None
self._split_by_outcome_outcomes = []
def add_loggers(self, loggers, stdout_level=logging.NOTSET, file_level=logging.NOTSET):
"""Adds loggers for stdout/filesystem handling.
Stdout: loggers will log to stdout only when mentioned in `loggers` option. If they're
mentioned without explicit level, `stdout_level` will be used.
Filesystem: loggers will log to files at `file_level`.
:arg loggers: List of logger names.
:arg stdout_level: Default level at which stdout handlers will pass logs.
By default: `logging.NOTSET`, which means: pass everything.
:arg file_level: Level at which filesystem handlers will pass logs.
By default: `logging.NOTSET`, which means: pass everything.
"""
self._enabled = True
self._loggers.append((loggers, _sanitize_level(stdout_level), _sanitize_level(file_level)))
def set_formatter_class(self, formatter_class):
"""Sets the `logging.Formatter` class to be used by all loggers.
:arg formatter_class: The `logging.Formatter` class
"""
if isinstance(formatter_class, logging.Formatter):
raise ValueError("Got a formatter instance instead of its class !")
if not issubclass(formatter_class, logging.Formatter):
raise ValueError("Formatter should be a class inheriting from logging.Formatter")
self._formatter_class = formatter_class
def set_log_option_default(self, value):
""" Sets default value of `log` option."""
self._log_option_default = value
def split_by_outcome(self, outcomes=None, subdir='by_outcome'):
"""Makes a directory inside main logdir where logs are further split by test outcome
:param outcomes: list of test outcomes to be handled (failed/passed/skipped)
:param subdir: name for the subdirectory in main log directory
"""
if outcomes is not None:
allowed_outcomes = ['passed', 'failed', 'skipped']
unexpected_outcomes = set(outcomes) - set(allowed_outcomes)
if unexpected_outcomes:
raise ValueError('got unexpected_outcomes: <' + str(list(unexpected_outcomes)) + '>')
self._split_by_outcome_outcomes = outcomes
else:
self._split_by_outcome_outcomes = ['failed']
self._split_by_outcome_subdir = subdir
class LoggerHookspec(object):
def pytest_logger_config(self, logger_config):
""" called before cmdline options parsing. Accepts terse configuration
of both stdout and file logging, adds cmdline options to manipulate
stdout logging. Cannot be used together with \\*loggers hooks.
:arg logger_config: instance of :py:class:`LoggerConfig`, allows
setting loggers for stdout and file handling and their levels.
"""
def pytest_logger_stdoutloggers(self, item):
""" called before testcase setup. If implemented, given loggers
will emit their output to terminal output. Cannot be used together with
logger_config hook.
:arg item: test item for which handlers are to be setup.
:return list: List should contain logger name strings
or tuples with logger name string and logging level.
"""
def pytest_logger_fileloggers(self, item):
""" called before testcase setup. If implemented, given loggers
will emit their output to files within logs temporary directory.
Cannot be used together with logger_config hook.
:arg item: test item for which handlers are to be setup.
:return list: List should contain logger name strings
or tuples with logger name string and logging level.
"""
def pytest_logger_logdirlink(self, config):
""" called after cmdline options parsing.
If implemented, symlink to logs directory will be created.
:arg config: pytest config object, holds e.g. options
:return string: Absolute path of requested link to logs directory.
"""
@pytest.hookspec(firstresult=True)
def pytest_logger_logsdir(self, config):
""" called after cmdline options parsing.
If implemented, place logs into the location returned. This is similar
to using --logger-logsdir or the logger_logsdir ini option, but will
only be used if those are not.
Additionally, if multiple implementations of this hook are found, only
the first non-None value will be used.
:arg config: pytest config object, holds e.g. options
:return string: Absolute path of logs directory.
"""
class DefaultFormatter(logging.Formatter):
short_level_names = {
logging.FATAL: 'ftl',
logging.ERROR: 'err',
logging.WARN: 'wrn',
logging.INFO: 'inf',
logging.DEBUG: 'dbg',
}
format_string = '%(asctime)s %(levelshortname)s %(name)s: %(message)s'
def __init__(self):
logging.Formatter.__init__(self, DefaultFormatter.format_string)
self._start = time.time()
def formatTime(self, record, datefmt=None):
ct = record.created - self._start
dt = datetime.datetime.utcfromtimestamp(ct)
return dt.strftime("%M:%S.%f")[:-3] # omit useconds, leave mseconds
def format(self, record):
record.levelshortname = DefaultFormatter.short_level_names.get(record.levelno,
'l%s' % record.levelno)
return logging.Formatter.format(self, record)
@pytest.fixture
def logdir(request):
return _make_logdir(request._pyfuncitem)
def _sanitize_nodeid(filename):
filename = filename.replace('::()::', '/')
filename = filename.replace('::', '/')
filename = re.sub(r'\[(.+)\]', r'-\1', filename)
return filename
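# For example:
#     _sanitize_nodeid('dir/test_x.py::TestC::()::test_m[a-1]')
# returns 'dir/test_x.py/TestC/test_m-a-1', which is safe to use as a
# per-test log directory path.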
def _sanitize_level(level, raises=True):
if isinstance(level, basestring):
try:
return int(level)
except ValueError:
int_level = getattr(logging, level.upper(), None)
if int_level is not None:
return int_level
elif isinstance(level, int):
return level
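# Examples, per the branches above: _sanitize_level('info') ==
# _sanitize_level('INFO') == logging.INFO, _sanitize_level('15') == 15, and
# plain ints pass through unchanged; anything else falls through to the
# raises flag.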
# plugin.py
def logsdir(self):
ldir = self._logsdir
if ldir:
return ldir
logger_logsdir = self._config.getoption('logger_logsdir')
if not logger_logsdir:
logger_logsdir = self._config.getini('logger_logsdir')
if not logger_logsdir:
logger_logsdir = self._config.hook.pytest_logger_logsdir(config=self._config)
if logger_logsdir:
ldir = _make_logsdir_dir(logger_logsdir)
else:
ldir = _make_logsdir_tmpdir(self._config._tmpdirhandler)
self._logsdir = ldir
for link in self._logdirlinks:
_refresh_link(str(ldir), link)
return ldir
def pytest_runtest_setup(self, item):
loggers = _choose_loggers(self._loggers, _loggers_from_hooks(item))
formatter = self._formatter_class()
item._logger = state = LoggerState(item=item,
stdoutloggers=loggers.stdout,
fileloggers=loggers.file,
formatter=formatter)
state.on_setup()
def pytest_runtest_teardown(self, item, nextitem):
logger = getattr(item, '_logger', None)
if logger:
logger.on_teardown()
@pytest.mark.hookwrapper
def pytest_runtest_makereport(self, item, call):
outcome = yield
tr = outcome.get_result()
logger = getattr(item, '_logger', None)
if logger:
if self._logsdir and self._split_by_outcome_subdir and tr.outcome in self._split_by_outcome_outcomes:
split_by_outcome_logdir = self._logsdir.join(self._split_by_outcome_subdir, tr.outcome)
nodeid = _sanitize_nodeid(item.nodeid)
nodepath = os.path.dirname(nodeid)
split_by_outcome_logdir.join(nodepath).ensure(dir=1)
destdir_relpath = os.path.relpath(str(self._logsdir.join(nodeid)),
str(split_by_outcome_logdir.join(nodepath)))
_refresh_link(destdir_relpath, str(split_by_outcome_logdir.join(nodeid)))
if call.when == 'teardown':
logger.on_makereport()
class LoggerState(object):
def __init__(self, item, stdoutloggers, fileloggers, formatter):
self._put_newlines = bool(item.config.option.capture == 'no' and stdoutloggers)
self.handlers = _make_handlers(stdoutloggers, fileloggers, item, formatter)
self.root_enabler = RootEnabler(bool(stdoutloggers and fileloggers))
def put_newline(self):
if self._put_newlines:
sys.stdout.write('\n')
def on_setup(self):
self.put_newline()
_enable(self.handlers)
self.root_enabler.enable()
def on_teardown(self):
self.put_newline()
def on_makereport(self):
self.root_enabler.disable()
_disable(self.handlers)
class RootEnabler(object):
def __init__(self, enabled):
self._enabled = enabled
self._root_level = logging.root.level
def enable(self):
if self._enabled:
self._root_level = logging.root.level
logging.root.setLevel(logging.NOTSET) # stops root logger from blocking logs
def disable(self):
if self._enabled:
logging.root.setLevel(self._root_level)
class Loggers(object):
def __init__(self, stdout, file_):
self.stdout = stdout
self.file = file_
def __bool__(self):
return bool(self.stdout) or bool(self.file)
class LoggerConfig(object):
"""Configuration of logging to stdout and filesystem."""
def __init__(self):
self._enabled = False
self._loggers = []
self._formatter_class = None
self._log_option_default = ''
self._split_by_outcome_subdir = None
self._split_by_outcome_outcomes = []
def add_loggers(self, loggers, stdout_level=logging.NOTSET, file_level=logging.NOTSET):
"""Adds loggers for stdout/filesystem handling.
Stdout: loggers will log to stdout only when mentioned in `loggers` option. If they're
mentioned without explicit level, `stdout_level` will be used.
Filesystem: loggers will log to files at `file_level`.
:arg loggers: List of logger names.
:arg stdout_level: Default level at which stdout handlers will pass logs.
By default: `logging.NOTSET`, which means: pass everything.
:arg file_level: Level at which filesystem handlers will pass logs.
By default: `logging.NOTSET`, which means: pass everything.
"""
self._enabled = True
self._loggers.append((loggers, _sanitize_level(stdout_level), _sanitize_level(file_level)))
def set_formatter_class(self, formatter_class):
"""Sets the `logging.Formatter` class to be used by all loggers.
:arg formatter_class: The `logging.Formatter` class
"""
if isinstance(formatter_class, logging.Formatter):
raise ValueError("Got a formatter instance instead of its class !")
if not issubclass(formatter_class, logging.Formatter):
raise ValueError("Formatter should be a class inheriting from logging.Formatter")
self._formatter_class = formatter_class
def set_log_option_default(self, value):
""" Sets default value of `log` option."""
self._log_option_default = value
def split_by_outcome(self, outcomes=None, subdir='by_outcome'):
"""Makes a directory inside main logdir where logs are further split by test outcome
:param outcomes: list of test outcomes to be handled (failed/passed/skipped)
:param subdir: name for the subdirectory in main log directory
"""
if outcomes is not None:
allowed_outcomes = ['passed', 'failed', 'skipped']
unexpected_outcomes = set(outcomes) - set(allowed_outcomes)
if unexpected_outcomes:
raise ValueError('got unexpected_outcomes: <' + str(list(unexpected_outcomes)) + '>')
self._split_by_outcome_outcomes = outcomes
else:
self._split_by_outcome_outcomes = ['failed']
self._split_by_outcome_subdir = subdir
class LoggerHookspec(object):
def pytest_logger_config(self, logger_config):
""" called before cmdline options parsing. Accepts terse configuration
of both stdout and file logging, adds cmdline options to manipulate
stdout logging. Cannot be used together with \\*loggers hooks.
:arg logger_config: instance of :py:class:`LoggerConfig`, allows
setting loggers for stdout and file handling and their levels.
"""
def pytest_logger_stdoutloggers(self, item):
""" called before testcase setup. If implemented, given loggers
will emit their output to terminal output. Cannot be used together with
logger_config hook.
:arg item: test item for which handlers are to be setup.
:return list: List should contain logger name strings
or tuples with logger name string and logging level.
"""
def pytest_logger_fileloggers(self, item):
""" called before testcase setup. If implemented, given loggers
will emit their output to files within logs temporary directory.
Cannot be used together with logger_config hook.
:arg item: test item for which handlers are to be setup.
:return list: List should contain logger name strings
or tuples with logger name string and logging level.
"""
def pytest_logger_logdirlink(self, config):
""" called after cmdline options parsing.
If implemented, symlink to logs directory will be created.
:arg config: pytest config object, holds e.g. options
:return string: Absolute path of requested link to logs directory.
"""
@pytest.hookspec(firstresult=True)
def pytest_logger_logsdir(self, config):
""" called after cmdline options parsing.
If implemented, place logs into the location returned. This is similar
to using --logger-logsdir or the logger_logsdir ini option, but will
only be used if those are not.
Additionally, if multiple implementations of this hook are found, only
the first non-None value will be used.
:arg config: pytest config object, holds e.g. options
:return string: Absolute path of logs directory.
"""
class DefaultFormatter(logging.Formatter):
short_level_names = {
logging.FATAL: 'ftl',
logging.ERROR: 'err',
logging.WARN: 'wrn',
logging.INFO: 'inf',
logging.DEBUG: 'dbg',
}
format_string = '%(asctime)s %(levelshortname)s %(name)s: %(message)s'
def __init__(self):
logging.Formatter.__init__(self, DefaultFormatter.format_string)
self._start = time.time()
def formatTime(self, record, datefmt=None):
ct = record.created - self._start
dt = datetime.datetime.utcfromtimestamp(ct)
return dt.strftime("%M:%S.%f")[:-3] # omit useconds, leave mseconds
def format(self, record):
record.levelshortname = DefaultFormatter.short_level_names.get(record.levelno,
'l%s' % record.levelno)
return logging.Formatter.format(self, record)
@pytest.fixture
def logdir(request):
return _make_logdir(request._pyfuncitem)
def _sanitize_nodeid(filename):
filename = filename.replace('::()::', '/')
filename = filename.replace('::', '/')
filename = re.sub(r'\[(.+)\]', r'-\1', filename)
return filename
// main.go
out = kingpin.Flag("out", "write the ansible inventory to this file - if unset, print to stdout").String()
timeout = kingpin.Flag("timeout", "timeout for total runtime of the command, defaults to 2m").Default("2m").Duration()
)
var doRegions = []string{"ams1", "ams2", "ams3", "blr1", "fra1", "lon1", "nyc1", "nyc2", "nyc3", "sfo1", "sfo2", "sfo3", "sgp1", "tor1"}
func main() {
kingpin.Parse()
log.SetHandler(cli.Default)
if *doToken == "" {
log.Info("no access token provided, attempting to look up doctl's access token")
token, context, err := doctlToken()
if err != nil {
log.WithError(err).Fatalf("couldn't look up token")
}
*doToken = token
log.WithField("context", context).Info("using doctl access token")
}
ctx, cancel := context.WithTimeout(context.Background(), *timeout)
defer cancel()
client := godo.NewFromToken(*doToken)
// get droplets
if *tag != "" {
log.WithField("tag", *tag).Info("only selecting tagged Droplets")
}
log.Info("listing Droplets")
droplets, err := listDroplets(ctx, client, *tag)
if err != nil {
log.WithError(err).Fatal("couldn't fetch Droplets")
}
// filter out ignored droplets
droplets = removeIgnored(droplets, *ignore)
// initialize some maps
var dropletsByRegion map[string][]string
if *groupByRegion {
dropletsByRegion = make(map[string][]string, len(doRegions))
for _, r := range doRegions {
dropletsByRegion[r] = []string{}
}
}
var dropletsByTag map[string][]string
if *groupByTag {
dropletsByTag = make(map[string][]string, 0)
}
var inventory bytes.Buffer
dropletsByID := make(map[int]string, len(droplets))
for _, d := range droplets {
ll := log.WithField("droplet", d.Name)
ll.Info("processing")
dropletsByID[d.ID] = d.Name
if *groupByRegion {
r := d.Region.Slug
dropletsByRegion[r] = append(dropletsByRegion[r], d.Name)
}
if *groupByTag {
for _, tag := range d.Tags {
dropletsByTag[tag] = append(dropletsByTag[tag], d.Name)
}
}
var (
ip string
err error
)
if *privateIPs {
ip, err = d.PrivateIPv4()
} else {
ip, err = d.PublicIPv4()
}
if err != nil {
ll.WithError(err).Error("couldn't look up the Droplet's IP address, skipped")
continue
}
inventory.WriteString(d.Name)
inventory.WriteRune('\t')
if *sshUser != "" {
inventory.WriteString(fmt.Sprintf("ansible_user=%s ", *sshUser))
}
if *sshPort != 0 {
inventory.WriteString(fmt.Sprintf("ansible_port=%d ", *sshPort))
}
if ip != "" {
inventory.WriteString(fmt.Sprintf("ansible_host=%s", ip))
} else {
ll.Warn("could not get the Droplet's IP address, using hostname")
}
inventory.WriteRune('\n')
}
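// A produced host line looks like (illustrative values):
//
//	web-01	ansible_user=deploy ansible_port=2222 ansible_host=203.0.113.7
//
// where the user and port fields appear only if the corresponding flags
// were set; without a usable IP the ansible_host field is omitted and
// Ansible resolves the inventory name itself.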
inventory.WriteRune('\n')
// write the region groups
if *groupByRegion {
// loop over the doRegions slice to maintain alphabetic order
for _, region := range doRegions {
log.WithField("region", region).Info("building region group")
droplets := dropletsByRegion[region]
inventory.WriteString(fmt.Sprintf("[%s]", region))
inventory.WriteRune('\n')
for _, d := range droplets {
inventory.WriteString(d)
inventory.WriteRune('\n')
}
inventory.WriteRune('\n')
}
}
// write the tag groups
if *groupByTag {
for tag, droplets := range dropletsByTag {
tag = sanitizeAnsibleGroup(tag)
log.WithField("tag", tag).Info("building tag group")
inventory.WriteString(fmt.Sprintf("[%s]", tag))
inventory.WriteRune('\n')
for _, d := range droplets {
inventory.WriteString(d)
inventory.WriteRune('\n')
}
inventory.WriteRune('\n')
}
}
// write the project groups
if *groupByProject {
log.Info("listing projects")
projects, _, err := client.Projects.List(ctx, nil)
if err != nil {
log.WithError(err).Fatal("couldn't list projects")
}
dropletsByProject := make(map[string][]string)
for _, project := range projects {
ll := log.WithField("project", project.Name)
ll.Info("listing project resources")
resources, err := listProjectResources(ctx, client, project.ID)
if err != nil {
ll.WithError(err).Fatal("")
}
for _, r := range resources {
if !strings.HasPrefix(r.URN, "do:droplet:") {
continue
}
id := strings.TrimPrefix(r.URN, "do:droplet:")
idInt, err := strconv.Atoi(id)
if err != nil {
ll.WithError(err).WithField("urn", r.URN).Error("parsing droplet ID, skipping")
continue
}
// skip droplets that aren't included in the inventory
droplet, exists := dropletsByID[idInt]
if !exists {
continue
}
dropletsByProject[project.Name] = append(dropletsByProject[project.Name], droplet)
}
}
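// Droplet URNs have the form "do:droplet:123456789" (ID illustrative);
// other resource kinds (volumes, floating IPs, ...) are filtered out above,
// and IDs are mapped back through dropletsByID so only Droplets already in
// the inventory join a project group.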
for project, droplets := range dropletsByProject {
project = sanitizeAnsibleGroup(project)
log.WithField("project", project).Info("building project group")
inventory.WriteString(fmt.Sprintf("[%s]", project))
inventory.WriteRune('\n')
for _, d := range droplets {
inventory.WriteString(d)
inventory.WriteRune('\n')
}
inventory.WriteRune('\n')
}
}
if *out != "" {
ll := log.WithField("out", *out)
ll.Info("writing inventory to file")
f, err := os.Create(*out)
if err != nil {
ll.WithError(err).Fatal("couldn't open file for writing")
}
defer f.Close()
_, err = inventory.WriteTo(f)
if err != nil {
ll.WithError(err).Fatal("couldn't write inventory to file")
}
} else {
inventory.WriteTo(os.Stdout)
}
log.Info("done!")
}
func doctlToken() (string, string, error) {
type doctlConfig struct {
Context string `yaml:"context"`
AccessToken string `yaml:"access-token"`
AuthContexts map[string]string `yaml:"auth-contexts"`
}
cfgDir, err := os.UserConfigDir()
if err != nil {
return "", "", fmt.Errorf("couldn't look up user config dir: %w", err)
}
cfgFile, err := ioutil.ReadFile(filepath.Join(cfgDir, "doctl", "config.yaml"))
if err != nil {
return "", "", fmt.Errorf("couldn't read doctl's config.yaml: %w", err)
}
cfg := doctlConfig{}
err = yaml.Unmarshal(cfgFile, &cfg)
if err != nil {
return "", "", fmt.Errorf("couldn't unmarshal doctl's config.yaml: %w", err)
}
switch cfg.Context {
case "default":
return cfg.AccessToken, cfg.Context, nil
default:
return cfg.AuthContexts[cfg.Context], cfg.Context, nil
}
}
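// The doctl config consulted above looks roughly like (illustrative):
//
//	context: work
//	access-token: <token for the default context>
//	auth-contexts:
//	  work: <token for the work context>
//
// so any context other than "default" resolves through auth-contexts.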
func sanitizeAnsibleGroup(s string) string {
// replace invalid characters
s = strings.NewReplacer(
" ", "_",
"-", "_",
":", "_",
).Replace(s)
// group names cannot start with a digit
if '0' <= s[0] && s[0] <= '9' {
s = "_" + s
}
return s
}
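// For example, sanitizeAnsibleGroup("3rd party: web-01") yields
// "_3rd_party__web_01": spaces, dashes and colons become underscores and a
// leading digit gets a '_' prefix, keeping the name a valid Ansible group.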
func removeIgnored(droplets []godo.Droplet, ignored []string) []godo.Droplet {
if len(ignored) == 0 {
return droplets
}
// copy ignored droplets into a map
ignoreList := make(map[string]interface{}, len(ignored))
for _, i := range ignored {
ignoreList[i] = struct{}{}
}
// remove ignored droplets from the list
newDroplets := droplets[:0]
for _, d := range droplets {
if _, ignored := ignoreList[d.Name]; ignored {
log.WithField("droplet", d.Name).Info("ignoring")
continue
}
newDroplets = append(newDroplets, d)
}
return newDroplets
}
// get droplets w/ pagination
func | listDroplets | identifier_name | |
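The row above masks the body of listDroplets, the paginated Droplet fetch flagged by the preceding comment. As a rough illustration of the same loop, here is a Python sketch against DigitalOcean's public v2 REST API (the Go helper presumably does the equivalent through godo; the token handling and the 200-per-page size are assumptions):

import requests

def list_droplets(token, tag=None):
    """Page through GET /v2/droplets, collecting every Droplet."""
    droplets, page = [], 1
    while True:
        params = {"page": page, "per_page": 200}
        if tag:
            params["tag_name"] = tag  # server-side tag filter
        resp = requests.get("https://api.digitalocean.com/v2/droplets",
                            headers={"Authorization": "Bearer " + token},
                            params=params)
        resp.raise_for_status()
        body = resp.json()
        droplets.extend(body["droplets"])
        # the API omits links.pages.next on the last page
        if not body.get("links", {}).get("pages", {}).get("next"):
            return droplets
        page += 1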
main.go | -port", "default ssh port").Int()
tag = kingpin.Flag("tag", "filter droplets by tag").String()
ignore = kingpin.Flag("ignore", "ignore a Droplet by name, can be specified multiple times").Strings()
groupByRegion = kingpin.Flag("group-by-region", "group hosts by region, defaults to true").Default("true").Bool()
groupByTag = kingpin.Flag("group-by-tag", "group hosts by their Droplet tags, defaults to true").Default("true").Bool()
groupByProject = kingpin.Flag("group-by-project", "group hosts by their Projects, defaults to true").Default("true").Bool()
privateIPs = kingpin.Flag("private-ips", "use private Droplet IPs instead of public IPs").Bool()
out = kingpin.Flag("out", "write the ansible inventory to this file - if unset, print to stdout").String()
timeout = kingpin.Flag("timeout", "timeout for total runtime of the command, defaults to 2m").Default("2m").Duration()
)
var doRegions = []string{"ams1", "ams2", "ams3", "blr1", "fra1", "lon1", "nyc1", "nyc2", "nyc3", "sfo1", "sfo2", "sfo3", "sgp1", "tor1"}
func main() {
kingpin.Parse()
log.SetHandler(cli.Default)
if *doToken == "" {
log.Info("no access token provided, attempting to look up doctl's access token")
token, context, err := doctlToken()
if err != nil {
log.WithError(err).Fatalf("couldn't look up token")
}
*doToken = token
log.WithField("context", context).Info("using doctl access token")
}
ctx, cancel := context.WithTimeout(context.Background(), *timeout)
defer cancel()
client := godo.NewFromToken(*doToken)
// get droplets
if *tag != "" {
log.WithField("tag", *tag).Info("only selecting tagged Droplets")
}
log.Info("listing Droplets")
droplets, err := listDroplets(ctx, client, *tag)
if err != nil {
log.WithError(err).Fatal("couldn't fetch Droplets")
}
// filter out ignored droplets
droplets = removeIgnored(droplets, *ignore)
// initialize some maps
var dropletsByRegion map[string][]string
if *groupByRegion {
dropletsByRegion = make(map[string][]string, len(doRegions))
for _, r := range doRegions {
dropletsByRegion[r] = []string{}
}
}
var dropletsByTag map[string][]string
if *groupByTag {
dropletsByTag = make(map[string][]string)
}
var inventory bytes.Buffer
dropletsByID := make(map[int]string, len(droplets))
for _, d := range droplets {
ll := log.WithField("droplet", d.Name)
ll.Info("processing")
dropletsByID[d.ID] = d.Name
if *groupByRegion {
r := d.Region.Slug
dropletsByRegion[r] = append(dropletsByRegion[r], d.Name)
}
if *groupByTag {
for _, tag := range d.Tags {
dropletsByTag[tag] = append(dropletsByTag[tag], d.Name)
}
}
var (
ip string
err error
)
if *privateIPs {
ip, err = d.PrivateIPv4()
} else {
ip, err = d.PublicIPv4()
}
if err != nil {
ll.WithError(err).Error("couldn't look up the Droplet's IP address, skipped")
continue
}
inventory.WriteString(d.Name)
inventory.WriteRune('\t')
if *sshUser != "" {
inventory.WriteString(fmt.Sprintf("ansible_user=%s ", *sshUser))
}
if *sshPort != 0 {
inventory.WriteString(fmt.Sprintf("ansible_port=%d ", *sshPort))
}
if ip != "" {
inventory.WriteString(fmt.Sprintf("ansible_host=%s", ip))
} else {
ll.Warn("could not get the Droplet's IP address, using hostname")
}
inventory.WriteRune('\n')
}
inventory.WriteRune('\n')
// write the region groups
if *groupByRegion {
// loop over the doRegions slice to maintain alphabetic order
for _, region := range doRegions {
log.WithField("region", region).Info("building region group")
droplets := dropletsByRegion[region]
inventory.WriteString(fmt.Sprintf("[%s]", region))
inventory.WriteRune('\n')
for _, d := range droplets {
inventory.WriteString(d)
inventory.WriteRune('\n')
}
inventory.WriteRune('\n')
}
}
// write the tag groups
if *groupByTag {
for tag, droplets := range dropletsByTag {
tag = sanitizeAnsibleGroup(tag)
log.WithField("tag", tag).Info("building tag group")
inventory.WriteString(fmt.Sprintf("[%s]", tag))
inventory.WriteRune('\n')
for _, d := range droplets {
inventory.WriteString(d)
inventory.WriteRune('\n')
}
inventory.WriteRune('\n')
}
}
// write the project groups
if *groupByProject {
log.Info("listing projects")
projects, _, err := client.Projects.List(ctx, nil)
if err != nil {
log.WithError(err).Fatal("couldn't list projects")
}
dropletsByProject := make(map[string][]string)
for _, project := range projects {
ll := log.WithField("project", project.Name)
ll.Info("listing project resources")
resources, err := listProjectResources(ctx, client, project.ID)
if err != nil {
ll.WithError(err).Fatal("")
}
for _, r := range resources {
if !strings.HasPrefix(r.URN, "do:droplet:") {
continue
}
id := strings.TrimPrefix(r.URN, "do:droplet:")
idInt, err := strconv.Atoi(id)
if err != nil |
// skip droplets that aren't included in the inventory
droplet, exists := dropletsByID[idInt]
if !exists {
continue
}
dropletsByProject[project.Name] = append(dropletsByProject[project.Name], droplet)
}
}
for project, droplets := range dropletsByProject {
project = sanitizeAnsibleGroup(project)
log.WithField("project", project).Info("building project group")
inventory.WriteString(fmt.Sprintf("[%s]", project))
inventory.WriteRune('\n')
for _, d := range droplets {
inventory.WriteString(d)
inventory.WriteRune('\n')
}
inventory.WriteRune('\n')
}
}
if *out != "" {
ll := log.WithField("out", *out)
ll.Info("writing inventory to file")
f, err := os.Create(*out)
if err != nil {
ll.WithError(err).Fatal("couldn't open file for writing")
}
defer f.Close()
_, err = inventory.WriteTo(f)
if err != nil {
ll.WithError(err).Fatal("couldn't write inventory to file")
}
} else {
inventory.WriteTo(os.Stdout)
}
log.Info("done!")
}
func doctlToken() (string, string, error) {
type doctlConfig struct {
Context string `yaml:"context"`
AccessToken string `yaml:"access-token"`
AuthContexts map[string]string `yaml:"auth-contexts"`
}
cfgDir, err := os.UserConfigDir()
if err != nil {
return "", "", fmt.Errorf("couldn't look up user config dir: %w", err)
}
cfgFile, err := ioutil.ReadFile(filepath.Join(cfgDir, "doctl", "config.yaml"))
if err != nil {
return "", "", fmt.Errorf("couldn't read doctl's config.yaml: %w", err)
}
cfg := doctlConfig{}
err = yaml.Unmarshal(cfgFile, &cfg)
if err != nil {
return "", "", fmt.Errorf("couldn't unmarshal doctl's config.yaml: %w", err)
}
switch cfg.Context {
case "default":
return cfg.AccessToken, cfg.Context, nil
default:
return cfg.AuthContexts[cfg.Context], cfg.Context, nil
}
}
func sanitizeAnsibleGroup(s string) string {
// replace invalid characters
s = strings.NewReplacer(
" ", "_",
"-", "_",
":", "_",
).Replace(s)
// group names cannot start with a digit
if '0' <= s[0] && s[0] <= '9' {
s = "_" + s
}
return s
}
func removeIgnored(droplets []godo | {
ll.WithError(err).WithField("urn", r.URN).Error("parsing droplet ID, skipping")
continue
} | conditional_block |
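For reference, the inventory text this row assembles is plain INI: one tab-separated host line per Droplet, then one section per region/tag/project group. A minimal Python sketch of the group writer, with hypothetical host names:

def build_group(name, hosts):
    # mirrors the Go loop: "[name]" header, one host per line, blank separator
    return "[%s]\n%s\n\n" % (name, "\n".join(hosts))

print(build_group("nyc3", ["web1", "db1"]))
# [nyc3]
# web1
# db1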
main.go | -port", "default ssh port").Int()
tag = kingpin.Flag("tag", "filter droplets by tag").String()
ignore = kingpin.Flag("ignore", "ignore a Droplet by name, can be specified multiple times").Strings()
groupByRegion = kingpin.Flag("group-by-region", "group hosts by region, defaults to true").Default("true").Bool()
groupByTag = kingpin.Flag("group-by-tag", "group hosts by their Droplet tags, defaults to true").Default("true").Bool()
groupByProject = kingpin.Flag("group-by-project", "group hosts by their Projects, defaults to true").Default("true").Bool()
privateIPs = kingpin.Flag("private-ips", "use private Droplet IPs instead of public IPs").Bool()
out = kingpin.Flag("out", "write the ansible inventory to this file - if unset, print to stdout").String()
timeout = kingpin.Flag("timeout", "timeout for total runtime of the command, defaults to 2m").Default("2m").Duration()
)
var doRegions = []string{"ams1", "ams2", "ams3", "blr1", "fra1", "lon1", "nyc1", "nyc2", "nyc3", "sfo1", "sfo2", "sfo3", "sgp1", "tor1"}
func main() {
kingpin.Parse()
log.SetHandler(cli.Default)
if *doToken == "" {
log.Info("no access token provided, attempting to look up doctl's access token")
token, context, err := doctlToken()
if err != nil {
log.WithError(err).Fatalf("couldn't look up token")
}
*doToken = token
log.WithField("context", context).Info("using doctl access token")
}
ctx, cancel := context.WithTimeout(context.Background(), *timeout)
defer cancel()
client := godo.NewFromToken(*doToken)
// get droplets
if *tag != "" {
log.WithField("tag", *tag).Info("only selecting tagged Droplets")
}
log.Info("listing Droplets")
droplets, err := listDroplets(ctx, client, *tag)
if err != nil {
log.WithError(err).Fatal("couldn't fetch Droplets")
}
// filter out ignored droplets
droplets = removeIgnored(droplets, *ignore)
// initialize some maps
var dropletsByRegion map[string][]string
if *groupByRegion {
dropletsByRegion = make(map[string][]string, len(doRegions))
for _, r := range doRegions {
dropletsByRegion[r] = []string{}
}
}
var dropletsByTag map[string][]string
if *groupByTag {
dropletsByTag = make(map[string][]string)
}
var inventory bytes.Buffer
dropletsByID := make(map[int]string, len(droplets))
for _, d := range droplets {
ll := log.WithField("droplet", d.Name)
ll.Info("processing")
dropletsByID[d.ID] = d.Name
if *groupByRegion {
r := d.Region.Slug
dropletsByRegion[r] = append(dropletsByRegion[r], d.Name)
}
if *groupByTag {
for _, tag := range d.Tags {
dropletsByTag[tag] = append(dropletsByTag[tag], d.Name)
}
}
var (
ip string
err error
)
if *privateIPs {
ip, err = d.PrivateIPv4()
} else {
ip, err = d.PublicIPv4()
}
if err != nil {
ll.WithError(err).Error("couldn't look up the Droplet's IP address, skipped")
continue
}
inventory.WriteString(d.Name)
inventory.WriteRune('\t')
if *sshUser != "" {
inventory.WriteString(fmt.Sprintf("ansible_user=%s ", *sshUser))
}
if *sshPort != 0 {
inventory.WriteString(fmt.Sprintf("ansible_port=%d ", *sshPort))
}
if ip != "" {
inventory.WriteString(fmt.Sprintf("ansible_host=%s", ip))
} else {
ll.Warn("could not get the Droplet's IP address, using hostname")
}
inventory.WriteRune('\n')
}
inventory.WriteRune('\n')
// write the region groups
if *groupByRegion {
// loop over the doRegions slice to maintain alphabetic order
for _, region := range doRegions {
log.WithField("region", region).Info("building region group")
droplets := dropletsByRegion[region]
inventory.WriteString(fmt.Sprintf("[%s]", region))
inventory.WriteRune('\n')
for _, d := range droplets {
inventory.WriteString(d)
inventory.WriteRune('\n')
}
inventory.WriteRune('\n')
}
}
// write the tag groups
if *groupByTag {
for tag, droplets := range dropletsByTag {
tag = sanitizeAnsibleGroup(tag)
log.WithField("tag", tag).Info("building tag group")
inventory.WriteString(fmt.Sprintf("[%s]", tag))
inventory.WriteRune('\n')
for _, d := range droplets {
inventory.WriteString(d)
inventory.WriteRune('\n')
}
inventory.WriteRune('\n')
}
}
// write the project groups
if *groupByProject {
log.Info("listing projects")
projects, _, err := client.Projects.List(ctx, nil)
if err != nil {
log.WithError(err).Fatal("couldn't list projects")
}
dropletsByProject := make(map[string][]string)
for _, project := range projects {
ll := log.WithField("project", project.Name)
ll.Info("listing project resources")
resources, err := listProjectResources(ctx, client, project.ID)
if err != nil {
ll.WithError(err).Fatal("")
}
for _, r := range resources {
if !strings.HasPrefix(r.URN, "do:droplet:") {
continue
}
id := strings.TrimPrefix(r.URN, "do:droplet:")
idInt, err := strconv.Atoi(id)
if err != nil {
ll.WithError(err).WithField("urn", r.URN).Error("parsing droplet ID, skipping")
continue
}
// skip droplets that aren't included in the inventory
droplet, exists := dropletsByID[idInt]
if !exists {
continue
}
dropletsByProject[project.Name] = append(dropletsByProject[project.Name], droplet)
}
}
for project, droplets := range dropletsByProject {
project = sanitizeAnsibleGroup(project)
log.WithField("project", project).Info("building project group")
inventory.WriteString(fmt.Sprintf("[%s]", project))
inventory.WriteRune('\n')
for _, d := range droplets {
inventory.WriteString(d)
inventory.WriteRune('\n')
}
inventory.WriteRune('\n')
}
}
if *out != "" {
ll := log.WithField("out", *out)
ll.Info("writing inventory to file")
f, err := os.Create(*out)
if err != nil {
ll.WithError(err).Fatal("couldn't open file for writing")
}
defer f.Close()
_, err = inventory.WriteTo(f)
if err != nil {
ll.WithError(err).Fatal("couldn't write inventory to file")
}
} else {
inventory.WriteTo(os.Stdout)
}
log.Info("done!")
}
func doctlToken() (string, string, error) {
type doctlConfig struct {
Context string `yaml:"context"`
AccessToken string `yaml:"access-token"`
AuthContexts map[string]string `yaml:"auth-contexts"`
}
cfgDir, err := os.UserConfigDir()
if err != nil {
return "", "", fmt.Errorf("couldn't look up user config dir: %w", err)
}
cfgFile, err := ioutil.ReadFile(filepath.Join(cfgDir, "doctl", "config.yaml"))
if err != nil {
return "", "", fmt.Errorf("couldn't read doctl's config.yaml: %w", err)
}
cfg := doctlConfig{}
err = yaml.Unmarshal(cfgFile, &cfg)
if err != nil {
return "", "", fmt.Errorf("couldn't unmarshal doctl's config.yaml: %w", err)
}
switch cfg.Context {
case "default":
return cfg.AccessToken, cfg.Context, nil
default:
return cfg.AuthContexts[cfg.Context], cfg.Context, nil
}
}
func sanitizeAnsibleGroup(s string) string |
func removeIgnored(droplets []godo | {
// replace invalid characters
s = strings.NewReplacer(
" ", "_",
"-", "_",
":", "_",
).Replace(s)
// group names cannot start with a digit
if '0' <= s[0] && s[0] <= '9' {
s = "_" + s
}
return s
} | identifier_body |
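The masked identifier_body above is sanitizeAnsibleGroup. The same normalization in Python, with a couple of illustrative inputs (the example strings are assumptions; the rules come from the Go body):

def sanitize_ansible_group(s):
    # Ansible group names: no spaces/dashes/colons, no leading digit
    s = s.replace(" ", "_").replace("-", "_").replace(":", "_")
    if s and s[0].isdigit():
        s = "_" + s
    return s

assert sanitize_ansible_group("db-us east:1") == "db_us_east_1"
assert sanitize_ansible_group("1web") == "_1web"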
main.go | -port", "default ssh port").Int()
tag = kingpin.Flag("tag", "filter droplets by tag").String()
ignore = kingpin.Flag("ignore", "ignore a Droplet by name, can be specified multiple times").Strings()
groupByRegion = kingpin.Flag("group-by-region", "group hosts by region, defaults to true").Default("true").Bool()
groupByTag = kingpin.Flag("group-by-tag", "group hosts by their Droplet tags, defaults to true").Default("true").Bool()
groupByProject = kingpin.Flag("group-by-project", "group hosts by their Projects, defaults to true").Default("true").Bool()
privateIPs = kingpin.Flag("private-ips", "use private Droplet IPs instead of public IPs").Bool()
out = kingpin.Flag("out", "write the ansible inventory to this file - if unset, print to stdout").String()
timeout = kingpin.Flag("timeout", "timeout for total runtime of the command, defaults to 2m").Default("2m").Duration()
)
var doRegions = []string{"ams1", "ams2", "ams3", "blr1", "fra1", "lon1", "nyc1", "nyc2", "nyc3", "sfo1", "sfo2", "sfo3", "sgp1", "tor1"}
func main() {
kingpin.Parse()
log.SetHandler(cli.Default)
if *doToken == "" {
log.Info("no access token provided, attempting to look up doctl's access token")
token, context, err := doctlToken()
if err != nil {
log.WithError(err).Fatalf("couldn't look up token")
}
*doToken = token
log.WithField("context", context).Info("using doctl access token")
}
ctx, cancel := context.WithTimeout(context.Background(), *timeout) | // get droplets
if *tag != "" {
log.WithField("tag", *tag).Info("only selecting tagged Droplets")
}
log.Info("listing Droplets")
droplets, err := listDroplets(ctx, client, *tag)
if err != nil {
log.WithError(err).Fatal("couldn't fetch Droplets")
}
// filter out ignored droplets
droplets = removeIgnored(droplets, *ignore)
// initialize some maps
var dropletsByRegion map[string][]string
if *groupByRegion {
dropletsByRegion = make(map[string][]string, len(doRegions))
for _, r := range doRegions {
dropletsByRegion[r] = []string{}
}
}
var dropletsByTag map[string][]string
if *groupByTag {
dropletsByTag = make(map[string][]string)
}
var inventory bytes.Buffer
dropletsByID := make(map[int]string, len(droplets))
for _, d := range droplets {
ll := log.WithField("droplet", d.Name)
ll.Info("processing")
dropletsByID[d.ID] = d.Name
if *groupByRegion {
r := d.Region.Slug
dropletsByRegion[r] = append(dropletsByRegion[r], d.Name)
}
if *groupByTag {
for _, tag := range d.Tags {
dropletsByTag[tag] = append(dropletsByTag[tag], d.Name)
}
}
var (
ip string
err error
)
if *privateIPs {
ip, err = d.PrivateIPv4()
} else {
ip, err = d.PublicIPv4()
}
if err != nil {
ll.WithError(err).Error("couldn't look up the Droplet's IP address, skipped")
continue
}
inventory.WriteString(d.Name)
inventory.WriteRune('\t')
if *sshUser != "" {
inventory.WriteString(fmt.Sprintf("ansible_user=%s ", *sshUser))
}
if *sshPort != 0 {
inventory.WriteString(fmt.Sprintf("ansible_port=%d ", *sshPort))
}
if ip != "" {
inventory.WriteString(fmt.Sprintf("ansible_host=%s", ip))
} else {
ll.Warn("could not get the Droplet's IP address, using hostname")
}
inventory.WriteRune('\n')
}
inventory.WriteRune('\n')
// write the region groups
if *groupByRegion {
// loop over the doRegions slice to maintain alphabetic order
for _, region := range doRegions {
log.WithField("region", region).Info("building region group")
droplets := dropletsByRegion[region]
inventory.WriteString(fmt.Sprintf("[%s]", region))
inventory.WriteRune('\n')
for _, d := range droplets {
inventory.WriteString(d)
inventory.WriteRune('\n')
}
inventory.WriteRune('\n')
}
}
// write the tag groups
if *groupByTag {
for tag, droplets := range dropletsByTag {
tag = sanitizeAnsibleGroup(tag)
log.WithField("tag", tag).Info("building tag group")
inventory.WriteString(fmt.Sprintf("[%s]", tag))
inventory.WriteRune('\n')
for _, d := range droplets {
inventory.WriteString(d)
inventory.WriteRune('\n')
}
inventory.WriteRune('\n')
}
}
// write the project groups
if *groupByProject {
log.Info("listing projects")
projects, _, err := client.Projects.List(ctx, nil)
if err != nil {
log.WithError(err).Fatal("couldn't list projects")
}
dropletsByProject := make(map[string][]string)
for _, project := range projects {
ll := log.WithField("project", project.Name)
ll.Info("listing project resources")
resources, err := listProjectResources(ctx, client, project.ID)
if err != nil {
ll.WithError(err).Fatal("")
}
for _, r := range resources {
if !strings.HasPrefix(r.URN, "do:droplet:") {
continue
}
id := strings.TrimPrefix(r.URN, "do:droplet:")
idInt, err := strconv.Atoi(id)
if err != nil {
ll.WithError(err).WithField("urn", r.URN).Error("parsing droplet ID, skipping")
continue
}
// skip droplets that aren't included in the inventory
droplet, exists := dropletsByID[idInt]
if !exists {
continue
}
dropletsByProject[project.Name] = append(dropletsByProject[project.Name], droplet)
}
}
for project, droplets := range dropletsByProject {
project = sanitizeAnsibleGroup(project)
log.WithField("project", project).Info("building project group")
inventory.WriteString(fmt.Sprintf("[%s]", project))
inventory.WriteRune('\n')
for _, d := range droplets {
inventory.WriteString(d)
inventory.WriteRune('\n')
}
inventory.WriteRune('\n')
}
}
if *out != "" {
ll := log.WithField("out", *out)
ll.Info("writing inventory to file")
f, err := os.Create(*out)
if err != nil {
ll.WithError(err).Fatal("couldn't open file for writing")
}
defer f.Close()
_, err = inventory.WriteTo(f)
if err != nil {
ll.WithError(err).Fatal("couldn't write inventory to file")
}
} else {
inventory.WriteTo(os.Stdout)
}
log.Info("done!")
}
func doctlToken() (string, string, error) {
type doctlConfig struct {
Context string `yaml:"context"`
AccessToken string `yaml:"access-token"`
AuthContexts map[string]string `yaml:"auth-contexts"`
}
cfgDir, err := os.UserConfigDir()
if err != nil {
return "", "", fmt.Errorf("couldn't look up user config dir: %w", err)
}
cfgFile, err := ioutil.ReadFile(filepath.Join(cfgDir, "doctl", "config.yaml"))
if err != nil {
return "", "", fmt.Errorf("couldn't read doctl's config.yaml: %w", err)
}
cfg := doctlConfig{}
err = yaml.Unmarshal(cfgFile, &cfg)
if err != nil {
return "", "", fmt.Errorf("couldn't unmarshal doctl's config.yaml: %w", err)
}
switch cfg.Context {
case "default":
return cfg.AccessToken, cfg.Context, nil
default:
return cfg.AuthContexts[cfg.Context], cfg.Context, nil
}
}
func sanitizeAnsibleGroup(s string) string {
// replace invalid characters
s = strings.NewReplacer(
" ", "_",
"-", "_",
":", "_",
).Replace(s)
// group names cannot start with a digit
if '0' <= s[0] && s[0] <= '9' {
s = "_" + s
}
return s
}
func removeIgnored(droplets []godo.Dro | defer cancel()
client := godo.NewFromToken(*doToken)
| random_line_split |
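doctlToken reads doctl's config.yaml, whose relevant keys are exactly the three struct tags shown in the row above (context, access-token, auth-contexts). A Python sketch of the same lookup with a made-up config (token values are placeholders):

import yaml  # PyYAML

SAMPLE = """
context: work
access-token: tok-default
auth-contexts:
  work: tok-work
"""

def doctl_token(text):
    cfg = yaml.safe_load(text)
    ctx = cfg.get("context", "default")
    if ctx == "default":
        return cfg["access-token"], ctx
    return cfg["auth-contexts"][ctx], ctx

print(doctl_token(SAMPLE))  # ('tok-work', 'work')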
disk.py | (object):
def __init__(self):
self.model = ""
self.vendor = ""
self.fw = ""
self.sn = ""
self.wwn = ""
self.hctl = ""
self.dev_name = ""
self.smart = ""
self.type = ""
self.smart_attr = {}
self.age = {}
self.flash = False
@staticmethod
def map_disk_wwn_hctl(diskname):
""" map wwn and H:C:T:L from dev_name """
lsscsi = linux.exe_shell("lsscsi -w |grep /dev/|awk '{print$1,$3,$4}'")
for i in lsscsi.splitlines():
split_t = i.split(" ")
if diskname in split_t[2]:
return {
"hctl": split_t[0],
"wwn": split_t[1],
"dev_name": split_t[2]
}
return None
@staticmethod
def get_from_sas_disk_smart_i_str(disk_name):
return linux.exe_shell("smartctl -i /dev/%s" % disk_name)
@staticmethod
def get_from_sas_disk_simple_attr(disk_name):
smart = Disk.get_from_sas_disk_smart_i_str(disk_name)
model = linux.search_regex_one_line_string_column(smart, "(?:Device Model|Product):.+", ":", 1).strip()
sn = linux.search_regex_one_line_string_column(smart, "Serial (?:N|n)umber.+", ":", 1).strip()
vendor = linux.search_regex_one_line_string_column(smart, "(?:SATA Ver|Vendor).+", ":", 1).split()[0].strip()
return {
"name": disk_name,
"model": model,
"sn": sn,
"vendor": vendor
}
@staticmethod
def get_all_disk():
""" return all disk object list from hba and chipset. """
disks = []
disks_lines = linux.exe_shell("lsblk -o NAME,VENDOR|grep -P '^sd.*[A-Z]'")
for line in disks_lines.splitlines():
disk_t = line.split()
if len(disk_t) > 1 and "LSI" not in disk_t[1]:
disks.append(disk_t[0])
ds = []
for i in disks:
d_t = DiskFromLsiSas3("", i)
d_t.fill_attrs()
ds.append(d_t)
return ds
@staticmethod
def get_dev_attr_dict(dev_name):
i = DiskFromLsiSas3("", dev_name)
i.fill_attrs()
return {
"dev": i.dev_name,
"model": i.model,
"fw": i.fw,
"SN": i.sn,
"type": i.type,
"vendor": i.vendor,
"smart": i.smart_attr,
"hctl": i.hctl,
"wwn": i.wwn,
"age": i.age,
"is_ssd": str(i.flash)
}
@staticmethod
def __if_smart_err(disk_oj):
""" return True if smart info of disk_oj has error, else return False """
if "SAS" in disk_oj.smart:
if int(disk_oj.smart_attr["channel0Error"]["Invalid DWORD count"]) > 0 or \
int(disk_oj.smart_attr["channel0Error"]["Running disparity error count"]) > 0 or \
int(disk_oj.smart_attr["channel0Error"]["Loss of DWORD synchronization"]) > 0 or \
int(disk_oj.smart_attr["channel0Error"]["Phy reset problem"]) > 0 or \
int(disk_oj.smart_attr["channel1Error"]["Invalid DWORD count"]) > 0 or \
int(disk_oj.smart_attr["channel1Error"]["Running disparity error count"]) > 0 or \
int(disk_oj.smart_attr["channel1Error"]["Loss of DWORD synchronization"]) > 0 or \
int(disk_oj.smart_attr["channel1Error"]["Phy reset problem"]) > 0:
return True
else:
return False
if "SATA" in disk_oj.smart:
if "No Errors Logged" not in disk_oj.smart:
return False
for attr_ in SATA_SMART_ERROR_LIST:
if disk_oj.smart_attr[attr_]["RAW_VALUE"] > 0:
return False
return True
@staticmethod
def get_over_agelimit_disks(disk_list):
""" return sas and sata disk list witch start_stop_hours/count or data is over the limit """
over_sas_disk = []
over_sata_disk = []
for disk in disk_list:
if disk.type == "SAS":
if int(disk.age["start_stop_count"]) > SAS_LIMIT_COUNT or float(disk.age["data_gb"]) > SAS_LIMIT_GB:
over_sas_disk.append(disk)
if disk.type == "SATA":
if int(disk.age["start_stop_count"]) > SAS_LIMIT_COUNT or int(
disk.age["power_on_hours"]) > SATA_LIMIT_HOURS:
over_sata_disk.append(disk)
return over_sas_disk, over_sata_disk
@staticmethod
def get_overage_disks_json(disk_list):
""" get_overage_disks function's json model """
pass
@staticmethod
def get_err_disk_dict():
""" return disk dict has error """
err_disk_dict = {}
disks = Disk.get_all_disk()
for i in disks:
if Disk.__if_smart_err(i):
struct = {
"dev": i.dev_name,
"model": i.model,
"fw": i.fw,
"SN": i.sn,
"type": i.type,
"vendor": i.vendor,
"smart": i.smart_attr,
"hctl": i.hctl,
"wwn": i.wwn
}
err_disk_dict[i.dev_name] = struct
return err_disk_dict
@staticmethod
def get_wearout_ssd_status():
""" return ssd wearout status dict """
disks = Disk.get_all_disk()
ssd_status = {}
for i in disks:
tmp = i.get_wearout_status()
# tmp[0] is dev_name, tmp[1] is wearout %
if tmp is not None:
ssd_status[tmp[0]] = tmp[1]
if len(ssd_status) == 0:
return None
return ssd_status
class DiskFromLsiSas3(Disk):
def __init__(self, sn, name):
super(DiskFromLsiSas3, self).__init__()
self.sn = sn
self.dev_name = name
def fill_attrs(self):
smart_str = linux.exe_shell("smartctl -a /dev/%s" % self.dev_name)
smartx_str = linux.exe_shell("smartctl -x /dev/%s" % self.dev_name)
self.smart = smartx_str
try:
self.model = linux.search_regex_one_line_string_column(smart_str, "(?:Device Model|Product):.+", ":",
1).strip()
self.fw = linux.search_regex_one_line_string_column(smart_str, "(?:Firmware|Revision).+", ":", 1).strip()
self.vendor = linux.search_regex_one_line_string_column(smart_str, "(?:SATA Ver|Vendor).+", ":", 1).split()[
0].strip()
self.sn = linux.search_regex_one_line_string_column(smart_str, "Serial (?:N|n)umber.+", ":", 1).strip()
map_temp = self.map_disk_wwn_hctl(self.dev_name)
self.wwn = map_temp["wwn"] if map_temp is not None else ""
self.hctl = map_temp["hctl"] if map_temp is not None else ""
rotational = linux.read_file(os.path.join("/sys/block", self.dev_name, "queue/rotational"))
if rotational.strip() == "0":
self.flash = True
except IOError:
print("%s read_file rotational err." % self.dev_name)
except Exception:
print("disk %s is not exists." % self.dev_name)
# fill in smart_attr
# ==========================================================================
# SAS disk
# smart_attr: {
# 'channel0Error': {
# 'Invalid DWORD count': '0',
# 'Loss of DWORD synchronization': '0',
# 'Phy reset problem': '0',
# 'Running disparity error count': '0'
# }
# 'channel1Error': {
# 'Invalid DWORD count': '0',
# 'Loss of DWORD synchronization': '0',
# 'Phy reset problem': '0',
# 'Running disparity error count': '0'
# }
# 'read': {
# 'byte10_9': '59036.419',
# 'correctionAlgorithmInvocations': '414271',
| Disk | identifier_name | |
disk.py | # }
# 'Spin_Up_Time': {
# ...(According to the following form)
# }
# }
# SATA smart form:
# ID# ATTRIBUTE_NAME FLAG VALUE WORST THRESH TYPE UPDATED WHEN_FAILED RAW_VALUE
# 1 Raw_Read_Error_Rate 0x000f 074 063 044 Pre-fail Always - 26816470
# 3 Spin_Up_Time 0x0003 094 094 000 Pre-fail Always - 0
# 4 Start_Stop_Count 0x0032 100 100 020 Old_age Always - 314
# 5 Reallocated_Sector_Ct 0x0033 100 100 036 Pre-fail Always - 1
# 7 Seek_Error_Rate 0x000f 073 060 030 Pre-fail Always - 21595176
# 9 Power_On_Hours 0x0032 096 096 000 Old_age Always - 3851
# 10 Spin_Retry_Count 0x0013 100 100 097 Pre-fail Always - 0
# 12 Power_Cycle_Count 0x0032 100 100 020 Old_age Always - 271
# 184 End-to-End_Error 0x0032 100 100 099 Old_age Always - 0
# 187 Reported_Uncorrect 0x0032 100 100 000 Old_age Always - 0
# 188 Command_Timeout 0x0032 100 100 000 Old_age Always - 0
# 189 High_Fly_Writes 0x003a 100 100 000 Old_age Always - 0
# 190 Airflow_Temperature_Cel 0x0022 064 057 045 Old_age Always - 36 (Min/Max 24/40)
# 191 G-Sense_Error_Rate 0x0032 100 100 000 Old_age Always - 0
# 192 Power-Off_Retract_Count 0x0032 100 100 000 Old_age Always - 147
# 193 Load_Cycle_Count 0x0032 099 099 000 Old_age Always - 2690
# 194 Temperature_Celsius 0x0022 036 043 000 Old_age Always - 36 (0 11 0 0 0)
# 195 Hardware_ECC_Recovered 0x001a 110 099 000 Old_age Always - 26816470
# 197 Current_Pending_Sector 0x0012 100 100 000 Old_age Always - 0
# 198 Offline_Uncorrectable 0x0010 100 100 000 Old_age Offline - 0
# 199 UDMA_CRC_Error_Count 0x003e 200 200 000 Old_age Always - 0
#
# ===========================================================================
if "SAS" in smart_str:
self.type = "SAS"
smart_str_arr = linux.search_regex_strings(smart_str, " *(?:write:|read:|verify:).+")
for line in smart_str_arr:
tmp = line.split()
dict_tmp = {
"errorEccFast": tmp[1].strip(),
"errorEccDelayed": tmp[2].strip(),
"errorEccByRereadsRewrite": tmp[3].strip(),
"totalErrorsCorrected": tmp[4].strip(),
"correctionAlgorithmInvocations": tmp[5].strip(),
"byte10_9": tmp[6].strip(),
"totalUncorrectedError": tmp[7].strip()
}
self.smart_attr[tmp[0].replace(":", " ").strip()] = dict_tmp
smart_str_arr = linux.search_regex_strings(
self.smart,
"(?:Invalid DWORD|Running disparity|Loss of DWORD|Phy reset problem).+=.+"
)
i = 0
dict_tmp = {}
for it in smart_str_arr:
tmp = it.split("=")
dict_tmp[tmp[0].strip()] = tmp[1].strip()
if 3 == i:
self.smart_attr["channel0Error"] = dict_tmp
dict_tmp = {}
if 7 == i:
self.smart_attr["channel1Error"] = dict_tmp
dict_tmp = {}
i += 1
# fill in age
# 'data_gb' is float number
# age: {
# 'start_stop_count': '10',
# 'data_gb': '5999'
# }
if isinstance(self.smart, str) and ("start-stop" in self.smart):
self.age["start_stop_count"] = linux.search_regex_one_line_string_column(self.smart, ".+start-stop.+",
":", 1)
all_gb = float(self.smart_attr["read"]["byte10_9"]) + float(
self.smart_attr["write"]["byte10_9"]) + float(self.smart_attr["verify"]["byte10_9"])
self.age["data_gb"] = str(all_gb)
if "SATA" in smart_str:
self.type = "SATA"
dict_tmp = linux.search_regex_strings(smart_str, ".*[0-9]+.+0x.+(?:In_the_past|-|FAILING_NOW) +[0-9]+")
for line in dict_tmp:
tmp = line.split()
dict_tmp = {
"ID": tmp[0].strip(),
"FLAG": tmp[2].strip(),
"VALUE": tmp[3].strip(),
"WORST": tmp[4].strip(),
"THRESH": tmp[5].strip(),
"TYPE": tmp[6].strip(),
"UPDATED": tmp[7].strip(),
"WHEN_FAILED": tmp[8].strip(),
"RAW_VALUE": tmp[9].strip(),
}
self.smart_attr[tmp[1]] = dict_tmp
if "Start_Stop_Count" in self.smart_attr:
self.age["start_stop_count"] = self.smart_attr["Start_Stop_Count"]["RAW_VALUE"]
self.age["power_on_hours"] = self.smart_attr["Power_On_Hours"]["RAW_VALUE"]
def get_wearout_status(self):
if self.flash is True and "Media_Wearout_Indicator" in self.smart_attr:
value = self.smart_attr["Media_Wearout_Indicator"]["VALUE"]
return self.dev_name, value
else:
return None
def to_json(self):
struct = {
"dev": self.dev_name,
"model": self.model,
"fw": self.fw,
"SN": self.sn,
"type": self.type,
"vendor": self.vendor,
"smart": self.smart_attr,
"hctl": self.hctl,
"wwn": self.wwn
}
json_str = json.dumps(struct, indent=1)
return json_str
class DiskFromLsiSas2(DiskFromLsiSas3):
| def __init__(self, sn, name):
super(DiskFromLsiSas2, self).__init__(sn, name) | identifier_body | |
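A plausible caller of the Disk helpers defined in this row (no new API here, only the static methods already shown; note get_all_disk shells out to lsblk/smartctl, so this assumes a Linux host with those tools):

disks = Disk.get_all_disk()                      # every non-RAID sd* device
failing = Disk.get_err_disk_dict()               # {dev: details} per the SMART error check
wearout = Disk.get_wearout_ssd_status()          # {dev: wearout %} or None
old_sas, old_sata = Disk.get_over_agelimit_disks(disks)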
disk.py | _oj):
""" return True if smart info of disk_oj has error, else return False """
if "SAS" in disk_oj.smart:
if int(disk_oj.smart_attr["channel0Error"]["Invalid DWORD count"]) > 0 or \
int(disk_oj.smart_attr["channel0Error"]["Running disparity error count"]) > 0 or \
int(disk_oj.smart_attr["channel0Error"]["Loss of DWORD synchronization"]) > 0 or \
int(disk_oj.smart_attr["channel0Error"]["Phy reset problem"]) > 0 or \
int(disk_oj.smart_attr["channel1Error"]["Invalid DWORD count"]) > 0 or \
int(disk_oj.smart_attr["channel1Error"]["Running disparity error count"]) > 0 or \
int(disk_oj.smart_attr["channel1Error"]["Loss of DWORD synchronization"]) > 0 or \
int(disk_oj.smart_attr["channel1Error"]["Phy reset problem"]) > 0:
return True
else:
return False
if "SATA" in disk_oj.smart:
if "No Errors Logged" not in disk_oj.smart:
return False
for attr_ in SATA_SMART_ERROR_LIST:
if disk_oj.smart_attr[attr_]["RAW_VALUE"] > 0:
return False
return True
@staticmethod
def get_over_agelimit_disks(disk_list):
""" return sas and sata disk list witch start_stop_hours/count or data is over the limit """
over_sas_disk = []
over_sata_disk = []
for disk in disk_list:
if disk.type == "SAS":
if int(disk.age["start_stop_count"]) > SAS_LIMIT_COUNT or float(disk.age["data_gb"]) > SAS_LIMIT_GB:
over_sas_disk.append(disk)
if disk.type == "SATA":
if int(disk.age["start_stop_count"]) > SAS_LIMIT_COUNT or int(
disk.age["power_on_hours"]) > SATA_LIMIT_HOURS:
over_sata_disk.append(disk)
return over_sas_disk, over_sata_disk
@staticmethod
def get_overage_disks_json(disk_list):
""" get_overage_disks function's json model """
pass
@staticmethod
def get_err_disk_dict():
""" return disk dict has error """
err_disk_dict = {}
disks = Disk.get_all_disk()
for i in disks:
if Disk.__if_smart_err(i):
struct = {
"dev": i.dev_name,
"model": i.model,
"fw": i.fw,
"SN": i.sn,
"type": i.type,
"vendor": i.vendor,
"smart": i.smart_attr,
"hctl": i.hctl,
"wwn": i.wwn
}
err_disk_dict[i.dev_name] = struct
return err_disk_dict
@staticmethod
def get_wearout_ssd_status():
""" return ssd wearout status dict """
disks = Disk.get_all_disk()
ssd_status = {}
for i in disks:
tmp = i.get_wearout_status()
# tmp[0] is dev_name, tmp[1] is wearout %
if tmp is not None:
ssd_status[tmp[0]] = tmp[1]
if len(ssd_status) == 0:
return None
return ssd_status
class DiskFromLsiSas3(Disk):
def __init__(self, sn, name):
super(DiskFromLsiSas3, self).__init__()
self.sn = sn
self.dev_name = name
def fill_attrs(self):
smart_str = linux.exe_shell("smartctl -a /dev/%s" % self.dev_name)
smartx_str = linux.exe_shell("smartctl -x /dev/%s" % self.dev_name)
self.smart = smartx_str
try:
self.model = linux.search_regex_one_line_string_column(smart_str, "(?:Device Model|Product):.+", ":",
1).strip()
self.fw = linux.search_regex_one_line_string_column(smart_str, "(?:Firmware|Revision).+", ":", 1).strip()
self.vendor = linux.search_regex_one_line_string_column(smart_str, "(?:SATA Ver|Vendor).+", ":", 1).split()[
0].strip()
self.sn = linux.search_regex_one_line_string_column(smart_str, "Serial (?:N|n)umber.+", ":", 1).strip()
map_temp = self.map_disk_wwn_hctl(self.dev_name)
self.wwn = map_temp["wwn"] if map_temp is not None else ""
self.hctl = map_temp["hctl"] if map_temp is not None else ""
rotational = linux.read_file(os.path.join("/sys/block", self.dev_name, "queue/rotational"))
if rotational.strip() == "0":
self.flash = True
except IOError:
print("%s read_file rotational err." % self.dev_name)
except Exception:
print("disk %s is not exists." % self.dev_name)
# fill in smart_attr
# ==========================================================================
# SAS disk
# smart_attr: {
# 'channel0Error': {
# 'Invalid DWORD count': '0',
# 'Loss of DWORD synchronization': '0',
# 'Phy reset problem': '0',
# 'Running disparity error count': '0'
# }
# 'channel1Error': {
# 'Invalid DWORD count': '0',
# 'Loss of DWORD synchronization': '0',
# 'Phy reset problem': '0',
# 'Running disparity error count': '0'
# }
# 'read': {
# 'byte10_9': '59036.419',
# 'correctionAlgorithmInvocations': '414271',
# 'errorEccByRereadsRewrite': '0',
# 'errorEccDelayed': '8',
# 'errorEccFast': '0',
# 'totalErrorsCorrected': '8',
# 'totalUncorrectedError': '0'
# }
# 'verify': {
# 'byte10_9': '59036.419',
# 'correctionAlgorithmInvocations': '414271',
# 'errorEccByRereadsRewrite': '0',
# 'errorEccDelayed': '8',
# 'errorEccFast': '0',
# 'totalErrorsCorrected': '8',
# 'totalUncorrectedError': '0'
# }
# 'write': {
# 'byte10_9': '59036.419',
# 'correctionAlgorithmInvocations': '414271',
# 'errorEccByRereadsRewrite': '0',
# 'errorEccDelayed': '8', | # }
#
# SATA disk
# smart_attr: {
# 'Raw_Read_Error_Rate': {
# 'ID': '1',
# 'FLAG': '0x000f',
# 'VALUE': '074',
# 'WORST': '063',
# 'THRESH': '044',
# 'TYPE': 'Pre-fail',
# 'UPDATED': 'Always',
# 'WHEN_FAILED': '-',
# 'RAW_VALUE': '26816470'
# }
# 'Spin_Up_Time': {
# ...(According to the following form)
# }
# }
# SATA smart form:
# ID# ATTRIBUTE_NAME FLAG VALUE WORST THRESH TYPE UPDATED WHEN_FAILED RAW_VALUE
# 1 Raw_Read_Error_Rate 0x000f 074 063 044 Pre-fail Always - 26816470
# 3 Spin_Up_Time 0x0003 094 094 000 Pre-fail Always - 0
# 4 Start_Stop_Count 0x0032 100 100 020 Old_age Always - 314
# 5 Reallocated_Sector_Ct | # 'errorEccFast': '0',
# 'totalErrorsCorrected': '8',
# 'totalUncorrectedError': '0'
# } | random_line_split |
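fill_attrs splits each row of the SATA table documented above on whitespace; walking one sample line through that split clarifies the smart_attr layout (the line is taken from the sample table; the index mapping mirrors the code):

line = "5 Reallocated_Sector_Ct 0x0033 100 100 036 Pre-fail Always - 1"
tmp = line.split()
attr = {"ID": tmp[0], "FLAG": tmp[2], "VALUE": tmp[3], "WORST": tmp[4],
        "THRESH": tmp[5], "TYPE": tmp[6], "UPDATED": tmp[7],
        "WHEN_FAILED": tmp[8], "RAW_VALUE": tmp[9]}
# stored as smart_attr["Reallocated_Sector_Ct"] == attr  (tmp[1] is the key)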
disk.py | j):
""" return True if smart info of disk_oj has error, else return False """
if "SAS" in disk_oj.smart:
|
if "SATA" in disk_oj.smart:
if "No Errors Logged" not in disk_oj.smart:
return False
for attr_ in SATA_SMART_ERROR_LIST:
if disk_oj.smart_attr[attr_]["RAW_VALUE"] > 0:
return False
return True
@staticmethod
def get_over_agelimit_disks(disk_list):
""" return sas and sata disk list witch start_stop_hours/count or data is over the limit """
over_sas_disk = []
over_sata_disk = []
for disk in disk_list:
if disk.type == "SAS":
if int(disk.age["start_stop_count"]) > SAS_LIMIT_COUNT or float(disk.age["data_gb"]) > SAS_LIMIT_GB:
over_sas_disk.append(disk)
if disk.type == "SATA":
if int(disk.age["start_stop_count"]) > SAS_LIMIT_COUNT or int(
disk.age["power_on_hours"]) > SATA_LIMIT_HOURS:
over_sata_disk.append(disk)
return over_sas_disk, over_sata_disk
@staticmethod
def get_overage_disks_json(disk_list):
""" get_overage_disks function's json model """
pass
@staticmethod
def get_err_disk_dict():
""" return disk dict has error """
err_disk_dict = {}
disks = Disk.get_all_disk()
for i in disks:
if Disk.__if_smart_err(i):
struct = {
"dev": i.dev_name,
"model": i.model,
"fw": i.fw,
"SN": i.sn,
"type": i.type,
"vendor": i.vendor,
"smart": i.smart_attr,
"hctl": i.hctl,
"wwn": i.wwn
}
err_disk_dict[i.dev_name] = struct
return err_disk_dict
@staticmethod
def get_wearout_ssd_status():
""" return ssd wearout status dict """
disks = Disk.get_all_disk()
ssd_status = {}
for i in disks:
tmp = i.get_wearout_status()
# tmp[0] is dev_name, tmp[1] is wearout %
if tmp is not None:
ssd_status[tmp[0]] = tmp[1]
if len(ssd_status) == 0:
return None
return ssd_status
class DiskFromLsiSas3(Disk):
def __init__(self, sn, name):
super(DiskFromLsiSas3, self).__init__()
self.sn = sn
self.dev_name = name
def fill_attrs(self):
smart_str = linux.exe_shell("smartctl -a /dev/%s" % self.dev_name)
smartx_str = linux.exe_shell("smartctl -x /dev/%s" % self.dev_name)
self.smart = smartx_str
try:
self.model = linux.search_regex_one_line_string_column(smart_str, "(?:Device Model|Product):.+", ":",
1).strip()
self.fw = linux.search_regex_one_line_string_column(smart_str, "(?:Firmware|Revision).+", ":", 1).strip()
self.vendor = linux.search_regex_one_line_string_column(smart_str, "(?:SATA Ver|Vendor).+", ":", 1).split()[
0].strip()
self.sn = linux.search_regex_one_line_string_column(smart_str, "Serial (?:N|n)umber.+", ":", 1).strip()
map_temp = self.map_disk_wwn_hctl(self.dev_name)
self.wwn = map_temp["wwn"] if map_temp is not None else ""
self.hctl = map_temp["hctl"] if map_temp is not None else ""
rotational = linux.read_file(os.path.join("/sys/block", self.dev_name, "queue/rotational"))
if rotational.strip() == "0":
self.flash = True
except IOError:
print("%s read_file rotational err." % self.dev_name)
except Exception:
print("disk %s is not exists." % self.dev_name)
# fill in smart_attr
# ==========================================================================
# SAS disk
# smart_attr: {
# 'channel0Error': {
# 'Invalid DWORD count': '0',
# 'Loss of DWORD synchronization': '0',
# 'Phy reset problem': '0',
# 'Running disparity error count': '0'
# }
# 'channel1Error': {
# 'Invalid DWORD count': '0',
# 'Loss of DWORD synchronization': '0',
# 'Phy reset problem': '0',
# 'Running disparity error count': '0'
# }
# 'read': {
# 'byte10_9': '59036.419',
# 'correctionAlgorithmInvocations': '414271',
# 'errorEccByRereadsRewrite': '0',
# 'errorEccDelayed': '8',
# 'errorEccFast': '0',
# 'totalErrorsCorrected': '8',
# 'totalUncorrectedError': '0'
# }
# 'verify': {
# 'byte10_9': '59036.419',
# 'correctionAlgorithmInvocations': '414271',
# 'errorEccByRereadsRewrite': '0',
# 'errorEccDelayed': '8',
# 'errorEccFast': '0',
# 'totalErrorsCorrected': '8',
# 'totalUncorrectedError': '0'
# }
# 'write': {
# 'byte10_9': '59036.419',
# 'correctionAlgorithmInvocations': '414271',
# 'errorEccByRereadsRewrite': '0',
# 'errorEccDelayed': '8',
# 'errorEccFast': '0',
# 'totalErrorsCorrected': '8',
# 'totalUncorrectedError': '0'
# }
# }
#
# SATA disk
# smart_attr: {
# 'Raw_Read_Error_Rate': {
# 'ID': '1',
# 'FLAG': '0x000f',
# 'VALUE': '074',
# 'WORST': '063',
# 'THRESH': '044',
# 'TYPE': 'Pre-fail',
# 'UPDATED': 'Always',
# 'WHEN_FAILED': '-',
# 'RAW_VALUE': '26816470'
# }
# 'Spin_Up_Time': {
# ...(According to the following form)
# }
# }
# SATA smart form:
# ID# ATTRIBUTE_NAME FLAG VALUE WORST THRESH TYPE UPDATED WHEN_FAILED RAW_VALUE
# 1 Raw_Read_Error_Rate 0x000f 074 063 044 Pre-fail Always - 26816470
# 3 Spin_Up_Time 0x0003 094 094 000 Pre-fail Always - 0
# 4 Start_Stop_Count 0x0032 100 100 020 Old_age Always - 314
# 5 Reallocated_Sector_Ct 0 | if int(disk_oj.smart_attr["channel0Error"]["Invalid DWORD count"]) > 0 or \
int(disk_oj.smart_attr["channel0Error"]["Running disparity error count"]) > 0 or \
int(disk_oj.smart_attr["channel0Error"]["Loss of DWORD synchronization"]) > 0 or \
int(disk_oj.smart_attr["channel0Error"]["Phy reset problem"]) > 0 or \
int(disk_oj.smart_attr["channel1Error"]["Invalid DWORD count"]) > 0 or \
int(disk_oj.smart_attr["channel1Error"]["Running disparity error count"]) > 0 or \
int(disk_oj.smart_attr["channel1Error"]["Loss of DWORD synchronization"]) > 0 or \
int(disk_oj.smart_attr["channel1Error"]["Phy reset problem"]) > 0:
return True
else:
return False | conditional_block |
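The conditional block just shown ORs together the SAS phy counters parsed from `smartctl -x`; the four fields per channel come from lines shaped like the sample below (values hypothetical), which fill_attrs splits on "=":

sample = """Invalid DWORD count = 0
Running disparity error count = 0
Loss of DWORD synchronization = 2
Phy reset problem = 0"""
channel0Error = dict(
    (k.strip(), v.strip())
    for k, v in (line.split("=") for line in sample.splitlines()))
# any value > 0 makes the SMART error check report the SAS drive as failing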
load_drives.py | 793
bmean = 54.14884297660608
rstd = 57.696159704394354
gstd = 53.739380109203445
bstd = 47.66536771313241
#print( "Default normalization" )
images[:,:,:,0] -= rmean
images[:,:,:,1] -= gmean
images[:,:,:,2] -= bmean
images[:,:,:,0] /= rstd
images[:,:,:,1] /= gstd
images[:,:,:,2] /= bstd
else:
rmean = np.mean(images[:,:,:,0])
gmean= np.mean(images[:,:,:,1])
bmean= np.mean(images[:,:,:,2])
rstd = np.std(images[:,:,:,0])
gstd = np.std(images[:,:,:,1])
bstd = np.std(images[:,:,:,2])
print( "Image means: {}/{}/{}".format( rmean, gmean, bmean ) )
print( "Image stds: {}/{}/{}".format( rstd, gstd, bstd ) )
# should only do this for the training data, not val/test, but I'm not sure how to do that when Keras makes the train/val split
images[:,:,:,0] -= rmean
images[:,:,:,1] -= gmean
images[:,:,:,2] -= bmean
images[:,:,:,0] /= rstd
images[:,:,:,1] /= gstd
images[:,:,:,2] /= bstd
def embedActions( actions ):
embedding = { "stop":0, "forward":1, "left":2, "right":3, "backward":4 }
emb = []
prev_act = 0
for act in actions:
try:
if not act.startswith("speed"):
prev_act = embedding.get(act)
if prev_act is None:
print( "Invalid action: {}".format( act ) )
raise ValueError("Invalid action: " + str(act) )
emb.append( embedding[act] )
else:
emb.append( prev_act )
except Exception as ex:
print( ex )
print( act )
return emb
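# Example: "speed*" entries inherit the previous movement command, so
#   embedActions(["forward", "speed120", "left", "stop"]) -> [1, 1, 2, 0]
# (sample input is hypothetical; the mapping is the embedding dict above)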
class | (Sequence):
""" Loads MaLPi drive data
From: https://stanford.edu/~shervine/blog/keras-how-to-generate-data-on-the-fly.html"""
def __init__(self, filelist, image_size=(120,120), batch_size=32, shuffle=True, max_load=30000, auxName=None, images_only=False ):
""" Input a list of drive directories.
Pre-load each to count number of samples.
load one file and use it to generate batches until we run out.
load the next file, repeat
Re-shuffle on each epoch end
"""
'Initialization'
self.files = filelist
self.size = image_size
self.batch_size = batch_size
self.shuffle = shuffle
self.max_load = max_load
self.auxName = auxName
self.images_only = images_only
self.image_norm = False
self.next_dir_index = 0
self.images = []
self.actions = []
self.current_start = 0
self.categorical = None
self.input_dim = None
self.num_actions = None
self.batch_shape = (self.batch_size,) + self.size + (3,)
print( "Batch shape: {}".format( self.batch_shape ) )
self.count = self.__count()
self.on_epoch_end()
def __len__(self):
'The number of batches per epoch'
return int(np.floor(self.count / self.batch_size))
def __getitem__(self, index):
sample_beg = index * self.batch_size
sample_beg -= self.current_start
sample_end = sample_beg + self.batch_size
#print( "getitem {} {}:{} {}".format( index, sample_beg, sample_end, self.current_start ) )
prev_len = len(self.images)
if (sample_beg < len(self.images)) and (sample_end < len(self.images)):
images = self.images[sample_beg:sample_end]
actions = self.actions[sample_beg:sample_end]
if self.images_only:
if images.shape != self.batch_shape:
print( "Invalid batch shape (1): {} {} {} {}".format( images.shape, sample_beg, sample_end, prev_len) )
return images, images
else:
return images, actions
if sample_beg <= len(self.images):
images = self.images[sample_beg:]
actions = self.actions[sample_beg:]
#sample_end = len(self.images) - sample_beg
sample_end = self.batch_size - len(images)
self.images, self.actions = self.__load_next_max()
try:
i2 = self.images[0:sample_end]
images = np.append(images,i2,axis=0)
except Exception as ex:
print( ex )
print( "images {} {}".format( images.shape, i2.shape ) )
print( "{}".format( images ) )
try:
a2 = self.actions[0:sample_end]
actions = np.append(actions,a2,axis=0)
except Exception as ex:
print( ex )
print( "actions {} {}".format( actions.shape, a2.shape ) )
print( "{}".format( actions ) )
if self.images_only:
if images.shape != self.batch_shape:
print( "Invalid batch shape (2): {} {} {} {}".format( images.shape, sample_beg, sample_end, prev_len) )
return images, images
else:
return images, actions
print( "Invalid batch indexes: {} {}:{}".format( index, sample_beg, sample_end ) )
def __load_next_max(self):
self.current_start += len(self.images)
images = []
actions = []
while len(images) <= self.max_load and self.next_dir_index < len(self.files):
fname = self.files[self.next_dir_index]
dimages, dactions = self.loadOneDrive( fname )
images.extend(dimages)
actions.extend(dactions)
self.next_dir_index += 1
if self.shuffle == True:
if self.images_only:
images = sklearn.utils.shuffle(images)
else:
images, actions = sklearn.utils.shuffle(images,actions)
images = np.array(images)
actions = np.array(actions)
if self.image_norm:
normalize_images(images)
return images, actions
def loadOneDrive( self, drive_dir, count_only=False ):
actions = []
if not self.images_only:
if self.auxName is not None:
aux = getAuxFromMeta( drive_dir, self.auxName )
if aux is not None:
actions = loadOneAux( drive_dir, aux )
if len(actions) == 0:
actions_file = os.path.join( drive_dir, "image_actions.npy" )
if os.path.exists(actions_file):
actions = np.load(actions_file)
else:
actions_file = os.path.join( drive_dir, "image_actions.pickle" )
with open(actions_file,'rb') as f:
actions = pickle.load(f)
if len(actions) > 0:
categorical = True
if isinstance(actions[0], basestring):
actions = embedActions( actions )
actions = to_categorical( actions, num_classes=5 )
categorical = True
elif type(actions) == list:
actions = np.array(actions).astype('float')
categorical = False
elif type(actions) == np.ndarray:
actions = np.array(actions).astype('float')
categorical = False
else:
print("Unknown actions format: {} {} as {}".format( type(actions), actions[0], type(actions[0]) ))
if self.categorical is None:
self.categorical = categorical
elif self.categorical != categorical:
print( "Mixed cat/non-cat action space: {}".format( drive_dir ) )
# Need an option for this
#if not self.categorical:
# actions = self.addActionDiff(actions)
if self.num_actions is None:
self.num_actions = len(actions[0])
if count_only:
return len(actions)
basename = "images_{}x{}".format( self.size[0], self.size[1] )
im_file = os.path.join( drive_dir, basename+".npy" )
if os.path.exists(im_file):
images = np.load(im_file)
else:
im_file = os.path.join( drive_dir, basename+".pickle" )
with open(im_file,'rb') as f:
images = pickle.load(f)
if count_only:
return len(images)
if not self.images_only and (len(images) != len(actions)):
print( "Data mismatch: {}".format( drive_dir ) )
print( " images: {}".format( images.shape ) )
print( " actions: {}".format( actions.shape ) )
return images, actions
def on_epoch_end(self):
'Updates indexes after each epoch'
if self.shuffle == True:
np.random.shuffle(self.files)
self.images = []
self.actions = []
self.next_dir_index = 0
self.current_start = 0
| DriveDataGenerator | identifier_name |
load_drives.py | 793
bmean = 54.14884297660608
rstd = 57.696159704394354
gstd = 53.739380109203445
bstd = 47.66536771313241
#print( "Default normalization" )
images[:,:,:,0] -= rmean
images[:,:,:,1] -= gmean
images[:,:,:,2] -= bmean
images[:,:,:,0] /= rstd
images[:,:,:,1] /= gstd
images[:,:,:,2] /= bstd
else:
rmean = np.mean(images[:,:,:,0])
gmean= np.mean(images[:,:,:,1])
bmean= np.mean(images[:,:,:,2])
rstd = np.std(images[:,:,:,0])
gstd = np.std(images[:,:,:,1])
bstd = np.std(images[:,:,:,2])
print( "Image means: {}/{}/{}".format( rmean, gmean, bmean ) )
print( "Image stds: {}/{}/{}".format( rstd, gstd, bstd ) )
# should only do this for the training data, not val/test, but I'm not sure how to do that when Keras makes the train/val split
images[:,:,:,0] -= rmean
images[:,:,:,1] -= gmean
images[:,:,:,2] -= bmean
images[:,:,:,0] /= rstd
images[:,:,:,1] /= gstd
images[:,:,:,2] /= bstd
def embedActions( actions ):
embedding = { "stop":0, "forward":1, "left":2, "right":3, "backward":4 }
emb = []
prev_act = 0
for act in actions:
try:
if not act.startswith("speed"):
prev_act = embedding.get(act)
if prev_act is None:
print( "Invalid action: {}".format( act ) )
raise ValueError("Invalid action: " + str(act) )
emb.append( embedding[act] )
else:
emb.append( prev_act )
except Exception as ex:
print( ex )
print( act )
return emb
class DriveDataGenerator(Sequence):
| self.actions = []
self.current_start = 0
self.categorical = None
self.input_dim = None
self.num_actions = None
self.batch_shape = (self.batch_size,) + self.size + (3,)
print( "Batch shape: {}".format( self.batch_shape ) )
self.count = self.__count()
self.on_epoch_end()
def __len__(self):
'The number of batches per epoch'
return int(np.floor(self.count / self.batch_size))
def __getitem__(self, index):
sample_beg = index * self.batch_size
sample_beg -= self.current_start
sample_end = sample_beg + self.batch_size
#print( "getitem {} {}:{} {}".format( index, sample_beg, sample_end, self.current_start ) )
prev_len = len(self.images)
if (sample_beg < len(self.images)) and (sample_end < len(self.images)):
images = self.images[sample_beg:sample_end]
actions = self.actions[sample_beg:sample_end]
if self.images_only:
if images.shape != self.batch_shape:
print( "Invalid batch shape (1): {} {} {} {}".format( images.shape, sample_beg, sample_end, prev_len) )
return images, images
else:
return images, actions
if sample_beg <= len(self.images):
images = self.images[sample_beg:]
actions = self.actions[sample_beg:]
#sample_end = len(self.images) - sample_beg
sample_end = self.batch_size - len(images)
self.images, self.actions = self.__load_next_max()
try:
i2 = self.images[0:sample_end]
images = np.append(images,i2,axis=0)
except Exception as ex:
print( ex )
print( "images {} {}".format( images.shape, i2.shape ) )
print( "{}".format( images ) )
try:
a2 = self.actions[0:sample_end]
actions = np.append(actions,a2,axis=0)
except Exception as ex:
print( ex )
print( "actions {} {}".format( actions.shape, a2.shape ) )
print( "{}".format( actions ) )
if self.images_only:
if images.shape != self.batch_shape:
print( "Invalid batch shape (2): {} {} {} {}".format( images.shape, sample_beg, sample_end, prev_len) )
return images, images
else:
return images, actions
print( "Invalid batch indexes: {} {}:{}".format( index, sample_beg, sample_end ) )
def __load_next_max(self):
self.current_start += len(self.images)
images = []
actions = []
while len(images) <= self.max_load and self.next_dir_index < len(self.files):
fname = self.files[self.next_dir_index]
dimages, dactions = self.loadOneDrive( fname )
images.extend(dimages)
actions.extend(dactions)
self.next_dir_index += 1
if self.shuffle == True:
if self.images_only:
images = sklearn.utils.shuffle(images)
else:
images, actions = sklearn.utils.shuffle(images,actions)
images = np.array(images)
actions = np.array(actions)
if self.image_norm:
normalize_images(images)
return images, actions
def loadOneDrive( self, drive_dir, count_only=False ):
actions = []
if not self.images_only:
if self.auxName is not None:
aux = getAuxFromMeta( drive_dir, self.auxName )
if aux is not None:
actions = loadOneAux( drive_dir, aux )
if len(actions) == 0:
actions_file = os.path.join( drive_dir, "image_actions.npy" )
if os.path.exists(actions_file):
actions = np.load(actions_file)
else:
actions_file = os.path.join( drive_dir, "image_actions.pickle" )
with open(actions_file,'rb') as f:
actions = pickle.load(f)
if len(actions) > 0:
categorical = True
if isinstance(actions[0], basestring):
actions = embedActions( actions )
actions = to_categorical( actions, num_classes=5 )
categorical = True
elif type(actions) == list:
actions = np.array(actions).astype('float')
categorical = False
elif type(actions) == np.ndarray:
actions = np.array(actions).astype('float')
categorical = False
else:
print("Unknown actions format: {} {} as {}".format( type(actions), actions[0], type(actions[0]) ))
if self.categorical is None:
self.categorical = categorical
elif self.categorical != categorical:
print( "Mixed cat/non-cat action space: {}".format( drive_dir ) )
# Need an option for this
#if not self.categorical:
# actions = self.addActionDiff(actions)
if self.num_actions is None:
self.num_actions = len(actions[0])
if count_only:
return len(actions)
basename = "images_{}x{}".format( self.size[0], self.size[1] )
im_file = os.path.join( drive_dir, basename+".npy" )
if os.path.exists(im_file):
images = np.load(im_file)
else:
im_file = os.path.join( drive_dir, basename+".pickle" )
with open(im_file,'rb') as f:
images = pickle.load(f)
if count_only:
return len(images)
if not self.images_only and (len(images) != len(actions)):
print( "Data mismatch: {}".format( drive_dir ) )
print( " images: {}".format( images.shape ) )
print( " actions: {}".format( actions.shape ) )
return images, actions
def on_epoch_end(self):
'Updates indexes after each epoch'
if self.shuffle == True:
np.random.shuffle(self.files)
self.images = []
self.actions = []
self.next_dir_index = 0
self.current_start = 0
self | """ Loads MaLPi drive data
From: https://stanford.edu/~shervine/blog/keras-how-to-generate-data-on-the-fly.html"""
def __init__(self, filelist, image_size=(120,120), batch_size=32, shuffle=True, max_load=30000, auxName=None, images_only=False ):
""" Input a list of drive directories.
Pre-load each to count number of samples.
load one file and use it to generate batches until we run out.
load the next file, repeat
Re-shuffle on each epoch end
"""
'Initialization'
self.files = filelist
self.size = image_size
self.batch_size = batch_size
self.shuffle = shuffle
self.max_load = max_load
self.auxName = auxName
self.images_only = images_only
self.image_norm = False
self.next_dir_index = 0
self.images = [] | identifier_body |
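A plausible training loop around DriveDataGenerator, following its docstring (directory names and the model are placeholders; fit_generator matches the Keras vintage of this code):

train_dirs = ["drive_2020_01_01", "drive_2020_01_02"]  # hypothetical drive dirs
val_dirs = ["drive_2020_01_03"]
train_gen = DriveDataGenerator(train_dirs, image_size=(120, 120), batch_size=32)
val_gen = DriveDataGenerator(val_dirs, shuffle=False)
# with any compiled Keras model of matching input/output shapes:
# model.fit_generator(train_gen, validation_data=val_gen, epochs=10)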
load_drives.py | 793
bmean = 54.14884297660608
rstd = 57.696159704394354
gstd = 53.739380109203445
bstd = 47.66536771313241
#print( "Default normalization" )
images[:,:,:,0] -= rmean
images[:,:,:,1] -= gmean
images[:,:,:,2] -= bmean
images[:,:,:,0] /= rstd
images[:,:,:,1] /= gstd
images[:,:,:,2] /= bstd
else:
rmean = np.mean(images[:,:,:,0])
gmean= np.mean(images[:,:,:,1])
bmean= np.mean(images[:,:,:,2])
rstd = np.std(images[:,:,:,0])
gstd = np.std(images[:,:,:,1])
bstd = np.std(images[:,:,:,2])
print( "Image means: {}/{}/{}".format( rmean, gmean, bmean ) )
print( "Image stds: {}/{}/{}".format( rstd, gstd, bstd ) )
# should only do this for the training data, not val/test, but I'm not sure how to do that when Keras makes the train/val split
images[:,:,:,0] -= rmean
images[:,:,:,1] -= gmean
images[:,:,:,2] -= bmean
images[:,:,:,0] /= rstd
images[:,:,:,1] /= gstd
images[:,:,:,2] /= bstd
def embedActions( actions ):
embedding = { "stop":0, "forward":1, "left":2, "right":3, "backward":4 }
emb = []
prev_act = 0
for act in actions:
try:
if not act.startswith("speed"):
|
else:
emb.append( prev_act )
except Exception as ex:
print( ex )
print( act )
return emb
class DriveDataGenerator(Sequence):
""" Loads MaLPi drive data
From: https://stanford.edu/~shervine/blog/keras-how-to-generate-data-on-the-fly.html"""
def __init__(self, filelist, image_size=(120,120), batch_size=32, shuffle=True, max_load=30000, auxName=None, images_only=False ):
""" Input a list of drive directories.
Pre-load each to count number of samples.
load one file and use it to generate batches until we run out.
load the next file, repeat
Re-shuffle on each epoch end
"""
'Initialization'
self.files = filelist
self.size = image_size
self.batch_size = batch_size
self.shuffle = shuffle
self.max_load = max_load
self.auxName = auxName
self.images_only = images_only
self.image_norm = False
self.next_dir_index = 0
self.images = []
self.actions = []
self.current_start = 0
self.categorical = None
self.input_dim = None
self.num_actions = None
self.batch_shape = (self.batch_size,) + self.size + (3,)
print( "Batch shape: {}".format( self.batch_shape ) )
self.count = self.__count()
self.on_epoch_end()
def __len__(self):
'The number of batches per epoch'
return int(np.floor(self.count / self.batch_size))
def __getitem__(self, index):
sample_beg = index * self.batch_size
sample_beg -= self.current_start
sample_end = sample_beg + self.batch_size
#print( "getitem {} {}:{} {}".format( index, sample_beg, sample_end, self.current_start ) )
prev_len = len(self.images)
if (sample_beg < len(self.images)) and (sample_end < len(self.images)):
images = self.images[sample_beg:sample_end]
actions = self.actions[sample_beg:sample_end]
if self.images_only:
if images.shape != self.batch_shape:
print( "Invalid batch shape (1): {} {} {} {}".format( images.shape, sample_beg, sample_end, prev_len) )
return images, images
else:
return images, actions
if sample_beg <= len(self.images):
images = self.images[sample_beg:]
actions = self.actions[sample_beg:]
#sample_end = len(self.images) - sample_beg
sample_end = self.batch_size - len(images)
self.images, self.actions = self.__load_next_max()
try:
i2 = self.images[0:sample_end]
images = np.append(images,i2,axis=0)
except Exception as ex:
print( ex )
print( "images {} {}".format( images.shape, i2.shape ) )
print( "{}".format( images ) )
try:
a2 = self.actions[0:sample_end]
actions = np.append(actions,a2,axis=0)
except Exception as ex:
print( ex )
print( "actions {} {}".format( actions.shape, a2.shape ) )
print( "{}".format( actions ) )
if self.images_only:
if images.shape != self.batch_shape:
print( "Invalid batch shape (2): {} {} {} {}".format( images.shape, sample_beg, sample_end, prev_len) )
return images, images
else:
return images, actions
print( "Invalid batch indexes: {} {}:{}".format( index, sample_beg, sample_end ) )
def __load_next_max(self):
self.current_start += len(self.images)
images = []
actions = []
while len(images) <= self.max_load and self.next_dir_index < len(self.files):
fname = self.files[self.next_dir_index]
dimages, dactions = self.loadOneDrive( fname )
images.extend(dimages)
actions.extend(dactions)
self.next_dir_index += 1
if self.shuffle:
if self.images_only:
images = sklearn.utils.shuffle(images)
else:
images, actions = sklearn.utils.shuffle(images,actions)
images = np.array(images)
actions = np.array(actions)
if self.image_norm:
normalize_images(images)
return images, actions
def loadOneDrive( self, drive_dir, count_only=False ):
actions = []
if not self.images_only:
if self.auxName is not None:
aux = getAuxFromMeta( drive_dir, self.auxName )
if aux is not None:
actions = loadOneAux( drive_dir, aux )
if len(actions) == 0:
actions_file = os.path.join( drive_dir, "image_actions.npy" )
if os.path.exists(actions_file):
actions = np.load(actions_file)
else:
actions_file = os.path.join( drive_dir, "image_actions.pickle" )
with open(actions_file,'rb') as f:
actions = pickle.load(f)
if len(actions) > 0:
categorical = True
if isinstance(actions[0], str):
actions = embedActions( actions )
actions = to_categorical( actions, num_classes=5 )
categorical = True
elif type(actions) == list:
actions = np.array(actions).astype('float')
categorical = False
elif type(actions) == np.ndarray:
actions = np.array(actions).astype('float')
categorical = False
else:
print("Unknown actions format: {} {} as {}".format( type(actions), actions[0], type(actions[0]) ))
if self.categorical is None:
self.categorical = categorical
elif self.categorical != categorical:
print( "Mixed cat/non-cat action space: {}".format( drive_dir ) )
# Need an option for this
#if not self.categorical:
# actions = self.addActionDiff(actions)
if self.num_actions is None:
self.num_actions = len(actions[0])
if count_only:
return len(actions)
basename = "images_{}x{}".format( self.size[0], self.size[1] )
im_file = os.path.join( drive_dir, basename+".npy" )
if os.path.exists(im_file):
images = np.load(im_file)
else:
im_file = os.path.join( drive_dir, basename+".pickle" )
with open(im_file,'rb') as f:
images = pickle.load(f)
if count_only:
return len(images)
if not self.images_only and (len(images) != len(actions)):
print( "Data mismatch: {}".format( drive_dir ) )
print( " images: {}".format( images.shape ) )
print( " actions: {}".format( actions.shape ) )
return images, actions
def on_epoch_end(self):
'Updates indexes after each epoch'
if self.shuffle:
np.random.shuffle(self.files)
self.images = []
self.actions = []
self.next_dir_index = 0
self.current_start = 0
| prev_act = embedding.get(act)
if prev_act is None:
print( "Invalid action: {}".format( act ) )
raise ValueError("Invalid action: " + str(act) )
emb.append( prev_act ) | conditional_block
load_drives.py | 793
bmean = 54.14884297660608
rstd = 57.696159704394354
gstd = 53.739380109203445
bstd = 47.66536771313241
#print( "Default normalization" )
images[:,:,:,0] -= rmean
images[:,:,:,1] -= gmean
images[:,:,:,2] -= bmean
images[:,:,:,0] /= rstd
images[:,:,:,1] /= gstd
images[:,:,:,2] /= bstd
else:
rmean = np.mean(images[:,:,:,0])
gmean = np.mean(images[:,:,:,1])
bmean = np.mean(images[:,:,:,2])
rstd = np.std(images[:,:,:,0])
gstd = np.std(images[:,:,:,1])
bstd = np.std(images[:,:,:,2])
print( "Image means: {}/{}/{}".format( rmean, gmean, bmean ) )
print( "Image stds: {}/{}/{}".format( rstd, gstd, bstd ) )
# should only do this for the training data, not val/test, but I'm not sure how to do that when Keras makes the train/val split
images[:,:,:,0] -= rmean
images[:,:,:,1] -= gmean
images[:,:,:,2] -= bmean
images[:,:,:,0] /= rstd
images[:,:,:,1] /= gstd
images[:,:,:,2] /= bstd |
def embedActions( actions ):
embedding = { "stop":0, "forward":1, "left":2, "right":3, "backward":4 }
emb = []
prev_act = 0
for act in actions:
try:
if not act.startswith("speed"):
prev_act = embedding.get(act)
if prev_act is None:
print( "Invalid action: {}".format( act ) )
raise ValueError("Invalid action: " + str(act) )
emb.append( prev_act )
else:
emb.append( prev_act )
except Exception as ex:
print( ex )
print( act )
return emb
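# Editor's addition, a hedged usage example (not in the original file):
# entries starting with "speed" inherit the previous action's code, so
#   embedActions(["forward", "speed 0.5", "left"])  ->  [1, 1, 2]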
class DriveDataGenerator(Sequence):
""" Loads MaLPi drive data
From: https://stanford.edu/~shervine/blog/keras-how-to-generate-data-on-the-fly.html"""
def __init__(self, filelist, image_size=(120,120), batch_size=32, shuffle=True, max_load=30000, auxName=None, images_only=False ):
""" Input a list of drive directories.
Pre-load each to count number of samples.
load one file and use it to generate batches until we run out.
load the next file, repeat
Re-shuffle on each epoch end
"""
'Initialization'
self.files = filelist
self.size = image_size
self.batch_size = batch_size
self.shuffle = shuffle
self.max_load = max_load
self.auxName = auxName
self.images_only = images_only
self.image_norm = False
self.next_dir_index = 0
self.images = []
self.actions = []
self.current_start = 0
self.categorical = None
self.input_dim = None
self.num_actions = None
self.batch_shape = (self.batch_size,) + self.size + (3,)
print( "Batch shape: {}".format( self.batch_shape ) )
self.count = self.__count()
self.on_epoch_end()
def __len__(self):
'The number of batches per epoch'
return int(np.floor(self.count / self.batch_size))
def __getitem__(self, index):
sample_beg = index * self.batch_size
sample_beg -= self.current_start
sample_end = sample_beg + self.batch_size
#print( "getitem {} {}:{} {}".format( index, sample_beg, sample_end, self.current_start ) )
prev_len = len(self.images)
if (sample_beg < len(self.images)) and (sample_end < len(self.images)):
images = self.images[sample_beg:sample_end]
actions = self.actions[sample_beg:sample_end]
if self.images_only:
if images.shape != self.batch_shape:
print( "Invalid batch shape (1): {} {} {} {}".format( images.shape, sample_beg, sample_end, prev_len) )
return images, images
else:
return images, actions
if sample_beg <= len(self.images):
images = self.images[sample_beg:]
actions = self.actions[sample_beg:]
#sample_end = len(self.images) - sample_beg
sample_end = self.batch_size - len(images)
self.images, self.actions = self.__load_next_max()
try:
i2 = self.images[0:sample_end]
images = np.append(images,i2,axis=0)
except Exception as ex:
print( ex )
print( "images {} {}".format( images.shape, i2.shape ) )
print( "{}".format( images ) )
try:
a2 = self.actions[0:sample_end]
actions = np.append(actions,a2,axis=0)
except Exception as ex:
print( ex )
print( "actions {} {}".format( actions.shape, a2.shape ) )
print( "{}".format( actions ) )
if self.images_only:
if images.shape != self.batch_shape:
print( "Invalid batch shape (2): {} {} {} {}".format( images.shape, sample_beg, sample_end, prev_len) )
return images, images
else:
return images, actions
print( "Invalid batch indexes: {} {}:{}".format( index, sample_beg, sample_end ) )
def __load_next_max(self):
self.current_start += len(self.images)
images = []
actions = []
while len(images) <= self.max_load and self.next_dir_index < len(self.files):
fname = self.files[self.next_dir_index]
dimages, dactions = self.loadOneDrive( fname )
images.extend(dimages)
actions.extend(dactions)
self.next_dir_index += 1
if self.shuffle:
if self.images_only:
images = sklearn.utils.shuffle(images)
else:
images, actions = sklearn.utils.shuffle(images,actions)
images = np.array(images)
actions = np.array(actions)
if self.image_norm:
normalize_images(images)
return images, actions
def loadOneDrive( self, drive_dir, count_only=False ):
actions = []
if not self.images_only:
if self.auxName is not None:
aux = getAuxFromMeta( drive_dir, self.auxName )
if aux is not None:
actions = loadOneAux( drive_dir, aux )
if len(actions) == 0:
actions_file = os.path.join( drive_dir, "image_actions.npy" )
if os.path.exists(actions_file):
actions = np.load(actions_file)
else:
actions_file = os.path.join( drive_dir, "image_actions.pickle" )
with open(actions_file,'rb') as f:
actions = pickle.load(f)
if len(actions) > 0:
categorical = True
if isinstance(actions[0], str):
actions = embedActions( actions )
actions = to_categorical( actions, num_classes=5 )
categorical = True
elif type(actions) == list:
actions = np.array(actions).astype('float')
categorical = False
elif type(actions) == np.ndarray:
actions = np.array(actions).astype('float')
categorical = False
else:
print("Unknown actions format: {} {} as {}".format( type(actions), actions[0], type(actions[0]) ))
if self.categorical is None:
self.categorical = categorical
elif self.categorical != categorical:
print( "Mixed cat/non-cat action space: {}".format( drive_dir ) )
# Need an option for this
#if not self.categorical:
# actions = self.addActionDiff(actions)
if self.num_actions is None:
self.num_actions = len(actions[0])
if count_only:
return len(actions)
basename = "images_{}x{}".format( self.size[0], self.size[1] )
im_file = os.path.join( drive_dir, basename+".npy" )
if os.path.exists(im_file):
images = np.load(im_file)
else:
im_file = os.path.join( drive_dir, basename+".pickle" )
with open(im_file,'rb') as f:
images = pickle.load(f)
if count_only:
return len(images)
if not self.images_only and (len(images) != len(actions)):
print( "Data mismatch: {}".format( drive_dir ) )
print( " images: {}".format( images.shape ) )
print( " actions: {}".format( actions.shape ) )
return images, actions
def on_epoch_end(self):
'Updates indexes after each epoch'
if self.shuffle:
np.random.shuffle(self.files)
self.images = []
self.actions = []
self.next_dir_index = 0
self.current_start = 0
self | random_line_split | |
run.go | {
case 0:
cmdData.DockerCommand = args[doubleDashInd:]
case 1:
cmdData.ImageName = args[0]
cmdData.DockerCommand = args[doubleDashInd:]
default:
return fmt.Errorf("unsupported position args format")
}
} else {
switch len(args) {
case 0:
case 1:
cmdData.ImageName = args[0]
default:
return fmt.Errorf("unsupported position args format")
}
}
return nil
}
func checkDetachDockerOption() error {
for _, value := range cmdData.DockerOptions {
if value == "-d" || value == "--detach" {
return nil
}
}
return fmt.Errorf("the container must be launched in the background (in follow mode): pass -d/--detach with --docker-options option")
}
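// Editor's note, a hedged example of the two flag spellings getContainerName
// below accepts; the values are illustrative only:
//
//	--docker-options="-d --name registry"   // DockerOptions: ["-d", "--name", "registry"] -> "registry"
//	--docker-options="-d --name=registry"   // DockerOptions: ["-d", "--name=registry"]    -> "registry"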
func getContainerName() string {
for ind, value := range cmdData.DockerOptions {
if value == "--name" {
if ind+1 < len(cmdData.DockerOptions) {
return cmdData.DockerOptions[ind+1]
}
} else if strings.HasPrefix(value, "--name=") {
return strings.TrimPrefix(value, "--name=")
}
}
return ""
}
func runMain(ctx context.Context) error {
global_warnings.PostponeMultiwerfNotUpToDateWarning()
if err := werf.Init(*commonCmdData.TmpDir, *commonCmdData.HomeDir); err != nil {
return fmt.Errorf("initialization error: %w", err)
}
containerBackend, processCtx, err := common.InitProcessContainerBackend(ctx, &commonCmdData)
if err != nil {
return err
}
ctx = processCtx
gitDataManager, err := gitdata.GetHostGitDataManager(ctx)
if err != nil {
return fmt.Errorf("error getting host git data manager: %w", err)
}
if err := git_repo.Init(gitDataManager); err != nil {
return err
}
if err := image.Init(); err != nil {
return err
}
if err := lrumeta.Init(); err != nil {
return err
}
if err := true_git.Init(ctx, true_git.Options{LiveGitOutput: *commonCmdData.LogDebug}); err != nil {
return err
}
if err := common.DockerRegistryInit(ctx, &commonCmdData); err != nil {
return err
}
giterminismManager, err := common.GetGiterminismManager(ctx, &commonCmdData)
if err != nil {
return err
}
common.ProcessLogProjectDir(&commonCmdData, giterminismManager.ProjectDir())
if err := ssh_agent.Init(ctx, common.GetSSHKey(&commonCmdData)); err != nil {
return fmt.Errorf("cannot initialize ssh agent: %w", err)
}
defer func() {
err := ssh_agent.Terminate()
if err != nil {
logboek.Warn().LogF("WARNING: ssh agent termination failed: %s\n", err)
}
}()
if *commonCmdData.Follow {
if cmdData.Shell || cmdData.Bash {
return fmt.Errorf("follow mode does not work with --shell and --bash options")
}
if err := checkDetachDockerOption(); err != nil {
return err
}
containerName := getContainerName()
if containerName == "" {
return fmt.Errorf("follow mode does not work without specific container name: pass --name=CONTAINER_NAME with --docker-options option")
}
return common.FollowGitHead(ctx, &commonCmdData, func(ctx context.Context, headCommitGiterminismManager giterminism_manager.Interface) error {
if err := safeDockerCliRmFunc(ctx, containerName); err != nil {
return err
}
if err := run(ctx, containerBackend, headCommitGiterminismManager); err != nil {
return err
}
go func() {
time.Sleep(500 * time.Millisecond)
fmt.Printf("Attaching to container %s ...\n", containerName)
resp, err := docker.ContainerAttach(ctx, containerName, types.ContainerAttachOptions{
Stream: true,
Stdout: true,
Stderr: true,
Logs: true,
})
if err != nil {
_, _ = fmt.Fprintln(os.Stderr, "WARNING:", err)
}
if _, err := stdcopy.StdCopy(os.Stdout, os.Stderr, resp.Reader); err != nil {
_, _ = fmt.Fprintln(os.Stderr, "WARNING:", err)
}
}()
return nil
})
} else {
if err := run(ctx, containerBackend, giterminismManager); err != nil {
if statusErr, ok := err.(cli.StatusError); ok {
common.TerminateWithError(err.Error(), statusErr.StatusCode)
}
return err
}
return nil
}
}
func run(ctx context.Context, containerBackend container_backend.ContainerBackend, giterminismManager giterminism_manager.Interface) error {
_, werfConfig, err := common.GetRequiredWerfConfig(ctx, &commonCmdData, giterminismManager, common.GetWerfConfigOptions(&commonCmdData, false))
if err != nil {
return fmt.Errorf("unable to load werf config: %w", err)
}
projectName := werfConfig.Meta.Project
projectTmpDir, err := tmp_manager.CreateProjectDir(ctx)
if err != nil {
return fmt.Errorf("getting project tmp dir failed: %w", err)
}
defer tmp_manager.ReleaseProjectDir(projectTmpDir)
imageName := cmdData.ImageName
if imageName == "" && len(werfConfig.GetAllImages()) == 1 {
imageName = werfConfig.GetAllImages()[0].GetName()
}
if !werfConfig.HasImage(imageName) {
return fmt.Errorf("image %q is not defined in werf.yaml", logging.ImageLogName(imageName, false))
}
stagesStorage, err := common.GetStagesStorage(ctx, containerBackend, &commonCmdData)
if err != nil {
return err
}
finalStagesStorage, err := common.GetOptionalFinalStagesStorage(ctx, containerBackend, &commonCmdData)
if err != nil {
return err
}
synchronization, err := common.GetSynchronization(ctx, &commonCmdData, projectName, stagesStorage)
if err != nil {
return err
}
storageLockManager, err := common.GetStorageLockManager(ctx, synchronization)
if err != nil {
return err
}
secondaryStagesStorageList, err := common.GetSecondaryStagesStorageList(ctx, stagesStorage, containerBackend, &commonCmdData)
if err != nil {
return err
}
cacheStagesStorageList, err := common.GetCacheStagesStorageList(ctx, containerBackend, &commonCmdData)
if err != nil {
return err
}
storageManager := manager.NewStorageManager(projectName, stagesStorage, finalStagesStorage, secondaryStagesStorageList, cacheStagesStorageList, storageLockManager)
logboek.Context(ctx).Info().LogOptionalLn()
imagesToProcess := build.NewImagesToProcess([]string{imageName}, false)
conveyorOptions, err := common.GetConveyorOptions(ctx, &commonCmdData, imagesToProcess)
if err != nil {
return err
}
conveyorWithRetry := build.NewConveyorWithRetryWrapper(werfConfig, giterminismManager, giterminismManager.ProjectDir(), projectTmpDir, ssh_agent.SSHAuthSock, containerBackend, storageManager, storageLockManager, conveyorOptions)
defer conveyorWithRetry.Terminate()
var dockerImageName string
if err := conveyorWithRetry.WithRetryBlock(ctx, func(c *build.Conveyor) error {
if common.GetRequireBuiltImages(ctx, &commonCmdData) {
if err := c.ShouldBeBuilt(ctx, build.ShouldBeBuiltOptions{}); err != nil {
return err
}
} else {
if err := c.Build(ctx, build.BuildOptions{SkipImageMetadataPublication: *commonCmdData.Dev}); err != nil {
return err
}
}
dockerImageName, err = c.GetFullImageName(ctx, imageName)
if err != nil {
return fmt.Errorf("unable to get full name for image %q: %w", imageName, err)
}
return nil
}); err != nil {
return err
}
var dockerRunArgs []string
dockerRunArgs = append(dockerRunArgs, cmdData.DockerOptions...)
dockerRunArgs = append(dockerRunArgs, dockerImageName)
dockerRunArgs = append(dockerRunArgs, cmdData.DockerCommand...)
if *commonCmdData.DryRun {
fmt.Printf("docker run %s\n", strings.Join(dockerRunArgs, " "))
return nil
} else {
return logboek.Streams().DoErrorWithoutProxyStreamDataFormatting(func() error {
return common.WithoutTerminationSignalsTrap(func() error {
return docker.CliRun_LiveOutput(ctx, dockerRunArgs...)
})
})
}
}
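// Editor's note: with --dry-run, run() above only prints the assembled
// "docker run ..." command line instead of executing it through the Docker CLI.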
func safeDockerCliRmFunc(ctx context.Context, containerName string) error {
if exist, err := docker.ContainerExist(ctx, containerName); err != nil | {
return fmt.Errorf("unable to check container %s existence: %w", containerName, err)
} | conditional_block | |
run.go | command
$ werf run --docker-options="-d -p 5000:5000 --restart=always --name registry" -- /app/run.sh
# Print a resulting docker run command
$ werf run --shell --dry-run
docker run -ti --rm image-stage-test:1ffe83860127e68e893b6aece5b0b7619f903f8492a285c6410371c87018c6a0 /bin/sh`,
Annotations: map[string]string{
common.DisableOptionsInUseLineAnno: "1",
common.DocsLongMD: GetRunDocs().LongMD,
},
RunE: func(cmd *cobra.Command, args []string) error {
ctx := cmd.Context()
defer global_warnings.PrintGlobalWarnings(ctx)
if err := common.ProcessLogOptions(&commonCmdData); err != nil {
common.PrintHelp(cmd)
return err
}
if err := processArgs(cmd, args); err != nil {
common.PrintHelp(cmd)
return err
}
if cmdData.RawDockerOptions != "" {
cmdData.DockerOptions = strings.Fields(cmdData.RawDockerOptions)
}
if cmdData.Shell && cmdData.Bash {
return fmt.Errorf("cannot use --shell and --bash options at the same time")
}
if cmdData.Shell || cmdData.Bash {
if len(cmdData.DockerOptions) == 0 && len(cmdData.DockerCommand) == 0 {
cmdData.DockerOptions = []string{"-ti", "--rm"}
if cmdData.Shell {
cmdData.DockerOptions = append(cmdData.DockerOptions, "--entrypoint=/bin/sh")
}
if cmdData.Bash {
cmdData.DockerOptions = append(cmdData.DockerOptions, "--entrypoint=/bin/bash")
}
} else {
common.PrintHelp(cmd)
return fmt.Errorf("shell option cannot be used with other docker run arguments")
}
} else if len(cmdData.DockerOptions) == 0 {
cmdData.DockerOptions = append(cmdData.DockerOptions, "--rm")
}
return runMain(ctx)
},
})
common.SetupDir(&commonCmdData, cmd)
common.SetupGitWorkTree(&commonCmdData, cmd)
common.SetupConfigTemplatesDir(&commonCmdData, cmd)
common.SetupConfigPath(&commonCmdData, cmd)
common.SetupEnvironment(&commonCmdData, cmd)
common.SetupGiterminismOptions(&commonCmdData, cmd)
common.SetupTmpDir(&commonCmdData, cmd, common.SetupTmpDirOptions{})
common.SetupHomeDir(&commonCmdData, cmd, common.SetupHomeDirOptions{})
common.SetupSSHKey(&commonCmdData, cmd)
common.SetupSecondaryStagesStorageOptions(&commonCmdData, cmd)
common.SetupCacheStagesStorageOptions(&commonCmdData, cmd)
common.SetupRepoOptions(&commonCmdData, cmd, common.RepoDataOptions{OptionalRepo: true})
common.SetupFinalRepo(&commonCmdData, cmd)
common.SetupSkipBuild(&commonCmdData, cmd)
common.SetupRequireBuiltImages(&commonCmdData, cmd)
common.SetupFollow(&commonCmdData, cmd)
common.SetupDockerConfig(&commonCmdData, cmd, "Command needs granted permissions to read and pull images from the specified repo")
common.SetupInsecureRegistry(&commonCmdData, cmd)
common.SetupInsecureHelmDependencies(&commonCmdData, cmd)
common.SetupSkipTlsVerifyRegistry(&commonCmdData, cmd)
common.SetupLogOptions(&commonCmdData, cmd)
common.SetupLogProjectDir(&commonCmdData, cmd)
common.SetupSynchronization(&commonCmdData, cmd)
common.SetupKubeConfig(&commonCmdData, cmd)
common.SetupKubeConfigBase64(&commonCmdData, cmd)
common.SetupKubeContext(&commonCmdData, cmd)
common.SetupDryRun(&commonCmdData, cmd)
common.SetupVirtualMerge(&commonCmdData, cmd)
commonCmdData.SetupPlatform(cmd)
cmd.Flags().BoolVarP(&cmdData.Shell, "shell", "", false, "Use predefined docker options and command for debug")
cmd.Flags().BoolVarP(&cmdData.Bash, "bash", "", false, "Use predefined docker options and command for debug")
cmd.Flags().StringVarP(&cmdData.RawDockerOptions, "docker-options", "", os.Getenv("WERF_DOCKER_OPTIONS"), "Define docker run options (default $WERF_DOCKER_OPTIONS)")
return cmd
}
func processArgs(cmd *cobra.Command, args []string) error | case 0:
case 1:
cmdData.ImageName = args[0]
default:
return fmt.Errorf("unsupported position args format")
}
}
return nil
}
func checkDetachDockerOption() error {
for _, value := range cmdData.DockerOptions {
if value == "-d" || value == "--detach" {
return nil
}
}
return fmt.Errorf("the container must be launched in the background (in follow mode): pass -d/--detach with --docker-options option")
}
func getContainerName() string {
for ind, value := range cmdData.DockerOptions {
if value == "--name" {
if ind+1 < len(cmdData.DockerOptions) {
return cmdData.DockerOptions[ind+1]
}
} else if strings.HasPrefix(value, "--name=") {
return strings.TrimPrefix(value, "--name=")
}
}
return ""
}
func runMain(ctx context.Context) error {
global_warnings.PostponeMultiwerfNotUpToDateWarning()
if err := werf.Init(*commonCmdData.TmpDir, *commonCmdData.HomeDir); err != nil {
return fmt.Errorf("initialization error: %w", err)
}
containerBackend, processCtx, err := common.InitProcessContainerBackend(ctx, &commonCmdData)
if err != nil {
return err
}
ctx = processCtx
gitDataManager, err := gitdata.GetHostGitDataManager(ctx)
if err != nil {
return fmt.Errorf("error getting host git data manager: %w", err)
}
if err := git_repo.Init(gitDataManager); err != nil {
return err
}
if err := image.Init(); err != nil {
return err
}
if err := lrumeta.Init(); err != nil {
return err
}
if err := true_git.Init(ctx, true_git.Options{LiveGitOutput: *commonCmdData.LogDebug}); err != nil {
return err
}
if err := common.DockerRegistryInit(ctx, &commonCmdData); err != nil {
return err
}
giterminismManager, err := common.GetGiterminismManager(ctx, &commonCmdData)
if err != nil {
return err
}
common.ProcessLogProjectDir(&commonCmdData, giterminismManager.ProjectDir())
if err := ssh_agent.Init(ctx, common.GetSSHKey(&commonCmdData)); err != nil {
return fmt.Errorf("cannot initialize ssh agent: %w", err)
}
defer func() {
err := ssh_agent.Terminate()
if err != nil {
logboek.Warn().LogF("WARNING: ssh agent termination failed: %s\n", err)
}
}()
if *commonCmdData.Follow {
if cmdData.Shell || cmdData.Bash {
return fmt.Errorf("follow mode does not work with --shell and --bash options")
}
if err := checkDetachDockerOption(); err != nil {
return err
}
containerName := getContainerName()
if containerName == "" {
return fmt.Errorf("follow mode does not work without specific container name: pass --name=CONTAINER_NAME with --docker-options option")
}
return common.FollowGitHead(ctx, &commonCmdData, func(ctx context.Context, headCommitGiterminismManager giterminism_manager.Interface) error {
if err := safeDockerCliRmFunc(ctx, containerName); err != nil {
return err
}
if err := run(ctx, containerBackend, headCommitGiterminismManager); err != nil {
return err
}
go func() {
time.Sleep(500 * time.Millisecond)
fmt.Printf("Attaching to container %s ...\n", containerName)
resp, err := docker.ContainerAttach(ctx, containerName, types.ContainerAttachOptions{
Stream: true,
Stdout: true,
Stderr: true,
Logs: true,
})
if err != nil {
_, _ = fmt.Fprintln(os.Stderr, "WARNING:", err)
}
if _, err := stdcopy.StdCopy(os.Stdout, os.Stderr, resp.Reader); err | {
doubleDashInd := cmd.ArgsLenAtDash()
doubleDashExist := cmd.ArgsLenAtDash() != -1
if doubleDashExist {
if doubleDashInd == len(args) {
return fmt.Errorf("unsupported position args format")
}
switch doubleDashInd {
case 0:
cmdData.DockerCommand = args[doubleDashInd:]
case 1:
cmdData.ImageName = args[0]
cmdData.DockerCommand = args[doubleDashInd:]
default:
return fmt.Errorf("unsupported position args format")
}
} else {
switch len(args) { | identifier_body |
run.go | command
$ werf run --docker-options="-d -p 5000:5000 --restart=always --name registry" -- /app/run.sh
# Print a resulting docker run command
$ werf run --shell --dry-run
docker run -ti --rm image-stage-test:1ffe83860127e68e893b6aece5b0b7619f903f8492a285c6410371c87018c6a0 /bin/sh`,
Annotations: map[string]string{
common.DisableOptionsInUseLineAnno: "1",
common.DocsLongMD: GetRunDocs().LongMD,
},
RunE: func(cmd *cobra.Command, args []string) error {
ctx := cmd.Context()
defer global_warnings.PrintGlobalWarnings(ctx)
if err := common.ProcessLogOptions(&commonCmdData); err != nil {
common.PrintHelp(cmd)
return err
}
if err := processArgs(cmd, args); err != nil {
common.PrintHelp(cmd)
return err
}
if cmdData.RawDockerOptions != "" {
cmdData.DockerOptions = strings.Fields(cmdData.RawDockerOptions)
}
if cmdData.Shell && cmdData.Bash {
return fmt.Errorf("cannot use --shell and --bash options at the same time")
}
if cmdData.Shell || cmdData.Bash {
if len(cmdData.DockerOptions) == 0 && len(cmdData.DockerCommand) == 0 {
cmdData.DockerOptions = []string{"-ti", "--rm"}
if cmdData.Shell {
cmdData.DockerOptions = append(cmdData.DockerOptions, "--entrypoint=/bin/sh")
}
if cmdData.Bash {
cmdData.DockerOptions = append(cmdData.DockerOptions, "--entrypoint=/bin/bash")
}
} else {
common.PrintHelp(cmd)
return fmt.Errorf("shell option cannot be used with other docker run arguments")
}
} else if len(cmdData.DockerOptions) == 0 {
cmdData.DockerOptions = append(cmdData.DockerOptions, "--rm")
}
return runMain(ctx)
},
})
common.SetupDir(&commonCmdData, cmd)
common.SetupGitWorkTree(&commonCmdData, cmd)
common.SetupConfigTemplatesDir(&commonCmdData, cmd)
common.SetupConfigPath(&commonCmdData, cmd)
common.SetupEnvironment(&commonCmdData, cmd)
common.SetupGiterminismOptions(&commonCmdData, cmd)
common.SetupTmpDir(&commonCmdData, cmd, common.SetupTmpDirOptions{})
common.SetupHomeDir(&commonCmdData, cmd, common.SetupHomeDirOptions{})
common.SetupSSHKey(&commonCmdData, cmd)
common.SetupSecondaryStagesStorageOptions(&commonCmdData, cmd)
common.SetupCacheStagesStorageOptions(&commonCmdData, cmd)
common.SetupRepoOptions(&commonCmdData, cmd, common.RepoDataOptions{OptionalRepo: true})
common.SetupFinalRepo(&commonCmdData, cmd)
common.SetupSkipBuild(&commonCmdData, cmd)
common.SetupRequireBuiltImages(&commonCmdData, cmd)
common.SetupFollow(&commonCmdData, cmd)
common.SetupDockerConfig(&commonCmdData, cmd, "Command needs granted permissions to read and pull images from the specified repo")
common.SetupInsecureRegistry(&commonCmdData, cmd)
common.SetupInsecureHelmDependencies(&commonCmdData, cmd)
common.SetupSkipTlsVerifyRegistry(&commonCmdData, cmd)
common.SetupLogOptions(&commonCmdData, cmd)
common.SetupLogProjectDir(&commonCmdData, cmd)
common.SetupSynchronization(&commonCmdData, cmd)
common.SetupKubeConfig(&commonCmdData, cmd)
common.SetupKubeConfigBase64(&commonCmdData, cmd)
common.SetupKubeContext(&commonCmdData, cmd)
common.SetupDryRun(&commonCmdData, cmd)
common.SetupVirtualMerge(&commonCmdData, cmd)
commonCmdData.SetupPlatform(cmd)
cmd.Flags().BoolVarP(&cmdData.Shell, "shell", "", false, "Use predefined docker options and command for debug")
cmd.Flags().BoolVarP(&cmdData.Bash, "bash", "", false, "Use predefined docker options and command for debug")
cmd.Flags().StringVarP(&cmdData.RawDockerOptions, "docker-options", "", os.Getenv("WERF_DOCKER_OPTIONS"), "Define docker run options (default $WERF_DOCKER_OPTIONS)")
return cmd
}
func processArgs(cmd *cobra.Command, args []string) error {
doubleDashInd := cmd.ArgsLenAtDash()
doubleDashExist := cmd.ArgsLenAtDash() != -1
if doubleDashExist {
if doubleDashInd == len(args) {
return fmt.Errorf("unsupported position args format")
}
switch doubleDashInd {
case 0:
cmdData.DockerCommand = args[doubleDashInd:]
case 1:
cmdData.ImageName = args[0]
cmdData.DockerCommand = args[doubleDashInd:]
default:
return fmt.Errorf("unsupported position args format")
}
} else {
switch len(args) {
case 0:
case 1:
cmdData.ImageName = args[0]
default:
return fmt.Errorf("unsupported position args format")
}
}
return nil
}
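// Editor's note (hedged example): cobra's ArgsLenAtDash is the index of the
// first argument after "--" ("--" itself is not kept in args), so for
//
//	werf run myimage -- /app/run.sh --foo
//
// args == ["myimage", "/app/run.sh", "--foo"] and doubleDashInd == 1, giving
// ImageName == "myimage" and DockerCommand == ["/app/run.sh", "--foo"].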
func checkDetachDockerOption() error {
for _, value := range cmdData.DockerOptions {
if value == "-d" || value == "--detach" {
return nil
}
}
return fmt.Errorf("the container must be launched in the background (in follow mode): pass -d/--detach with --docker-options option")
}
func | () string {
for ind, value := range cmdData.DockerOptions {
if value == "--name" {
if ind+1 < len(cmdData.DockerOptions) {
return cmdData.DockerOptions[ind+1]
}
} else if strings.HasPrefix(value, "--name=") {
return strings.TrimPrefix(value, "--name=")
}
}
return ""
}
func runMain(ctx context.Context) error {
global_warnings.PostponeMultiwerfNotUpToDateWarning()
if err := werf.Init(*commonCmdData.TmpDir, *commonCmdData.HomeDir); err != nil {
return fmt.Errorf("initialization error: %w", err)
}
containerBackend, processCtx, err := common.InitProcessContainerBackend(ctx, &commonCmdData)
if err != nil {
return err
}
ctx = processCtx
gitDataManager, err := gitdata.GetHostGitDataManager(ctx)
if err != nil {
return fmt.Errorf("error getting host git data manager: %w", err)
}
if err := git_repo.Init(gitDataManager); err != nil {
return err
}
if err := image.Init(); err != nil {
return err
}
if err := lrumeta.Init(); err != nil {
return err
}
if err := true_git.Init(ctx, true_git.Options{LiveGitOutput: *commonCmdData.LogDebug}); err != nil {
return err
}
if err := common.DockerRegistryInit(ctx, &commonCmdData); err != nil {
return err
}
giterminismManager, err := common.GetGiterminismManager(ctx, &commonCmdData)
if err != nil {
return err
}
common.ProcessLogProjectDir(&commonCmdData, giterminismManager.ProjectDir())
if err := ssh_agent.Init(ctx, common.GetSSHKey(&commonCmdData)); err != nil {
return fmt.Errorf("cannot initialize ssh agent: %w", err)
}
defer func() {
err := ssh_agent.Terminate()
if err != nil {
logboek.Warn().LogF("WARNING: ssh agent termination failed: %s\n", err)
}
}()
if *commonCmdData.Follow {
if cmdData.Shell || cmdData.Bash {
return fmt.Errorf("follow mode does not work with --shell and --bash options")
}
if err := checkDetachDockerOption(); err != nil {
return err
}
containerName := getContainerName()
if containerName == "" {
return fmt.Errorf("follow mode does not work without specific container name: pass --name=CONTAINER_NAME with --docker-options option")
}
return common.FollowGitHead(ctx, &commonCmdData, func(ctx context.Context, headCommitGiterminismManager giterminism_manager.Interface) error {
if err := safeDockerCliRmFunc(ctx, containerName); err != nil {
return err
}
if err := run(ctx, containerBackend, headCommitGiterminismManager); err != nil {
return err
}
go func() {
time.Sleep(500 * time.Millisecond)
fmt.Printf("Attaching to container %s ...\n", containerName)
resp, err := docker.ContainerAttach(ctx, containerName, types.ContainerAttachOptions{
Stream: true,
Stdout: true,
Stderr: true,
Logs: true,
})
if err != nil {
_, _ = fmt.Fprintln(os.Stderr, "WARNING:", err)
}
if _, err := stdcopy.StdCopy(os.Stdout, os.Stderr, resp.Reader); err | getContainerName | identifier_name |
run.go | 000 --restart=always --name registry" -- /app/run.sh
# Print a resulting docker run command
$ werf run --shell --dry-run
docker run -ti --rm image-stage-test:1ffe83860127e68e893b6aece5b0b7619f903f8492a285c6410371c87018c6a0 /bin/sh`,
Annotations: map[string]string{
common.DisableOptionsInUseLineAnno: "1",
common.DocsLongMD: GetRunDocs().LongMD,
},
RunE: func(cmd *cobra.Command, args []string) error {
ctx := cmd.Context()
defer global_warnings.PrintGlobalWarnings(ctx)
if err := common.ProcessLogOptions(&commonCmdData); err != nil {
common.PrintHelp(cmd)
return err
}
if err := processArgs(cmd, args); err != nil {
common.PrintHelp(cmd)
return err
}
if cmdData.RawDockerOptions != "" {
cmdData.DockerOptions = strings.Fields(cmdData.RawDockerOptions)
}
if cmdData.Shell && cmdData.Bash {
return fmt.Errorf("cannot use --shell and --bash options at the same time")
}
if cmdData.Shell || cmdData.Bash {
if len(cmdData.DockerOptions) == 0 && len(cmdData.DockerCommand) == 0 {
cmdData.DockerOptions = []string{"-ti", "--rm"}
if cmdData.Shell {
cmdData.DockerOptions = append(cmdData.DockerOptions, "--entrypoint=/bin/sh")
}
if cmdData.Bash {
cmdData.DockerOptions = append(cmdData.DockerOptions, "--entrypoint=/bin/bash")
}
} else {
common.PrintHelp(cmd)
return fmt.Errorf("shell option cannot be used with other docker run arguments")
}
} else if len(cmdData.DockerOptions) == 0 {
cmdData.DockerOptions = append(cmdData.DockerOptions, "--rm")
}
return runMain(ctx)
},
})
common.SetupDir(&commonCmdData, cmd)
common.SetupGitWorkTree(&commonCmdData, cmd)
common.SetupConfigTemplatesDir(&commonCmdData, cmd)
common.SetupConfigPath(&commonCmdData, cmd)
common.SetupEnvironment(&commonCmdData, cmd)
common.SetupGiterminismOptions(&commonCmdData, cmd)
common.SetupTmpDir(&commonCmdData, cmd, common.SetupTmpDirOptions{})
common.SetupHomeDir(&commonCmdData, cmd, common.SetupHomeDirOptions{})
common.SetupSSHKey(&commonCmdData, cmd)
common.SetupSecondaryStagesStorageOptions(&commonCmdData, cmd)
common.SetupCacheStagesStorageOptions(&commonCmdData, cmd)
common.SetupRepoOptions(&commonCmdData, cmd, common.RepoDataOptions{OptionalRepo: true})
common.SetupFinalRepo(&commonCmdData, cmd)
common.SetupSkipBuild(&commonCmdData, cmd)
common.SetupRequireBuiltImages(&commonCmdData, cmd)
common.SetupFollow(&commonCmdData, cmd)
common.SetupDockerConfig(&commonCmdData, cmd, "Command needs granted permissions to read and pull images from the specified repo")
common.SetupInsecureRegistry(&commonCmdData, cmd)
common.SetupInsecureHelmDependencies(&commonCmdData, cmd)
common.SetupSkipTlsVerifyRegistry(&commonCmdData, cmd)
common.SetupLogOptions(&commonCmdData, cmd)
common.SetupLogProjectDir(&commonCmdData, cmd)
common.SetupSynchronization(&commonCmdData, cmd)
common.SetupKubeConfig(&commonCmdData, cmd)
common.SetupKubeConfigBase64(&commonCmdData, cmd)
common.SetupKubeContext(&commonCmdData, cmd)
common.SetupDryRun(&commonCmdData, cmd)
common.SetupVirtualMerge(&commonCmdData, cmd)
commonCmdData.SetupPlatform(cmd)
cmd.Flags().BoolVarP(&cmdData.Shell, "shell", "", false, "Use predefined docker options and command for debug")
cmd.Flags().BoolVarP(&cmdData.Bash, "bash", "", false, "Use predefined docker options and command for debug")
cmd.Flags().StringVarP(&cmdData.RawDockerOptions, "docker-options", "", os.Getenv("WERF_DOCKER_OPTIONS"), "Define docker run options (default $WERF_DOCKER_OPTIONS)")
return cmd
}
func processArgs(cmd *cobra.Command, args []string) error {
doubleDashInd := cmd.ArgsLenAtDash()
doubleDashExist := cmd.ArgsLenAtDash() != -1
if doubleDashExist {
if doubleDashInd == len(args) {
return fmt.Errorf("unsupported position args format")
}
switch doubleDashInd {
case 0:
cmdData.DockerCommand = args[doubleDashInd:]
case 1:
cmdData.ImageName = args[0]
cmdData.DockerCommand = args[doubleDashInd:]
default:
return fmt.Errorf("unsupported position args format")
}
} else {
switch len(args) {
case 0:
case 1:
cmdData.ImageName = args[0]
default:
return fmt.Errorf("unsupported position args format")
}
}
return nil
}
func checkDetachDockerOption() error {
for _, value := range cmdData.DockerOptions {
if value == "-d" || value == "--detach" {
return nil
}
}
return fmt.Errorf("the container must be launched in the background (in follow mode): pass -d/--detach with --docker-options option")
}
func getContainerName() string {
for ind, value := range cmdData.DockerOptions {
if value == "--name" {
if ind+1 < len(cmdData.DockerOptions) {
return cmdData.DockerOptions[ind+1]
}
} else if strings.HasPrefix(value, "--name=") {
return strings.TrimPrefix(value, "--name=")
}
}
return ""
}
func runMain(ctx context.Context) error {
global_warnings.PostponeMultiwerfNotUpToDateWarning()
if err := werf.Init(*commonCmdData.TmpDir, *commonCmdData.HomeDir); err != nil {
return fmt.Errorf("initialization error: %w", err)
}
containerBackend, processCtx, err := common.InitProcessContainerBackend(ctx, &commonCmdData)
if err != nil {
return err
}
ctx = processCtx
gitDataManager, err := gitdata.GetHostGitDataManager(ctx)
if err != nil {
return fmt.Errorf("error getting host git data manager: %w", err)
}
if err := git_repo.Init(gitDataManager); err != nil {
return err
}
if err := image.Init(); err != nil {
return err
}
if err := lrumeta.Init(); err != nil {
return err
}
if err := true_git.Init(ctx, true_git.Options{LiveGitOutput: *commonCmdData.LogDebug}); err != nil {
return err
}
if err := common.DockerRegistryInit(ctx, &commonCmdData); err != nil {
return err
}
giterminismManager, err := common.GetGiterminismManager(ctx, &commonCmdData)
if err != nil {
return err
}
common.ProcessLogProjectDir(&commonCmdData, giterminismManager.ProjectDir())
if err := ssh_agent.Init(ctx, common.GetSSHKey(&commonCmdData)); err != nil {
return fmt.Errorf("cannot initialize ssh agent: %w", err)
}
defer func() {
err := ssh_agent.Terminate()
if err != nil {
logboek.Warn().LogF("WARNING: ssh agent termination failed: %s\n", err)
}
}()
if *commonCmdData.Follow {
if cmdData.Shell || cmdData.Bash {
return fmt.Errorf("follow mode does not work with --shell and --bash options")
}
if err := checkDetachDockerOption(); err != nil {
return err
}
containerName := getContainerName()
if containerName == "" {
return fmt.Errorf("follow mode does not work without specific container name: pass --name=CONTAINER_NAME with --docker-options option")
}
return common.FollowGitHead(ctx, &commonCmdData, func(ctx context.Context, headCommitGiterminismManager giterminism_manager.Interface) error {
if err := safeDockerCliRmFunc(ctx, containerName); err != nil {
return err
}
if err := run(ctx, containerBackend, headCommitGiterminismManager); err != nil {
return err
}
go func() {
time.Sleep(500 * time.Millisecond)
fmt.Printf("Attaching to container %s ...\n", containerName)
resp, err := docker.ContainerAttach(ctx, containerName, types.ContainerAttachOptions{
Stream: true,
Stdout: true,
Stderr: true,
Logs: true,
})
if err != nil {
_, _ = fmt.Fprintln(os.Stderr, "WARNING:", err)
}
if _, err := stdcopy.StdCopy(os.Stdout, os.Stderr, resp.Reader); err != nil { | _, _ = fmt.Fprintln(os.Stderr, "WARNING:", err)
} | random_line_split | |
lib.rs | /index.html):
//!
//! ```
//! use parkour::prelude::*;
//! ```
//!
//! First, create a struct containing all the data you want to parse. For
//! example:
//!
//! ```
//! struct Command {
//! color: Option<bool>,
//! show: Option<Show>,
//! }
//!
//! struct Show {
//! pos1: String,
//! out: ColorSpace,
//! size: u8,
//! }
//!
//! enum ColorSpace {
//! Rgb,
//! Cmy,
//! Cmyk,
//! Hsv,
//! Hsl,
//! CieLab,
//! }
//! ```
//!
//! `bool`, `u8` and `String` can all be parsed by default. To parse
//! `ColorSpace`, we have to implement the [`FromInputValue`] trait. This is
//! easiest done with the derive macro:
//!
//! ```
//! # use parkour::prelude::*;
//! #[derive(FromInputValue)]
//! enum ColorSpace {
//! Rgb,
//! Cmy,
//! Cmyk,
//! Hsv,
//! Hsl,
//! CieLab,
//! }
//! ```
//!
//! This parses the names of the enum variants case-insensitively. When an
//! invalid value is provided, the error message will say something like:
//!
//! ```text
//! unexpected value, got `foo`, expected rgb, cmy, cmyk, hsv, hsl or cielab
//! ```
//!
//! Now let's implement `Show` as a subcommand. Unfortunately, there's no
//! convenient derive macro (yet):
//!
//! ```
//! # use parkour::prelude::*;
//! # #[derive(FromInputValue)]
//! # enum ColorSpace { Rgb, Cmy, Cmyk, Hsv, Hsl, CieLab }
//! #
//! struct Show {
//! pos1: String,
//! color_space: ColorSpace,
//! size: u8,
//! }
//!
//! impl FromInput<'static> for Show {
//! type Context = ();
//!
//! fn from_input(input: &mut ArgsInput, _: &()) -> parkour::Result<Self> {
//! if input.parse_command("show") {
//! let mut pos1 = None;
//! let mut color_space = None;
//! let mut size = None;
//!
//! while !input.is_empty() {
//! if SetOnce(&mut color_space)
//! .apply(input, &Flag::LongShort("color-space", "c").into())? {
//! continue;
//! }
//!
//! if SetOnce(&mut size)
//! .apply(input, &Flag::LongShort("size", "s").into())? {
//! continue;
//! }
//!
//! if pos1.is_none()
//! && SetPositional(&mut pos1).apply(input, &"pos1".into())? {
//! continue;
//! }
//!
//! input.expect_empty()?;
//! }
//!
//! Ok(Show {
//! pos1: pos1.ok_or_else(|| parkour::Error::missing_argument("pos1"))?,
//! color_space: color_space
//! .ok_or_else(|| parkour::Error::missing_argument("--color-space"))?,
//! size: size.unwrap_or(4),
//! })
//! } else {
//! Err(parkour::Error::no_value())
//! }
//! }
//! }
//! ```
//!
//! To parse a subcommand, we implement the [`FromInput`] trait. We first check
//! if the next argument is the word `show`. If that's the case, we iterate over
//! the remaining input until it is empty.
//!
//! In the subcommand, we expect two named arguments (`--color-space` and
//! `--size`) and a positional argument (`pos1`). Therefore, in each iteration,
//! we first check if we can parse the named arguments, and then the positional
//! argument. If none of them succeeds and there is still input left, then
//! `input.expect_empty()?` throws an error.
//!
//! Producing the `Show` struct is rather straightforward (`pos1` and
//! `--color-space` are required, `--size` defaults to `4`). However, parsing
//! the values involves some type system magic. `SetOnce` and `SetPositional`
//! are [actions], they check if the referenced types can be parsed, and if so,
//! assign the parsed value to the variable automatically. They also ensure that
//! each argument is parsed at most once.
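//!
//! As a rough sketch (illustrative only; the crate's real action trait may
//! differ), an action takes the input plus a context and reports whether it
//! consumed anything:
//!
//! ```
//! # use parkour::prelude::*;
//! // hypothetical simplified shape, deliberately named differently
//! trait ActionSketch<C> {
//!     fn apply(&mut self, input: &mut ArgsInput, context: &C) -> parkour::Result<bool>;
//! }
//! ```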
//!
//! Whenever something is parsed, a _context_ is provided that can contain
//! information about _how_ the value should be parsed. In the above example,
//! `Flag::LongShort("color-space", "c").into()` is a context that instructs the
//! parser to parse the color space after the `--color-space` or the `-c` flag.
//!
//! The main command can be implemented similarly:
//!
//! ```
//! # use parkour::prelude::*;
//! # enum ColorSpace { Rgb, Cmy, Cmyk, Hsv, Hsl, CieLab }
//! # struct Show {
//! # pos1: String, | //! # impl FromInput<'static> for Show {
//! # type Context = ();
//! # fn from_input(input: &mut ArgsInput, _: &()) -> parkour::Result<Self> {
//! # todo!()
//! # }
//! # }
//! #
//! struct Command {
//! color: Option<bool>,
//! show: Option<Show>,
//! }
//!
//! impl FromInput<'static> for Command {
//! type Context = ();
//!
//! fn from_input(input: &mut ArgsInput, _: &()) -> parkour::Result<Self> {
//! // discard the first argument, which is the path to the executable
//! input.bump_argument().unwrap();
//!
//! let mut show = None;
//! let mut color = None;
//!
//! while !input.is_empty() {
//! if SetOnce(&mut color).apply(input, &Flag::LongShort("color", "c").into())? {
//! continue;
//! }
//!
//! if SetSubcommand(&mut show).apply(input, &())? {
//! continue;
//! }
//!
//! input.expect_empty()?;
//! }
//! Ok(Command { show, color })
//! }
//! }
//! ```
//!
//! This is pretty self-explanatory. Now let's proceed to the main function:
//!
//! ```
//! # use parkour::prelude::*;
//! # struct Command {
//! # color: Option<bool>,
//! # show: Option<()>,
//! # }
//! # impl FromInput<'static> for Command {
//! # type Context = ();
//! # fn from_input(input: &mut ArgsInput, _: &()) -> parkour::Result<Self> {
//! # Ok(Command { color: None, show: None })
//! # }
//! # }
//! #
//! use std::error::Error;
//!
//! fn main() {
//! match Command::from_input(&mut parkour::parser(), &()) {
//! Ok(command) => {
//! println!("parsed successfully");
//! }
//! Err(e) if e.is_early_exit() => {}
//! Err(e) => {
//! eprint!("{}", e);
//! let mut source = e.source();
//! while let Some(s) = source {
//! eprint!(": {}", s);
//! source = s.source();
//! }
//! eprintln!();
//! }
//! }
//! }
//! ```
//!
//! The [`parser`] function creates a new parser instance, which
//! implements [`Parse`]. This is used to parse the `Command`. If it fails, we
//! print the error with its sources. I will implement a more convenient method
//! for this; I just haven't gotten around to it yet. I also plan to implement
//! ANSI color support.
//!
//! What's with the `e.is_early_exit()`, you might wonder? This error is
//! returned when parsing was aborted and can be ignored. This error can be used
//! e.g. when the `--help` flag is encountered:
//!
//! ```no_run
//! # use parkour::prelude::*;
//! # struct Command {
//! # color: Option<bool>,
//! # show: Option<()>,
//! # }
//! impl FromInput<'static> for Command {
//! type Context = ();
//!
//! fn from_input(input: &mut ArgsInput, _: &()) -> Result<Self, parkour::Error> {
//! # let color = None;
//! # let show = None;
//! // <snip>
//! while !input.is_empty() {
//! if input.parse_long_flag("help") || input.parse_short_flag("h") {
//! println!("Usage:\n\
//! my-program [-h,--help]\n\
//! my-program show POS1 -c,--color-space VALUE [-s,--size N]");
//!
//! return Err(parkour::Error::early_exit());
//! }
//!
//! // <snip>
//! }
//! Ok(Command { show, color })
//! }
//! }
//! ```
//!
//! There is one special case that isn't handled yet: The argument `--` usually
//! causes the remaining tokens to be treated as positional arguments, even if
//! they start with a dash. This is easily implemented:
//!
//! ```no_run
//! # use parkour::prelude::*;
//! # struct Command {
//! # color: Option<bool>,
//! | //! # color_space: ColorSpace,
//! # size: u8,
//! # } | random_line_split |
lib.rs | , cmyk, hsv, hsl or cielab
//! ```
//!
//! Now let's implement `Show` as a subcommand. Unfortunately, there's no
//! convenient derive macro (yet):
//!
//! ```
//! # use parkour::prelude::*;
//! # #[derive(FromInputValue)]
//! # enum ColorSpace { Rgb, Cmy, Cmyk, Hsv, Hsl, CieLab }
//! #
//! struct Show {
//! pos1: String,
//! color_space: ColorSpace,
//! size: u8,
//! }
//!
//! impl FromInput<'static> for Show {
//! type Context = ();
//!
//! fn from_input(input: &mut ArgsInput, _: &()) -> parkour::Result<Self> {
//! if input.parse_command("show") {
//! let mut pos1 = None;
//! let mut color_space = None;
//! let mut size = None;
//!
//! while !input.is_empty() {
//! if SetOnce(&mut color_space)
//! .apply(input, &Flag::LongShort("color-space", "c").into())? {
//! continue;
//! }
//!
//! if SetOnce(&mut size)
//! .apply(input, &Flag::LongShort("size", "s").into())? {
//! continue;
//! }
//!
//! if pos1.is_none()
//! && SetPositional(&mut pos1).apply(input, &"pos1".into())? {
//! continue;
//! }
//!
//! input.expect_empty()?;
//! }
//!
//! Ok(Show {
//! pos1: pos1.ok_or_else(|| parkour::Error::missing_argument("pos1"))?,
//! color_space: color_space
//! .ok_or_else(|| parkour::Error::missing_argument("--color-space"))?,
//! size: size.unwrap_or(4),
//! })
//! } else {
//! Err(parkour::Error::no_value())
//! }
//! }
//! }
//! ```
//!
//! To parse a subcommand, we implement the [`FromInput`] trait. We first check
//! if the next argument is the word `show`. If that's the case, we iterate over
//! the remaining input until it is empty.
//!
//! In the subcommand, we expect two named arguments (`--color-space` and
//! `--size`) and a positional argument (`pos1`). Therefore, in each iteration,
//! we first check if we can parse the named arguments, and then the positional
//! argument. If none of them succeeds and there is still input left, then
//! `input.expect_empty()?` throws an error.
//!
//! Producing the `Show` struct is rather straightforward (`pos1` and
//! `--color-space` are required, `--size` defaults to `4`). However, parsing
//! the values involves some type system magic. `SetOnce` and `SetPositional`
//! are [actions], they check if the referenced types can be parsed, and if so,
//! assign the parsed value to the variable automatically. They also ensure that
//! each argument is parsed at most once.
//!
//! Whenever something is parsed, a _context_ is provided that can contain
//! information about _how_ the value should be parsed. In the above example,
//! `Flag::LongShort("color-space", "c").into()` is a context that instructs the
//! parser to parse the color space after the `--color-space` or the `-c` flag.
//!
//! The main command can be implemented similarly:
//!
//! ```
//! # use parkour::prelude::*;
//! # enum ColorSpace { Rgb, Cmy, Cmyk, Hsv, Hsl, CieLab }
//! # struct Show {
//! # pos1: String,
//! # color_space: ColorSpace,
//! # size: u8,
//! # }
//! # impl FromInput<'static> for Show {
//! # type Context = ();
//! # fn from_input(input: &mut ArgsInput, _: &()) -> parkour::Result<Self> {
//! # todo!()
//! # }
//! # }
//! #
//! struct Command {
//! color: Option<bool>,
//! show: Option<Show>,
//! }
//!
//! impl FromInput<'static> for Command {
//! type Context = ();
//!
//! fn from_input(input: &mut ArgsInput, _: &()) -> parkour::Result<Self> {
//! // discard the first argument, which is the path to the executable
//! input.bump_argument().unwrap();
//!
//! let mut show = None;
//! let mut color = None;
//!
//! while !input.is_empty() {
//! if SetOnce(&mut color).apply(input, &Flag::LongShort("color", "c").into())? {
//! continue;
//! }
//!
//! if SetSubcommand(&mut show).apply(input, &())? {
//! continue;
//! }
//!
//! input.expect_empty()?;
//! }
//! Ok(Command { show, color })
//! }
//! }
//! ```
//!
//! This is pretty self-explanatory. Now let's proceed to the main function:
//!
//! ```
//! # use parkour::prelude::*;
//! # struct Command {
//! # color: Option<bool>,
//! # show: Option<()>,
//! # }
//! # impl FromInput<'static> for Command {
//! # type Context = ();
//! # fn from_input(input: &mut ArgsInput, _: &()) -> parkour::Result<Self> {
//! # Ok(Command { color: None, show: None })
//! # }
//! # }
//! #
//! use std::error::Error;
//!
//! fn main() {
//! match Command::from_input(&mut parkour::parser(), &()) {
//! Ok(command) => {
//! println!("parsed successfully");
//! }
//! Err(e) if e.is_early_exit() => {}
//! Err(e) => {
//! eprint!("{}", e);
//! let mut source = e.source();
//! while let Some(s) = source {
//! eprint!(": {}", s);
//! source = s.source();
//! }
//! eprintln!();
//! }
//! }
//! }
//! ```
//!
//! The [`parser`] function creates a new parser instance, which
//! implements [`Parse`]. This is used to parse the `Command`. If it fails, we
//! print the error with its sources. I will implement a more convenient method
//! for this; I just haven't gotten around to it yet. I also plan to implement
//! ANSI color support.
//!
//! What's with the `e.is_early_exit()`, you might wonder? This error is
//! returned when parsing was aborted and can be ignored. This error can be used
//! e.g. when the `--help` flag is encountered:
//!
//! ```no_run
//! # use parkour::prelude::*;
//! # struct Command {
//! # color: Option<bool>,
//! # show: Option<()>,
//! # }
//! impl FromInput<'static> for Command {
//! type Context = ();
//!
//! fn from_input(input: &mut ArgsInput, _: &()) -> Result<Self, parkour::Error> {
//! # let color = None;
//! # let show = None;
//! // <snip>
//! while !input.is_empty() {
//! if input.parse_long_flag("help") || input.parse_short_flag("h") {
//! println!("Usage:\n\
//! my-program [-h,--help]\n\
//! my-program show POS1 -c,--color-space VALUE [-s,--size N]");
//!
//! return Err(parkour::Error::early_exit());
//! }
//!
//! // <snip>
//! }
//! Ok(Command { show, color })
//! }
//! }
//! ```
//!
//! There is one special case that isn't handled yet: The argument `--` usually
//! causes the remaining tokens to be treated as positional arguments, even if
//! they start with a dash. This is easily implemented:
//!
//! ```no_run
//! # use parkour::prelude::*;
//! # struct Command {
//! # color: Option<bool>,
//! # show: Option<()>,
//! # }
//! impl FromInput<'static> for Command {
//! type Context = ();
//!
//! fn from_input(input: &mut ArgsInput, _: &()) -> Result<Self, parkour::Error> {
//! # let color = None;
//! # let show = None;
//! // <snip>
//! while !input.is_empty() {
//! if input.parse_long_flag("") {
//! input.set_ignore_dashes(true);
//! continue;
//! }
//!
//! // <snip>
//! }
//! Ok(Command { show, color })
//! }
//! }
//! ```
//!
//! Unfortunately, this must be repeated in every subcommand.
#![forbid(unsafe_code)]
#![warn(missing_docs)]
pub use error::{Error, ErrorInner};
pub use from_input::{FromInput, FromInputValue};
pub use parse::Parse;
pub use palex::ArgsInput;
#[cfg(feature = "derive")]
pub use parkour_derive::{FromInput, FromInputValue};
pub mod actions;
mod error;
mod from_input;
pub mod help;
pub mod impls;
mod parse;
pub mod util;
/// A parkour result.
pub type Result<T> = std::result::Result<T, Error>;
/// Create a new parser, which can be used to parse the
/// command-line arguments of the program.
pub fn | parser | identifier_name | |
lib.rs |
//! ```
//!
//! Now let's implement `Show` as a subcommand. Unfortunately, there's no
//! convenient derive macro (yet):
//!
//! ```
//! # use parkour::prelude::*;
//! # #[derive(FromInputValue)]
//! # enum ColorSpace { Rgb, Cmy, Cmyk, Hsv, Hsl, CieLab }
//! #
//! struct Show {
//! pos1: String,
//! color_space: ColorSpace,
//! size: u8,
//! }
//!
//! impl FromInput<'static> for Show {
//! type Context = ();
//!
//! fn from_input(input: &mut ArgsInput, _: &()) -> parkour::Result<Self> {
//! if input.parse_command("show") {
//! let mut pos1 = None;
//! let mut color_space = None;
//! let mut size = None;
//!
//! while !input.is_empty() {
//! if SetOnce(&mut color_space)
//! .apply(input, &Flag::LongShort("color-space", "c").into())? {
//! continue;
//! }
//!
//! if SetOnce(&mut size)
//! .apply(input, &Flag::LongShort("size", "s").into())? {
//! continue;
//! }
//!
//! if pos1.is_none()
//! && SetPositional(&mut pos1).apply(input, &"pos1".into())? {
//! continue;
//! }
//!
//! input.expect_empty()?;
//! }
//!
//! Ok(Show {
//! pos1: pos1.ok_or_else(|| parkour::Error::missing_argument("pos1"))?,
//! color_space: color_space
//! .ok_or_else(|| parkour::Error::missing_argument("--color-space"))?,
//! size: size.unwrap_or(4),
//! })
//! } else {
//! Err(parkour::Error::no_value())
//! }
//! }
//! }
//! ```
//!
//! To parse a subcommand, we implement the [`FromInput`] trait. We first check
//! if the next argument is the word `show`. If that's the case, we iterate over
//! the remaining input until it is empty.
//!
//! In the subcommand, we expect two named arguments (`--color-space` and
//! `--size`) and a positional argument (`pos1`). Therefore, in each iteration,
//! we first check if we can parse the named arguments, and then the positional
//! argument. If none of them succeeds and there is still input left, then
//! `input.expect_empty()?` throws an error.
//!
//! Producing the `Show` struct is rather straightforward (`pos1` and
//! `--color-space` are required, `--size` defaults to `4`). However, parsing
//! the values involves some type system magic. `SetOnce` and `SetPositional`
//! are [actions], they check if the referenced types can be parsed, and if so,
//! assign the parsed value to the variable automatically. They also ensure that
//! each argument is parsed at most once.
//!
//! Whenever something is parsed, a _context_ is provided that can contain
//! information about _how_ the value should be parsed. In the above example,
//! `Flag::LongShort("color-space", "c").into()` is a context that instructs the
//! parser to parse the color space after the `--color-space` or the `-c` flag.
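//!
//! As a minimal sketch of an action/context pair on its own (the `verbose`
//! flag is a hypothetical name chosen for illustration, not part of the
//! example program):
//!
//! ```no_run
//! # use parkour::prelude::*;
//! # fn demo(input: &mut ArgsInput) -> parkour::Result<()> {
//! let mut verbose: Option<bool> = None;
//! // the action (`SetOnce`) parses the value at most once and stores it in
//! // `verbose`; the context (`Flag`) says where to look: after `--verbose`
//! // or `-v`
//! if SetOnce(&mut verbose)
//!     .apply(input, &Flag::LongShort("verbose", "v").into())? {
//!     // the flag was consumed; a real parsing loop would `continue` here
//! }
//! # Ok(())
//! # }
//! ```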
//!
//! The main command can be implemented similarly:
//!
//! ```
//! # use parkour::prelude::*;
//! # enum ColorSpace { Rgb, Cmy, Cmyk, Hsv, Hsl, CieLab }
//! # struct Show {
//! # pos1: String,
//! # color_space: ColorSpace,
//! # size: u8,
//! # }
//! # impl FromInput<'static> for Show {
//! # type Context = ();
//! # fn from_input(input: &mut ArgsInput, _: &()) -> parkour::Result<Self> {
//! # todo!()
//! # }
//! # }
//! #
//! struct Command {
//! color: Option<bool>,
//! show: Option<Show>,
//! }
//!
//! impl FromInput<'static> for Command {
//! type Context = ();
//!
//! fn from_input(input: &mut ArgsInput, _: &()) -> parkour::Result<Self> {
//! // discard the first argument, which is the path to the executable
//! input.bump_argument().unwrap();
//!
//! let mut show = None;
//! let mut color = None;
//!
//! while !input.is_empty() {
//! if SetOnce(&mut color).apply(input, &Flag::LongShort("color", "c").into())? {
//! continue;
//! }
//!
//! if SetSubcommand(&mut show).apply(input, &())? {
//! continue;
//! }
//!
//! input.expect_empty()?;
//! }
//! Ok(Command { show, color })
//! }
//! }
//! ```
//!
//! This is pretty self-explanatory. Now let's proceed to the main function:
//!
//! ```
//! # use parkour::prelude::*;
//! # struct Command {
//! # color: Option<bool>,
//! # show: Option<()>,
//! # }
//! # impl FromInput<'static> for Command {
//! # type Context = ();
//! # fn from_input(input: &mut ArgsInput, _: &()) -> parkour::Result<Self> {
//! # Ok(Command { color: None, show: None })
//! # }
//! # }
//! #
//! use std::error::Error;
//!
//! fn main() {
//! match Command::from_input(&mut parkour::parser(), &()) {
//! Ok(command) => {
//! println!("parsed successfully");
//! }
//! Err(e) if e.is_early_exit() => {}
//! Err(e) => {
//! eprint!("{}", e);
//! let mut source = e.source();
//! while let Some(s) = source {
//! eprint!(": {}", s);
//! source = s.source();
//! }
//! eprintln!();
//! }
//! }
//! }
//! ```
//!
//! The [`parser`] function creates a new parser instance, which
//! implements [`Parse`]. This is used to parse the `Command`. If it fails, we
//! print the error with its sources. I will implement a more convenient method
//! for this; I just haven't gotten around to it yet. I also plan to implement
//! ANSI color support.
//!
//! What's with the `e.is_early_exit()`, you might wonder? This error is
//! returned when parsing was aborted and can be ignored. It can be used,
//! e.g., when the `--help` flag is encountered:
//!
//! ```no_run
//! # use parkour::prelude::*;
//! # struct Command {
//! # color: Option<bool>,
//! # show: Option<()>,
//! # }
//! impl FromInput<'static> for Command {
//! type Context = ();
//!
//! fn from_input(input: &mut ArgsInput, _: &()) -> Result<Self, parkour::Error> {
//! # let color = None;
//! # let show = None;
//! // <snip>
//! while !input.is_empty() {
//! if input.parse_long_flag("help") || input.parse_short_flag("h") {
//! println!("Usage:\n\
//! my-program [-h,--help]\n\
//! my-program show POS1 -c,--color-space VALUE [-s,--size N]");
//!
//! return Err(parkour::Error::early_exit());
//! }
//!
//! // <snip>
//! }
//! Ok(Command { show, color })
//! }
//! }
//! ```
//!
//! There is one special case that isn't handled yet: The argument `--` usually
//! causes the remaining tokens to be treated as positional arguments, even if
//! they start with a dash. This is easily implemented:
//!
//! ```no_run
//! # use parkour::prelude::*;
//! # struct Command {
//! # color: Option<bool>,
//! # show: Option<()>,
//! # }
//! impl FromInput<'static> for Command {
//! type Context = ();
//!
//! fn from_input(input: &mut ArgsInput, _: &()) -> Result<Self, parkour::Error> {
//! # let color = None;
//! # let show = None;
//! // <snip>
//! while !input.is_empty() {
//! if input.parse_long_flag("") {
//! input.set_ignore_dashes(true);
//! continue;
//! }
//!
//! // <snip>
//! }
//! Ok(Command { show, color })
//! }
//! }
//! ```
//!
//! Unfortunately, this must be repeated in every subcommand.
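//!
//! One way to reduce that repetition could be a small helper that every
//! `from_input` implementation calls at the top of its loop. This is only a
//! sketch built from the two calls shown above; such a helper is not part of
//! parkour itself:
//!
//! ```no_run
//! # use parkour::prelude::*;
//! /// Hypothetical helper: consume a bare `--` and make the remaining
//! /// tokens be treated as positional arguments.
//! fn consume_double_dash(input: &mut ArgsInput) -> bool {
//!     if input.parse_long_flag("") {
//!         input.set_ignore_dashes(true);
//!         true
//!     } else {
//!         false
//!     }
//! }
//! ```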
#![forbid(unsafe_code)]
#![warn(missing_docs)]
pub use error::{Error, ErrorInner};
pub use from_input::{FromInput, FromInputValue};
pub use parse::Parse;
pub use palex::ArgsInput;
#[cfg(feature = "derive")]
pub use parkour_derive::{FromInput, FromInputValue};
pub mod actions;
mod error;
mod from_input;
pub mod help;
pub mod impls;
mod parse;
pub mod util;
/// A parkour result.
pub type Result<T> = std::result::Result<T, Error>;
/// Create a new parser, which can be used to parse the
/// command-line arguments of the program.
pub fn parser() -> ArgsInput | {
ArgsInput::from_args()
} | identifier_body | |
wikipedia.py | fsan Sarikaya.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Adds Wikipedia search support to the auto-tagger. Requires the
BeautifulSoup library.
"""
from __future__ import division, absolute_import, print_function
from beets.autotag.hooks import Distance
from beets.plugins import BeetsPlugin
from requests.exceptions import ConnectionError
import time
import urllib.request
from bs4 import BeautifulSoup
info_boxList = ['Released', 'Genre', 'Length', 'Label']
# -----------------------------------------------------------------------
# is Info Box check
def is_in_info(liste):
for i in info_boxList:
if(len(liste) != 0 and liste[0] == i):
return True
return False
def get_track_length(duration):
"""
Returns the track length in seconds for a wiki duration.
"""
try:
length = time.strptime(duration, '%M:%S')
except ValueError:
return None
return length.tm_min * 60 + length.tm_sec
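# for example (hypothetical inputs, shown for illustration only):
# get_track_length('3:45') returns 225, get_track_length('n/a') returns None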
# ----------------------------------------------------------------------
"""
WikiAlbum class is being served like AlbumInfo object which keeps all album
meta data in itself.
"""
class WikiAlbum(object):
def __init__(self, artist, album_name):
self.album = album_name
self.artist = artist
self.tracks = []
self.album_length = ""
self.label = None
self.year = None
self.data_source = "Wikipedia"
self.data_url = ""
self.album_id = 1
self.va = False
self.artist_id = 1
self.asin = None
self.albumtype = None
self.year = None
self.month = None
self.day = None
self.mediums = 1
self.artist_sort = None
self.releasegroup_id = None
self.catalognum = None
self.script = None
self.language = None
self.country = None
self.albumstatus = None
self.media = None
self.albumdisambig = None
self.artist_credit = None
self.original_year = None
self.original_month = None
self.original_day = None
try:
url = 'https://en.wikipedia.org/wiki/' + album_name + \
'_(' + artist + '_album)'
html = urllib.request.urlopen(url).read()
except urllib.error.HTTPError:
try:
url = 'https://en.wikipedia.org/wiki/' + album_name + \
'_(album)'
html = urllib.request.urlopen(url).read()
except urllib.error.HTTPError:
try:
url = 'https://en.wikipedia.org/wiki/' + album_name
html = urllib.request.urlopen(url).read()
except urllib.error.HTTPError:
try:
# in case the album name has a (Deluxe) extension
url = 'https://en.wikipedia.org/wiki/' +\
album_name[:-9] + '_(' + artist + '_album)'
html = urllib.request.urlopen(url).read()
except urllib.error.HTTPError:
raise  # re-raise the original HTTPError
except ConnectionError:
raise  # re-raise the original ConnectionError
self.data_url = url
soup = BeautifulSoup(html, "lxml")
# ------------------ INFOBOX PARSING ----------------------#
info_box = soup.findAll("table", {"class": "infobox"})
info_counter = 1
for info in info_box:
for row in info.findAll("tr"):
if (self.artist == "" and info_counter == 3):
self.artist = row.getText().split()[-1]
data = (row.getText()).split('\n')
data = list(filter(None, data))
if (is_in_info(data)):
if(data[0] == 'Label'):
self.label = str(data[1:])
elif(data[0] == 'Released'):
if (data[1][-1] == ")"):
self.year = int(data[1][-11:-7])
self.month = int(data[1][-6:-4])
self.day = int(data[1][-3:-1])
else:
self.year = int(data[1][-4:])
# Album length which is converted into beets length format
elif(data[0] == "Length"):
self.album_length = get_track_length(data[1])
# getting Genre
elif(data[0] == "Genre"):
fixed_genre = ""
for character in data[1]:
if (character != ("[" or "{" or "(")):
fixed_genre += character
else:
break
self.genre = fixed_genre
info_counter += 1
track_tables = soup.findAll("table", {"class": "tracklist"})
# set the medium total; the total number of tracks in the album is required
track_counter = 0
for table in track_tables:
for row in table.findAll("tr"):
row_data = (row.getText()).split('\n')
row_data = list(filter(None, row_data))
# pick only the tracks, not irrelevant parts of the tables.
# the len(row_data) check is used for getting the correct
# table data & for checking whether the track number exists
if (row_data[0][:-1].isdigit() and len(row_data) > 3):
track_counter += 1
for table in track_tables:
for row in table.findAll("tr"):
row_data = (row.getText()).split('\n')
row_data = list(filter(None, row_data))
# pick only the tracks, not irrelevant parts of the tables.
# the len(row_data) check is used for getting the correct table
# data and for checking whether the track number exists
if (row_data[0][:-1].isdigit() and len(row_data) > 3):
one_track = Track(row_data)
one_track.set_data_url(self.data_url)
one_track.set_medium_total(track_counter)
self.tracks.append(one_track)
def get_album_len(self):
return self.album_length
def get_tracks(self):
return self.tracks
# keeps the metadata of tracks which are gathered from wikipedia
# like TrackInfo object in beets
class Track(object):
def __init__(self, row):
#####
self.medium = 1
self.disctitle = "CD"
#####
self.medium_index = int(row[0][:-1])
self.track_id = int(row[0][:-1])
self.index = int(row[0][:-1])
# wiping out the character (") from track name
temp_name = ""
for i in row[1]:
|
self.title = str(temp_name)
self.writer = list(row[2].split(','))
self.producers = row[3:-1]
self.length = get_track_length(row[-1])
self.artist = None
self.artist_id = None
self.media = None
self.medium_total = None
self.artist_sort = None
self.artist_credit = None
self.data_source = "Wikipedia"
self.data_url = None
self.lyricist = None
self.composer = None
self.composer_sort = None
self.arranger = None
self.track_alt = None
self.track = self.index
self.disc = self.medium
self.disctotal = 2
self.mb_trackid = self.track_id
self.mb_albumid = None
self.mb_album_artistid = None
self.mb_artist_id = None
self.mb_releasegroupid = None
self.comp = 0
self.tracktotal = None
self.albumartist_sort = None
self.albumartist_credit = None
def set_medium_total(self, num):
self.medium_total = num
self.track_total = num
def set_data_url(self, url):
self.data_url = url
def get_name(self):
return self.title
def get_writer(self):
return self.writer
def get_producers(self):
return self.producers
def get_length(self):
return self.length
class Wikipedia(BeetsPlugin):
def __init__(self):
super(Wikipedia, self).__init__()
self.config.add({
'source_weight': 0.50
})
# ----------------------------------------------
""" Track_distance
item --> track to be matched(Item Object)
info is the TrackInfo object that proposed as a match
should return a (dist,dist_max) pair of floats indicating the distance
"""
def track_distance(self, item, info):
dist = Distance()
return dist
# ----------------------------------------------
"""
album_info --> AlbumInfo Object reflecting the album to be compared.
items --> sequence of all Item objects that will be matched
mapping --> dictionary mapping Items to TrackInfo objects
"""
def album_distance(self, items, album_info, mapping):
"""
Returns the album distance.
"""
dist = Distance()
if (album_info.data_source == 'Wikipedia'):
dist.add(' | if(i != '"'):
temp_name += i | conditional_block |
wikipedia.py | an Sarikaya.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Adds Wikipedia search support to the auto-tagger. Requires the
BeautifulSoup library.
"""
from __future__ import division, absolute_import, print_function
from beets.autotag.hooks import Distance
from beets.plugins import BeetsPlugin
from requests.exceptions import ConnectionError
import time
import urllib.request
from bs4 import BeautifulSoup
info_boxList = ['Released', 'Genre', 'Length', 'Label']
# -----------------------------------------------------------------------
# is Info Box check
def is_in_info(liste):
for i in info_boxList:
if(len(liste) != 0 and liste[0] == i):
return True
return False
def get_track_length(duration):
"""
Returns the track length in seconds for a wiki duration.
"""
try:
length = time.strptime(duration, '%M:%S')
except ValueError:
return None
return length.tm_min * 60 + length.tm_sec
# ----------------------------------------------------------------------
"""
WikiAlbum class is being served like AlbumInfo object which keeps all album
meta data in itself.
"""
class WikiAlbum(object):
def __init__(self, artist, album_name):
self.album = album_name
self.artist = artist
self.tracks = []
self.album_length = ""
self.label = None
self.year = None
self.data_source = "Wikipedia"
self.data_url = ""
self.album_id = 1
self.va = False
self.artist_id = 1
self.asin = None
self.albumtype = None
self.year = None
self.month = None
self.day = None
self.mediums = 1
self.artist_sort = None
self.releasegroup_id = None
self.catalognum = None
self.script = None
self.language = None
self.country = None
self.albumstatus = None
self.media = None
self.albumdisambig = None
self.artist_credit = None
self.original_year = None
self.original_month = None
self.original_day = None
try:
url = 'https://en.wikipedia.org/wiki/' + album_name + \
'_(' + artist + '_album)'
html = urllib.request.urlopen(url).read()
except urllib.error.HTTPError:
try:
url = 'https://en.wikipedia.org/wiki/' + album_name + \
'_(album)'
html = urllib.request.urlopen(url).read()
except urllib.error.HTTPError:
try:
url = 'https://en.wikipedia.org/wiki/' + album_name
html = urllib.request.urlopen(url).read()
except urllib.error.HTTPError:
try:
# in case the album name has a (Deluxe) extension
url = 'https://en.wikipedia.org/wiki/' +\
album_name[:-9] + '_(' + artist + '_album)'
html = urllib.request.urlopen(url).read()
except urllib.error.HTTPError:
raise  # re-raise the original HTTPError
except ConnectionError:
raise  # re-raise the original ConnectionError
self.data_url = url
soup = BeautifulSoup(html, "lxml")
# ------------------ INFOBOX PARSING ----------------------#
info_box = soup.findAll("table", {"class": "infobox"})
info_counter = 1
for info in info_box:
for row in info.findAll("tr"):
if (self.artist == "" and info_counter == 3):
self.artist = row.getText().split()[-1]
data = (row.getText()).split('\n')
data = list(filter(None, data))
if (is_in_info(data)):
if(data[0] == 'Label'):
self.label = str(data[1:])
elif(data[0] == 'Released'):
if (data[1][-1] == ")"):
self.year = int(data[1][-11:-7])
self.month = int(data[1][-6:-4])
self.day = int(data[1][-3:-1])
else:
self.year = int(data[1][-4:])
# Album length which is converted into beets length format
elif(data[0] == "Length"):
self.album_length = get_track_length(data[1])
# getting Genre
elif(data[0] == "Genre"):
fixed_genre = ""
for character in data[1]:
if (character != ("[" or "{" or "(")):
fixed_genre += character
else:
break
self.genre = fixed_genre
info_counter += 1
track_tables = soup.findAll("table", {"class": "tracklist"})
# set the medium total; the total number of tracks in the album is required
track_counter = 0
for table in track_tables:
for row in table.findAll("tr"):
row_data = (row.getText()).split('\n')
row_data = list(filter(None, row_data))
# pick only the tracks, not irrelevant parts of the tables.
# the len(row_data) check is used for getting the correct
# table data & for checking whether the track number exists
if (row_data[0][:-1].isdigit() and len(row_data) > 3):
track_counter += 1
for table in track_tables:
for row in table.findAll("tr"):
row_data = (row.getText()).split('\n')
row_data = list(filter(None, row_data))
# pick only the tracks, not irrelevant parts of the tables.
# the len(row_data) check is used for getting the correct table
# data and for checking whether the track number exists
if (row_data[0][:-1].isdigit() and len(row_data) > 3):
one_track = Track(row_data)
one_track.set_data_url(self.data_url)
one_track.set_medium_total(track_counter)
self.tracks.append(one_track)
def get_album_len(self):
return self.album_length
def get_tracks(self):
return self.tracks
# keeps the metadata of tracks which are gathered from wikipedia
# like TrackInfo object in beets
class Track(object):
def __init__(self, row):
#####
self.medium = 1
self.disctitle = "CD"
#####
self.medium_index = int(row[0][:-1])
self.track_id = int(row[0][:-1])
self.index = int(row[0][:-1])
# wiping out the character (") from track name
temp_name = ""
for i in row[1]:
if(i != '"'):
temp_name += i
self.title = str(temp_name)
self.writer = list(row[2].split(','))
self.producers = row[3:-1]
self.length = get_track_length(row[-1])
self.artist = None
self.artist_id = None
self.media = None
self.medium_total = None
self.artist_sort = None
self.artist_credit = None
self.data_source = "Wikipedia"
self.data_url = None
self.lyricist = None
self.composer = None
self.composer_sort = None
self.arranger = None
self.track_alt = None
self.track = self.index
self.disc = self.medium
self.disctotal = 2
self.mb_trackid = self.track_id
self.mb_albumid = None
self.mb_album_artistid = None
self.mb_artist_id = None
self.mb_releasegroupid = None
self.comp = 0
self.tracktotal = None
self.albumartist_sort = None
self.albumartist_credit = None
def set_medium_total(self, num):
self.medium_total = num
self.track_total = num
def set_data_url(self, url):
self.data_url = url
def get_name(self):
return self.title
def get_writer(self):
return self.writer
def get_producers(self):
return self.producers
def get_length(self):
return self.length
class Wikipedia(BeetsPlugin):
def | (self):
super(Wikipedia, self).__init__()
self.config.add({
'source_weight': 0.50
})
# ----------------------------------------------
""" Track_distance
item --> track to be matched(Item Object)
info is the TrackInfo object that proposed as a match
should return a (dist,dist_max) pair of floats indicating the distance
"""
def track_distance(self, item, info):
dist = Distance()
return dist
# ----------------------------------------------
"""
album_info --> AlbumInfo Object reflecting the album to be compared.
items --> sequence of all Item objects that will be matched
mapping --> dictionary mapping Items to TrackInfo objects
"""
def album_distance(self, items, album_info, mapping):
"""
Returns the album distance.
"""
dist = Distance()
if (album_info.data_source == 'Wikipedia'):
dist.add(' | __init__ | identifier_name |
wikipedia.py | fsan Sarikaya.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Adds Wikipedia search support to the auto-tagger. Requires the
BeautifulSoup library.
"""
from __future__ import division, absolute_import, print_function
from beets.autotag.hooks import Distance
from beets.plugins import BeetsPlugin
from requests.exceptions import ConnectionError
import time
import urllib.request
from bs4 import BeautifulSoup
info_boxList = ['Released', 'Genre', 'Length', 'Label']
# -----------------------------------------------------------------------
# is Info Box check
def is_in_info(liste):
for i in info_boxList:
if(len(liste) != 0 and liste[0] == i):
return True
return False
def get_track_length(duration):
"""
Returns the track length in seconds for a wiki duration.
"""
try:
length = time.strptime(duration, '%M:%S')
except ValueError:
return None
return length.tm_min * 60 + length.tm_sec
# ----------------------------------------------------------------------
"""
WikiAlbum class is being served like AlbumInfo object which keeps all album
meta data in itself.
"""
class WikiAlbum(object):
def __init__(self, artist, album_name):
self.album = album_name
self.artist = artist
self.tracks = []
self.album_length = ""
self.label = None
self.year = None
self.data_source = "Wikipedia"
self.data_url = ""
self.album_id = 1
self.va = False
self.artist_id = 1
self.asin = None
self.albumtype = None
self.year = None
self.month = None
self.day = None
self.mediums = 1
self.artist_sort = None
self.releasegroup_id = None
self.catalognum = None
self.script = None
self.language = None
self.country = None
self.albumstatus = None
self.media = None
self.albumdisambig = None
self.artist_credit = None
self.original_year = None
self.original_month = None
self.original_day = None
try:
url = 'https://en.wikipedia.org/wiki/' + album_name + \
'_(' + artist + '_album)'
html = urllib.request.urlopen(url).read()
except urllib.error.HTTPError:
try:
url = 'https://en.wikipedia.org/wiki/' + album_name + \
'_(album)'
html = urllib.request.urlopen(url).read()
except urllib.error.HTTPError:
try:
url = 'https://en.wikipedia.org/wiki/' + album_name
html = urllib.request.urlopen(url).read()
except urllib.error.HTTPError:
try:
# in case the album name has a (Deluxe) extension
url = 'https://en.wikipedia.org/wiki/' +\
album_name[:-9] + '_(' + artist + '_album)'
html = urllib.request.urlopen(url).read()
except urllib.error.HTTPError:
raise  # re-raise the original HTTPError
except ConnectionError:
raise  # re-raise the original ConnectionError
self.data_url = url
soup = BeautifulSoup(html, "lxml")
# ------------------ INFOBOX PARSING ----------------------#
info_box = soup.findAll("table", {"class": "infobox"})
info_counter = 1
for info in info_box:
for row in info.findAll("tr"):
if (self.artist == "" and info_counter == 3):
self.artist = row.getText().split()[-1]
data = (row.getText()).split('\n')
data = list(filter(None, data))
if (is_in_info(data)):
if(data[0] == 'Label'):
self.label = str(data[1:])
elif(data[0] == 'Released'):
if (data[1][-1] == ")"):
self.year = int(data[1][-11:-7])
self.month = int(data[1][-6:-4])
self.day = int(data[1][-3:-1])
else:
self.year = int(data[1][-4:])
# Album length which is converted into beets length format
elif(data[0] == "Length"):
self.album_length = get_track_length(data[1])
# getting Genre
elif(data[0] == "Genre"):
fixed_genre = ""
for character in data[1]:
if (character != ("[" or "{" or "(")):
fixed_genre += character
else:
break
self.genre = fixed_genre
info_counter += 1
track_tables = soup.findAll("table", {"class": "tracklist"})
# set the medium total; the total number of tracks in the album is required
track_counter = 0
for table in track_tables:
for row in table.findAll("tr"):
row_data = (row.getText()).split('\n')
row_data = list(filter(None, row_data))
# pick only the tracks, not irrelevant parts of the tables.
# the len(row_data) check is used for getting the correct
# table data & for checking whether the track number exists
if (row_data[0][:-1].isdigit() and len(row_data) > 3):
track_counter += 1
for table in track_tables:
for row in table.findAll("tr"):
row_data = (row.getText()).split('\n')
row_data = list(filter(None, row_data))
# pick only the tracks, not irrelevant parts of the tables.
# the len(row_data) check is used for getting the correct table
# data and for checking whether the track number exists
if (row_data[0][:-1].isdigit() and len(row_data) > 3):
one_track = Track(row_data)
one_track.set_data_url(self.data_url)
one_track.set_medium_total(track_counter)
self.tracks.append(one_track)
def get_album_len(self):
return self.album_length
def get_tracks(self):
return self.tracks
# keeps the metadata of tracks which are gathered from wikipedia
# like TrackInfo object in beets
class Track(object):
def __init__(self, row):
#####
self.medium = 1
self.disctitle = "CD"
#####
self.medium_index = int(row[0][:-1])
self.track_id = int(row[0][:-1])
self.index = int(row[0][:-1])
# wiping out the character (") from track name
temp_name = ""
for i in row[1]:
if(i != '"'):
temp_name += i
self.title = str(temp_name)
self.writer = list(row[2].split(','))
self.producers = row[3:-1]
self.length = get_track_length(row[-1])
self.artist = None
self.artist_id = None
self.media = None
self.medium_total = None
self.artist_sort = None
self.artist_credit = None
self.data_source = "Wikipedia"
self.data_url = None
self.lyricist = None
self.composer = None
self.composer_sort = None
self.arranger = None
self.track_alt = None
self.track = self.index
self.disc = self.medium
self.disctotal = 2
self.mb_trackid = self.track_id
self.mb_albumid = None
self.mb_album_artistid = None
self.mb_artist_id = None
self.mb_releasegroupid = None
self.comp = 0
self.tracktotal = None
self.albumartist_sort = None
self.albumartist_credit = None
def set_medium_total(self, num):
self.medium_total = num
self.track_total = num
def set_data_url(self, url):
self.data_url = url
def get_name(self):
return self.title
def get_writer(self):
return self.writer
def get_producers(self):
return self.producers
def get_length(self):
return self.length
class Wikipedia(BeetsPlugin):
def __init__(self):
super(Wikipedia, self).__init__()
self.config.add({
'source_weight': 0.50
})
# ----------------------------------------------
""" Track_distance
item --> track to be matched(Item Object)
info is the TrackInfo object that proposed as a match
should return a (dist,dist_max) pair of floats indicating the distance
"""
def track_distance(self, item, info):
dist = Distance()
return dist
# ----------------------------------------------
"""
album_info --> AlbumInfo Object reflecting the album to be compared.
items --> sequence of all Item objects that will be matched
mapping --> dictionary mapping Items to TrackInfo objects
"""
def album_distance(self, items, album_info, mapping): | Returns the album distance.
"""
dist = Distance()
if (album_info.data_source == 'Wikipedia'):
dist.add(' | """ | random_line_split |
wikipedia.py | fsan Sarikaya.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Adds Wikipedia search support to the auto-tagger. Requires the
BeautifulSoup library.
"""
from __future__ import division, absolute_import, print_function
from beets.autotag.hooks import Distance
from beets.plugins import BeetsPlugin
from requests.exceptions import ConnectionError
import time
import urllib.request
from bs4 import BeautifulSoup
info_boxList = ['Released', 'Genre', 'Length', 'Label']
# -----------------------------------------------------------------------
# is Info Box check
def is_in_info(liste):
for i in info_boxList:
if(len(liste) != 0 and liste[0] == i):
return True
return False
def get_track_length(duration):
"""
Returns the track length in seconds for a wiki duration.
"""
try:
length = time.strptime(duration, '%M:%S')
except ValueError:
return None
return length.tm_min * 60 + length.tm_sec
# ----------------------------------------------------------------------
"""
WikiAlbum class is being served like AlbumInfo object which keeps all album
meta data in itself.
"""
class WikiAlbum(object):
def __init__(self, artist, album_name):
self.album = album_name
self.artist = artist
self.tracks = []
self.album_length = ""
self.label = None
self.year = None
self.data_source = "Wikipedia"
self.data_url = ""
self.album_id = 1
self.va = False
self.artist_id = 1
self.asin = None
self.albumtype = None
self.year = None
self.month = None
self.day = None
self.mediums = 1
self.artist_sort = None
self.releasegroup_id = None
self.catalognum = None
self.script = None
self.language = None
self.country = None
self.albumstatus = None
self.media = None
self.albumdisambig = None
self.artist_credit = None
self.original_year = None
self.original_month = None
self.original_day = None
try:
url = 'https://en.wikipedia.org/wiki/' + album_name + \
'_(' + artist + '_album)'
html = urllib.request.urlopen(url).read()
except urllib.error.HTTPError:
try:
url = 'https://en.wikipedia.org/wiki/' + album_name + \
'_(album)'
html = urllib.request.urlopen(url).read()
except urllib.error.HTTPError:
try:
url = 'https://en.wikipedia.org/wiki/' + album_name
html = urllib.request.urlopen(url).read()
except urllib.error.HTTPError:
try:
# in case the album name has a (Deluxe) extension
url = 'https://en.wikipedia.org/wiki/' +\
album_name[:-9] + '_(' + artist + '_album)'
html = urllib.request.urlopen(url).read()
except urllib.error.HTTPError:
raise  # re-raise the original HTTPError
except ConnectionError:
raise  # re-raise the original ConnectionError
self.data_url = url
soup = BeautifulSoup(html, "lxml")
# ------------------ INFOBOX PARSING ----------------------#
info_box = soup.findAll("table", {"class": "infobox"})
info_counter = 1
for info in info_box:
for row in info.findAll("tr"):
if (self.artist == "" and info_counter == 3):
self.artist = row.getText().split()[-1]
data = (row.getText()).split('\n')
data = list(filter(None, data))
if (is_in_info(data)):
if(data[0] == 'Label'):
self.label = str(data[1:])
elif(data[0] == 'Released'):
if (data[1][-1] == ")"):
self.year = int(data[1][-11:-7])
self.month = int(data[1][-6:-4])
self.day = int(data[1][-3:-1])
else:
self.year = int(data[1][-4:])
# Album length which is converted into beets length format
elif(data[0] == "Length"):
self.album_length = get_track_length(data[1])
# getting Genre
elif(data[0] == "Genre"):
fixed_genre = ""
for character in data[1]:
if (character != ("[" or "{" or "(")):
fixed_genre += character
else:
break
self.genre = fixed_genre
info_counter += 1
track_tables = soup.findAll("table", {"class": "tracklist"})
# set the medium total; the total number of tracks in the album is required
track_counter = 0
for table in track_tables:
for row in table.findAll("tr"):
row_data = (row.getText()).split('\n')
row_data = list(filter(None, row_data))
# pick only the tracks, not irrelevant parts of the tables.
# the len(row_data) check is used for getting the correct
# table data & for checking whether the track number exists
if (row_data[0][:-1].isdigit() and len(row_data) > 3):
track_counter += 1
for table in track_tables:
for row in table.findAll("tr"):
row_data = (row.getText()).split('\n')
row_data = list(filter(None, row_data))
# pick only the tracks, not irrelevant parts of the tables.
# the len(row_data) check is used for getting the correct table
# data and for checking whether the track number exists
if (row_data[0][:-1].isdigit() and len(row_data) > 3):
one_track = Track(row_data)
one_track.set_data_url(self.data_url)
one_track.set_medium_total(track_counter)
self.tracks.append(one_track)
def get_album_len(self):
return self.album_length
def get_tracks(self):
return self.tracks
# keeps the metadata of tracks which are gathered from wikipedia
# like TrackInfo object in beets
class Track(object):
def __init__(self, row):
#####
self.medium = 1
self.disctitle = "CD"
#####
self.medium_index = int(row[0][:-1])
self.track_id = int(row[0][:-1])
self.index = int(row[0][:-1])
# wiping out the character (") from track name
temp_name = ""
for i in row[1]:
if(i != '"'):
temp_name += i
self.title = str(temp_name)
self.writer = list(row[2].split(','))
self.producers = row[3:-1]
self.length = get_track_length(row[-1])
self.artist = None
self.artist_id = None
self.media = None
self.medium_total = None
self.artist_sort = None
self.artist_credit = None
self.data_source = "Wikipedia"
self.data_url = None
self.lyricist = None
self.composer = None
self.composer_sort = None
self.arranger = None
self.track_alt = None
self.track = self.index
self.disc = self.medium
self.disctotal = 2
self.mb_trackid = self.track_id
self.mb_albumid = None
self.mb_album_artistid = None
self.mb_artist_id = None
self.mb_releasegroupid = None
self.comp = 0
self.tracktotal = None
self.albumartist_sort = None
self.albumartist_credit = None
def set_medium_total(self, num):
self.medium_total = num
self.track_total = num
def set_data_url(self, url):
self.data_url = url
def get_name(self):
return self.title
def get_writer(self):
|
def get_producers(self):
return self.producers
def get_length(self):
return self.length
class Wikipedia(BeetsPlugin):
def __init__(self):
super(Wikipedia, self).__init__()
self.config.add({
'source_weight': 0.50
})
# ----------------------------------------------
""" Track_distance
item --> track to be matched(Item Object)
info is the TrackInfo object that proposed as a match
should return a (dist,dist_max) pair of floats indicating the distance
"""
def track_distance(self, item, info):
dist = Distance()
return dist
# ----------------------------------------------
"""
album_info --> AlbumInfo Object reflecting the album to be compared.
items --> sequence of all Item objects that will be matched
mapping --> dictionary mapping Items to TrackInfo objects
"""
def album_distance(self, items, album_info, mapping):
"""
Returns the album distance.
"""
dist = Distance()
if (album_info.data_source == 'Wikipedia'):
dist.add(' | return self.writer | identifier_body |
import-docs.py | 8')
rowTrm = rowStr.strip()
rowLwr = rowTrm.lower()
gold_actions.append(rowLwr)
gold_initiators = []
for row in C[1:]:
rowStr = row.encode('utf-8')
try:
rowNme,rowCOW = rowStr.split('(')
except:
rowNme = rowStr
rowTrm = rowNme.strip()
#rowUpr = rowTrm.upper()
if len(rowTrm) < 1:
rowTrm = 'None'
gold_initiators.append(rowTrm)
gold_targets = []
for row in D[1:]:
rowStr = row.encode('utf-8')
try:
rowNme,rowCOW = rowStr.split('(')
except:
rowNme = rowStr
rowTrm = rowNme.strip()
#rowUpr = rowTrm.upper()
if len(rowTrm) < 1:
rowTrm = 'None'
gold_targets.append(rowTrm)
gold_months = []
for row in E[1:]:
try:
gold_months.append(int(row))
except:
gold_months.append(1) # default to January
gold_days = []
for row in F[1:]:
try:
gold_days.append(int(row))
except:
gold_days.append(1) # default to first of month
gold_years = []
for row in G[1:]:
try:
gold_years.append(int(row))
except:
gold_years.append(None)
# gs_mids is a dictionary of document keys and their integer MID levels
gs_mids = {}
gs_actions = {}
gs_initiators = {}
gs_targets = {}
gs_stdates = {}
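# hypothetical illustration (the key format is an assumption, not taken
# from the data): gs_mids = {'doc-0042': 17} would record that the
# highest-level action coded for document 'doc-0042' was a Clash (17)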
# loop through the spreadsheet columns, selecting the highest MID for each doc
for i in range(0,len(gold_actions)):
thisAction = gold_actions[i]
if (gold_actions[i] is not None) and (gold_actions[i] != ""):
# convert string to int (e.g., 'Clash (17)' to 17)
junk,mid_level = thisAction.split('(')
mid_level,junk = mid_level.split(')')
mid_level = int(mid_level)
# use key to check if this document already has an MID level
if gold_keys[i] in gs_mids:
if mid_level > gs_mids[gold_keys[i]]:
update_dict = True
else:
update_dict = False
else:
update_dict = True
# if the key is undefined or the new mid_level is higher
# then update the dictionary
if update_dict:
gs_mids[gold_keys[i]] = mid_level
gs_actions[gold_keys[i]] = gold_actions[i]
gs_initiators[gold_keys[i]] = gold_initiators[i]
gs_targets[gold_keys[i]] = gold_targets[i]
if gold_years[i] is not None:
gs_stdates[gold_keys[i]] = date(gold_years[i],gold_months[i],gold_days[i])
else:
gs_stdates[gold_keys[i]] = None
if len(sys.argv)>1:
in_file = sys.argv[1]
# db structure
# Docs table (ID, filename, title, timestamp, source, svm(float), text)
# Hits table (primary key: ID, char: AMT_HitID, foreignkey: doc)
from models import *
assignments.drop(engine, checkfirst=True)
docs.drop(engine, checkfirst=True)
metadata.create_all(engine) # create if it doesn't exist yet
def | (file):
raw = file.readline()
if not raw:
raise EOFError()
if raw.find('-------------------------------------------------')>=0:
raise ValueError()
return raw.decode(fileencoding)
file = open(in_file, 'r')
readdocs = []
count = 0
while file:
meta = {}
try:
while True:
# read meta-data
dataline = nextline(file).rstrip()
if '>>>>>>>' in dataline:
break
elif ':' in dataline:
# split at the first colon
tag,value = dataline.split(':',1)
tag = tag.lower()
tag = tag.strip()
tag = tag.replace(' ','_')
# the Countries meta-data is represented as a list of
# ('Country', freq) pairs, where 'Country' is the name
# of the country and freq is the frequency it occurs in
# the document. We need to parse this and select the 5
# most frequent countries for inclusion in the database
if tag == 'countries':
valueList = value.strip().split(')')
countryList = []
for countryFreq in valueList:
countryTuple = tuple(countryFreq.strip()[1:].split(','))
if countryTuple[0] != '':
country = countryTuple[0]
freq = int(countryTuple[1])
countryList += [[country,freq]]
# sort countries in descending order by frequency
countryList.sort(key=itemgetter(1),reverse=True)
# get the 5 most frequent countries
# or all countries if there are less than five
for index in range(1, min(5, len(countryList)) + 1):
(country, freq) = countryList[index - 1]
meta['country'+str(index)] = country.strip("'")
else:
# add tag and value to the dictionary
meta[tag] = value.strip()
# get text of document
text = u""
while True:
t = nextline(file)
if t.find('<<<<<<<<<<<<<<<')>=0:
break
t = t.replace('\r\n','\n').replace('\r','\n').rstrip()
text += t + u"\n"
meta['text'] = text
# get NER and NELL-CAT from pipeline data
# open pipeline data file (HTML version)
# store the frequency of each NELL-CAT / NER location in document
locFreq = {}
counted = [False]*len(text)
with open('/data/mid/docs/' + meta['key'].strip()) as nell_file:
for line in nell_file:
if line.startswith('Type: ner'):
if line.find('Value: LOCATION') > -1:
if not(counted[spanStart]):
counted[spanStart] = True
location = text[spanStart:spanEnd]
if location in locFreq:
locFreq[location] = locFreq[location] + 1
else:
locFreq[location] = 1
elif line.startswith('Type: mid-attr'):
# parse out list of MID actions
# and the pipeline's confidence in each
# from the HTML output of the pipeline
preamble,values = line.split('Value: ',1)
valueList = values.split(',')
valueDict = {}
for value in valueList:
key,num = value.split(':',1)
if num.find('<') > -1:
num,html = num.split('<',1)
valueDict[key] = float(num)
# filter out actions other than those on
# our list of actions we care about
actionDict = {}
for action in mid_action_types:
# if there is a defined strength for this action
if action in valueDict:
actionDict[action] = valueDict[action]
else:
actionDict[action] = 0
# Set 'NO_MILITARIZED_ACTION' to at least threshold
if actionDict['NO_MILITARIZED_ACTION'] < MID_Threshold:
actionDict['NO_MILITARIZED_ACTION'] = MID_Threshold
elif line.startswith('Type: nell-cat'):
isLoc = False
if line.find('Value: location') > -1:
isLoc = True
if line.find('Value: country') > -1:
isLoc = True
if line.find('Value: geo') > -1:
isLoc = True
if isLoc:
if not(counted[spanStart]):
counted[spanStart] = True
location = text[spanStart:spanEnd]
if location in locFreq:
locFreq[location] = locFreq[location] + 1
else:
locFreq[location] = 1
elif line.startswith('<li><div class="annotation"'):
spanList = line.split(' ')
for span in spanList:
if span.startswith('spanStart='):
itemList = span.split('"')
spanStart = int(itemList[1])
elif span.startswith('spanEnd='):
itemList = span.split('"')
spanEnd = int(itemList[1])
# add mid action classification to document data
# pick the top 4 most confident classifications
# if the 4th classification is below threshold,
# suggest "No militarized action" instead
bestAction = sorted(actionDict, key=actionDict.get, reverse=True)
for index in range(1,5):
meta['mid_attr'+str(index)] = mid_dict[bestAction[index - 1]]
# pick the top 5 most frequent locations
bestLoc = sorted(locFreq, key=locFreq.get | nextline | identifier_name |
import-docs.py | 8')
rowTrm = rowStr.strip()
rowLwr = rowTrm.lower()
gold_actions.append(rowLwr)
gold_initiators = []
for row in C[1:]:
rowStr = row.encode('utf-8')
try:
rowNme,rowCOW = rowStr.split('(')
except:
rowNme = rowStr
rowTrm = rowNme.strip()
#rowUpr = rowTrm.upper()
if len(rowTrm) < 1:
rowTrm = 'None'
gold_initiators.append(rowTrm)
gold_targets = []
for row in D[1:]:
rowStr = row.encode('utf-8')
try:
rowNme,rowCOW = rowStr.split('(')
except:
rowNme = rowStr
rowTrm = rowNme.strip()
#rowUpr = rowTrm.upper()
if len(rowTrm) < 1:
rowTrm = 'None'
gold_targets.append(rowTrm)
gold_months = []
for row in E[1:]:
try:
gold_months.append(int(row))
except:
gold_months.append(1) # default to January
gold_days = []
for row in F[1:]:
try:
gold_days.append(int(row))
except:
gold_days.append(1) # default to first of month
gold_years = []
for row in G[1:]:
try:
gold_years.append(int(row))
except:
gold_years.append(None)
# gs_mids is a dictionary of document keys and their integer MID levels
gs_mids = {}
gs_actions = {}
gs_initiators = {}
gs_targets = {}
gs_stdates = {}
# loop through the spreadsheet columns, selecting the highest MID for each doc
for i in range(0,len(gold_actions)):
thisAction = gold_actions[i]
if (gold_actions[i] is not None) and (gold_actions[i] != ""):
# convert string to int (e.g., 'Clash (17)' to 17)
junk,mid_level = thisAction.split('(')
mid_level,junk = mid_level.split(')')
mid_level = int(mid_level)
# use key to check if this document already has an MID level
if gold_keys[i] in gs_mids:
if mid_level > gs_mids[gold_keys[i]]:
update_dict = True
else:
update_dict = False
else:
update_dict = True
# if the key is undefined or the new mid_level is higher
# then update the dictionary
if update_dict:
gs_mids[gold_keys[i]] = mid_level
gs_actions[gold_keys[i]] = gold_actions[i]
gs_initiators[gold_keys[i]] = gold_initiators[i]
gs_targets[gold_keys[i]] = gold_targets[i]
if gold_years[i] is not None:
gs_stdates[gold_keys[i]] = date(gold_years[i],gold_months[i],gold_days[i])
else:
gs_stdates[gold_keys[i]] = None
if len(sys.argv)>1:
in_file = sys.argv[1]
# db structure
# Docs table (ID, filename, title, timestamp, source, svm(float), text)
# Hits table (primary key: ID, char: AMT_HitID, foreignkey: doc)
from models import *
assignments.drop(engine, checkfirst=True)
docs.drop(engine, checkfirst=True)
metadata.create_all(engine) # create if it doesn't exist yet
def nextline (file):
raw = file.readline()
if not raw:
raise EOFError()
if raw.find('-------------------------------------------------')>=0:
raise ValueError()
return raw.decode(fileencoding)
file = open(in_file, 'r')
readdocs = []
count = 0
while file:
meta = {}
try:
while True:
# read meta-data
dataline = nextline(file).rstrip()
if '>>>>>>>' in dataline:
break
elif ':' in dataline:
# split at the first colon
tag,value = dataline.split(':',1)
tag = tag.lower()
tag = tag.strip()
tag = tag.replace(' ','_')
# the Countries meta-data is represented as a list of
# ('Country', freq) pairs, where 'Country' is the name
# of the country and freq is the frequency it occurs in
# the document. We need to parse this and select the 5
# most frequent countries for inclusion in the database
if tag == 'countries':
valueList = value.strip().split(')')
countryList = []
for countryFreq in valueList:
countryTuple = tuple(countryFreq.strip()[1:].split(','))
if countryTuple[0] != '':
country = countryTuple[0]
freq = int(countryTuple[1])
countryList += [[country,freq]]
# sort countries in descending order by frequency
countryList.sort(key=itemgetter(1),reverse=True)
# get the 5 most frequent countries
# or all countries if there are less than five
for index in range(1, min(5, len(countryList)) + 1):
(country, freq) = countryList[index - 1]
meta['country'+str(index)] = country.strip("'")
else:
# add tag and value to the dictionary
meta[tag] = value.strip()
# get text of document
text = u""
while True:
|
meta['text'] = text
# get NER and NELL-CAT from pipeline data
# open pipeline data file (HTML version)
# store the frequency of each NELL-CAT / NER location in document
locFreq = {}
counted = [False]*len(text)
with open('/data/mid/docs/' + meta['key'].strip()) as nell_file:
for line in nell_file:
if line.startswith('Type: ner'):
if line.find('Value: LOCATION') > -1:
if not(counted[spanStart]):
counted[spanStart] = True
location = text[spanStart:spanEnd]
if location in locFreq:
locFreq[location] = locFreq[location] + 1
else:
locFreq[location] = 1
elif line.startswith('Type: mid-attr'):
# parse out list of MID actions
# and the pipeline's confidence in each
# from the HTML output of the pipeline
preamble,values = line.split('Value: ',1)
valueList = values.split(',')
valueDict = {}
for value in valueList:
key,num = value.split(':',1)
if num.find('<') > -1:
num,html = num.split('<',1)
valueDict[key] = float(num)
# filter out actions other than those on
# our list of actions we care about
actionDict = {}
for action in mid_action_types:
# if there is a defined strength for this action
if action in valueDict:
actionDict[action] = valueDict[action]
else:
actionDict[action] = 0
# Set 'NO_MILITARIZED_ACTION' to at least threshold
if actionDict['NO_MILITARIZED_ACTION'] < MID_Threshold:
actionDict['NO_MILITARIZED_ACTION'] = MID_Threshold
elif line.startswith('Type: nell-cat'):
isLoc = False
if line.find('Value: location') > -1:
isLoc = True
if line.find('Value: country') > -1:
isLoc = True
if line.find('Value: geo') > -1:
isLoc = True
if isLoc:
if not(counted[spanStart]):
counted[spanStart] = True
location = text[spanStart:spanEnd]
if location in locFreq:
locFreq[location] = locFreq[location] + 1
else:
locFreq[location] = 1
elif line.startswith('<li><div class="annotation"'):
spanList = line.split(' ')
for span in spanList:
if span.startswith('spanStart='):
itemList = span.split('"')
spanStart = int(itemList[1])
elif span.startswith('spanEnd='):
itemList = span.split('"')
spanEnd = int(itemList[1])
# add mid action classification to document data
# pick the top 4 most confident classifications
# if the 4th classification is below threshold,
# suggest "No militarized action" instead
bestAction = sorted(actionDict, key=actionDict.get, reverse=True)
for index in range(1,5):
meta['mid_attr'+str(index)] = mid_dict[bestAction[index - 1]]
# pick the top 5 most frequent locations
bestLoc = sorted(locFreq, key=locFreq.get | t = nextline(file)
if t.find('<<<<<<<<<<<<<<<')>=0:
break
t = t.replace('\r\n','\n').replace('\r','\n').rstrip()
text += t + u"\n" | conditional_block |
import-docs.py | 8')
rowTrm = rowStr.strip()
rowLwr = rowTrm.lower()
gold_actions.append(rowLwr)
gold_initiators = []
for row in C[1:]:
rowStr = row.encode('utf-8')
try:
rowNme,rowCOW = rowStr.split('(')
except:
rowNme = rowStr
rowTrm = rowNme.strip()
#rowUpr = rowTrm.upper()
if len(rowTrm) < 1:
rowTrm = 'None'
gold_initiators.append(rowTrm)
gold_targets = []
for row in D[1:]:
rowStr = row.encode('utf-8')
try:
rowNme,rowCOW = rowStr.split('(')
except:
rowNme = rowStr
rowTrm = rowNme.strip()
#rowUpr = rowTrm.upper()
if len(rowTrm) < 1:
rowTrm = 'None'
gold_targets.append(rowTrm)
gold_months = []
for row in E[1:]:
try:
gold_months.append(int(row))
except:
gold_months.append(1) # default to January
gold_days = []
for row in F[1:]:
try:
gold_days.append(int(row))
except:
gold_days.append(1) # default to first of month
gold_years = []
for row in G[1:]:
try:
gold_years.append(int(row))
except:
gold_years.append(None)
# gs_mids is a dictionary of document keys and their integer MID levels
gs_mids = {}
gs_actions = {}
gs_initiators = {}
gs_targets = {}
gs_stdates = {}
# loop through the spreadsheet columns, selecting the highest MID for each doc
for i in range(0,len(gold_actions)):
thisAction = gold_actions[i]
if (gold_actions[i] is not None) and (gold_actions[i] != ""):
# convert string to int (e.g., 'Clash (17)' to 17)
junk,mid_level = thisAction.split('(')
mid_level,junk = mid_level.split(')')
mid_level = int(mid_level)
# use key to check if this document already has an MID level
if gold_keys[i] in gs_mids:
if mid_level > gs_mids[gold_keys[i]]:
update_dict = True
else:
update_dict = False
else:
update_dict = True
# if the key is undefined or the new mid_level is higher
# then update the dictionary
if update_dict:
gs_mids[gold_keys[i]] = mid_level
gs_actions[gold_keys[i]] = gold_actions[i]
gs_initiators[gold_keys[i]] = gold_initiators[i]
gs_targets[gold_keys[i]] = gold_targets[i]
if gold_years[i] is not None:
gs_stdates[gold_keys[i]] = date(gold_years[i],gold_months[i],gold_days[i])
else:
gs_stdates[gold_keys[i]] = None
if len(sys.argv)>1:
in_file = sys.argv[1]
# db structure
# Docs table (ID, filename, title, timestamp, source, svm(float), text)
# Hits table (primary key: ID, char: AMT_HitID, foreignkey: doc)
from models import *
assignments.drop(engine, checkfirst=True)
docs.drop(engine, checkfirst=True)
metadata.create_all(engine) # create if it doesn't exist yet
def nextline (file):
|
file = open(in_file, 'r')
readdocs = []
count = 0
while file:
meta = {}
try:
while True:
# read meta-data
dataline = nextline(file).rstrip()
if '>>>>>>>' in dataline:
break
elif ':' in dataline:
# split at the first colon
tag,value = dataline.split(':',1)
tag = tag.lower()
tag = tag.strip()
tag = tag.replace(' ','_')
# the Countries meta-data is represented as a list of
# ('Country', freq) pairs, where 'Country' is the name
# of the country and freq is the frequency it occurs in
# the document. We need to parse this and select the 5
# most frequent countries for inclusion in the database
if tag == 'countries':
valueList = value.strip().split(')')
countryList = []
for countryFreq in valueList:
countryTuple = tuple(countryFreq.strip()[1:].split(','))
if countryTuple[0] != '':
country = countryTuple[0]
freq = int(countryTuple[1])
countryList += [[country,freq]]
# sort countries in descending order by frequency
countryList.sort(key=itemgetter(1),reverse=True)
# get the 5 most frequent countries
# or all countries if there are less than five
for index in range(1, min(5, len(countryList)) + 1):
(country, freq) = countryList[index - 1]
meta['country'+str(index)] = country.strip("'")
else:
# add tag and value to the dictionary
meta[tag] = value.strip()
# get text of document
text = u""
while True:
t = nextline(file)
if t.find('<<<<<<<<<<<<<<<')>=0:
break
t = t.replace('\r\n','\n').replace('\r','\n').rstrip()
text += t + u"\n"
meta['text'] = text
# get NER and NELL-CAT from pipeline data
# open pipeline data file (HTML version)
# store the frequency of each NELL-CAT / NER location in document
locFreq = {}
counted = [False]*len(text)
with open('/data/mid/docs/' + meta['key'].strip()) as nell_file:
for line in nell_file:
if line.startswith('Type: ner'):
if line.find('Value: LOCATION') > -1:
if not(counted[spanStart]):
counted[spanStart] = True
location = text[spanStart:spanEnd]
if location in locFreq:
locFreq[location] = locFreq[location] + 1
else:
locFreq[location] = 1
elif line.startswith('Type: mid-attr'):
# parse out list of MID actions
# and the pipeline's confidence in each
# from the HTML output of the pipeline
preamble,values = line.split('Value: ',1)
valueList = values.split(',')
valueDict = {}
for value in valueList:
key,num = value.split(':',1)
if num.find('<') > -1:
num,html = num.split('<',1)
valueDict[key] = float(num)
# filter out actions other than those on
# our list of actions we care about
actionDict = {}
for action in mid_action_types:
# if there is a defined strength for this action
if action in valueDict:
actionDict[action] = valueDict[action]
else:
actionDict[action] = 0
# Set 'NO_MILITARIZED_ACTION' to at least threshold
if actionDict['NO_MILITARIZED_ACTION'] < MID_Threshold:
actionDict['NO_MILITARIZED_ACTION'] = MID_Threshold
elif line.startswith('Type: nell-cat'):
isLoc = False
if line.find('Value: location') > -1:
isLoc = True
if line.find('Value: country') > -1:
isLoc = True
if line.find('Value: geo') > -1:
isLoc = True
if isLoc:
if not(counted[spanStart]):
counted[spanStart] = True
location = text[spanStart:spanEnd]
if location in locFreq:
locFreq[location] = locFreq[location] + 1
else:
locFreq[location] = 1
elif line.startswith('<li><div class="annotation"'):
spanList = line.split(' ')
for span in spanList:
if span.startswith('spanStart='):
itemList = span.split('"')
spanStart = int(itemList[1])
elif span.startswith('spanEnd='):
itemList = span.split('"')
spanEnd = int(itemList[1])
# add mid action classification to document data
# pick the top 4 most confident classifications
# if the 4th classification is below threshold,
# suggest "No militarized action" instead
bestAction = sorted(actionDict, key=actionDict.get, reverse=True)
for index in range(1,5):
meta['mid_attr'+str(index)] = mid_dict[bestAction[index - 1]]
# pick the top 5 most frequent locations
bestLoc = sorted(locFreq, key=locFreq.get | raw = file.readline()
if not raw:
raise EOFError()
if raw.find('-------------------------------------------------')>=0:
raise ValueError()
return raw.decode(fileencoding) | identifier_body |
import-docs.py | _actions = {}
gs_initiators = {}
gs_targets = {}
gs_stdates = {}
# loop through the spreadsheet columns, selecting the highest MID for each doc
for i in range(0,len(gold_actions)):
thisAction = gold_actions[i]
if (gold_actions[i] is not None) and (gold_actions[i] != ""):
# convert string to int (e.g., 'Clash (17)' to 17)
junk,mid_level = thisAction.split('(')
mid_level,junk = mid_level.split(')')
mid_level = int(mid_level)
# use key to check if this document already has an MID level
if gold_keys[i] in gs_mids:
if mid_level > gs_mids[gold_keys[i]]:
update_dict = True
else:
update_dict = False
else:
update_dict = True
# if the key is undefined or the new mid_level is higher
# then update the dictionary
if update_dict:
gs_mids[gold_keys[i]] = mid_level
gs_actions[gold_keys[i]] = gold_actions[i]
gs_initiators[gold_keys[i]] = gold_initiators[i]
gs_targets[gold_keys[i]] = gold_targets[i]
if gold_years[i] is not None:
gs_stdates[gold_keys[i]] = date(gold_years[i],gold_months[i],gold_days[i])
else:
gs_stdates[gold_keys[i]] = None
if len(sys.argv)>1:
in_file = sys.argv[1]
# db structure
# Docs table (ID, filename, title, timestamp, source, svm(float), text)
# Hits table (primary key: ID, char: AMT_HitID, foreignkey: doc)
from models import *
assignments.drop(engine, checkfirst=True)
docs.drop(engine, checkfirst=True)
metadata.create_all(engine) # create if it doesn't exist yet
def nextline (file):
raw = file.readline()
if not raw:
raise EOFError()
if raw.find('-------------------------------------------------')>=0:
raise ValueError()
return raw.decode(fileencoding)
file = open(in_file, 'r')
readdocs = []
count = 0
while file:
meta = {}
try:
while True:
# read meta-data
dataline = nextline(file).rstrip()
if '>>>>>>>' in dataline:
break
elif ':' in dataline:
# split at the first colon
tag,value = dataline.split(':',1)
tag = tag.lower()
tag = tag.strip()
tag = tag.replace(' ','_')
# the Countries meta-data is represented as a list of
# ('Country', freq) pairs, where 'Country' is the name
# of the country and freq is the frequency it occurs in
# the document. We need to parse this and select the 5
# most frequent countries for inclusion in the database
if tag == 'countries':
valueList = value.strip().split(')')
countryList = []
for countryFreq in valueList:
countryTuple = tuple(countryFreq.strip()[1:].split(','))
if countryTuple[0] != '':
country = countryTuple[0]
freq = int(countryTuple[1])
countryList += [[country,freq]]
# sort countries in descending order by frequency
countryList.sort(key=itemgetter(1),reverse=True)
# get the 5 most frequent countries
# or all countries if there are fewer than five
for index in range(1, min(5, len(countryList)) + 1):
(country, freq) = countryList[index - 1]
meta['country'+str(index)] = country.strip("'")
else:
# add tag and value to the dictionary
meta[tag] = value.strip()
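# Worked example of the Countries branch above (hypothetical pipeline output):
#   value = " ('Iraq', 12) ('Iran', 7) ('Turkey', 3)"
#   -> countryList sorted by freq = [['Iraq', 12], ['Iran', 7], ['Turkey', 3]]
#   -> meta['country1'] = 'Iraq', meta['country2'] = 'Iran', meta['country3'] = 'Turkey'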
# get text of document
text = u""
while True:
t = nextline(file)
if t.find('<<<<<<<<<<<<<<<')>=0:
break
t = t.replace('\r\n','\n').replace('\r','\n').rstrip()
text += t + u"\n"
meta['text'] = text
# get NER and NELL-CAT from pipeline data
# open pipeline data file (HTML version)
# store the frequency of each NELL-CAT / NER location in document
#try:
locFreq = {}
counted = [False]*len(text)
with open('/data/mid/docs/' + meta['key'].strip()) as nell_file:
for line in nell_file:
if line.startswith('Type: ner'):
if line.find('Value: LOCATION') > -1:
if not(counted[spanStart]):
counted[spanStart] = True
location = text[spanStart:spanEnd]
if location in locFreq:
locFreq[location] = locFreq[location] + 1
else:
locFreq[location] = 1
elif line.startswith('Type: mid-attr'):
# parse out list of MID actions
# and the pipeline's confidence in each
# from the HTML output of the pipeline
preamble,values = line.split('Value: ',1)
valueList = values.split(',')
valueDict = {}
for value in valueList:
key,num = value.split(':',1)
if num.find('<') > -1:
num,html = num.split('<',1)
valueDict[key] = float(num)
# filter out actions other than those on
# our list of actions we care about
actionDict = {}
for action in mid_action_types:
# if there is a defined strength for this action
if action in valueDict:
actionDict[action] = valueDict[action]
else:
actionDict[action] = 0
# Set 'NO_MILITARIZED_ACTION' to at least threshold
if actionDict['NO_MILITARIZED_ACTION'] < MID_Threshold:
actionDict['NO_MILITARIZED_ACTION'] = MID_Threshold
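# e.g. with a hypothetical MID_Threshold of 0.5, a document whose strongest
# real action scores only 0.4 will have NO_MILITARIZED_ACTION ranked first below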
elif line.startswith('Type: nell-cat'):
isLoc = False
if line.find('Value: location') > -1:
isLoc = True
if line.find('Value: country') > -1:
isLoc = True
if line.find('Value: geo') > -1:
isLoc = True
if isLoc:
if not(counted[spanStart]):
counted[spanStart] = True
location = text[spanStart:spanEnd]
if location in locFreq:
locFreq[location] = locFreq[location] + 1
else:
locFreq[location] = 1
elif line.startswith('<li><div class="annotation"'):
spanList = line.split(' ')
for span in spanList:
if span.startswith('spanStart='):
itemList = span.split('"')
spanStart = int(itemList[1])
elif span.startswith('spanEnd='):
itemList = span.split('"')
spanEnd = int(itemList[1])
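# Hypothetical annotation line the span parser above expects:
#   <li><div class="annotation" spanStart="128" spanEnd="140" ...>
# -> spanStart = 128, spanEnd = 140, so text[128:140] is the annotated mention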
# add mid action classification to document data
# pick the top 4 most confident classifications
# if the 4th classification is below threshold,
# suggest "No militarized action" instead
bestAction = sorted(actionDict, key=actionDict.get, reverse=True)
for index in range(1,5):
meta['mid_attr'+str(index)] = mid_dict[bestAction[index - 1]]
# pick the top 5 most frequent locations
bestLoc = sorted(locFreq, key=locFreq.get, reverse=True)
for index in range(1,min(5, len(bestLoc)) + 1):
meta['nelloc'+str(index)] = bestLoc[index - 1]
# add gold standard MID classification to database
gold_key = meta['gskey']
if gold_key in gs_actions:
gold_action = gs_actions[gold_key]
if gold_action in gold_dict:
gold_db = gold_dict[gold_action]
meta['gsAction'] = gold_db
else:
meta['gsAction'] = 'Unknown'
if (gs_mids[gold_key] > 6) or (gs_mids[gold_key] == 0):
meta['gsActionNum'] = gs_mids[gold_key]
else: # don't discriminate between actions 1 thru 6
meta['gsActionNum'] = 6
# convert nations into Qualtrics format
ini = gs_initiators[gold_key].lower().strip()
tar = gs_targets[gold_key].lower().strip()
if ini in nationDict:
meta['gsInitiator'] = nationDict[ini]
else:
meta['gsInitiator'] = gs_initiators[gold_key]
if tar in nationDict:
meta['gsTargets'] = nationDict[tar]
else:
meta['gsTargets'] = gs_targets[gold_key]
meta['gsDate'] = gs_stdates[gold_key]
# get ISO codes for gold standard nations
if ini in isoDict: | meta['gsInitiatorISO'] = isoDict[ini]
else:
print "ISO for " + ini + " not found." | random_line_split | |
auth.controllers.js | */
let refreshTokensByID = {}; // { manager_id: refreshToken }
// cookie setting w/ access token
const accessCookieOptions = {
// cookie expires 90 minutes after it is set.
expires: new Date(
Date.now() + process.env.JWT_COOKIE_ACCESS_EXPIRES * 60 * 1000
),
httpOnly: true, // for security reasons it's recommended to set httpOnly to true
sameSite: true,
};
// cookie setting w/ refresh token
const refreshCookieOptions = {
// cookie expires 3 days after it is set.
expires: new Date(
Date.now() + process.env.JWT_COOKIE_REFRESH_EXPIRES * 60 * 60 * 1000
),
httpOnly: true, // for security reasons it's recommended to set httpOnly to true
sameSite: true,
};
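// A sketch of the .env values these options assume (hypothetical names/units):
//   JWT_COOKIE_ACCESS_EXPIRES=90   -> 90 * 60 * 1000 ms       (90 minutes)
//   JWT_COOKIE_REFRESH_EXPIRES=72  -> 72 * 60 * 60 * 1000 ms  (3 days)
//   JWT_ACCESS=accessToken, JWT_REFRESH=refreshToken          (cookie names)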
/**
* Admin sign up action
*
* @param {*} req
* @param {*} res
* @returns
*/
exports.adminSignupAction = (req, res) => {
const { username, firstname, lastname, password } = req.body;
const privilegeDefault = "3";
const activeDefault = "1";
const createdOn = datetimeConverter.toMySqlDateTime(moment(new Date()));
// check username
if (!Validation.validateUsername(username)) {
return Response.sendErrorResponse({
res,
message: "Please provide a valid username",
statusCode: 400,
});
}
// check password
if (!Validation.validatePassword(password)) {
return Response.sendErrorResponse({
res,
message: "Please provide a valid password",
statusCode: 400,
});
}
// hash + salt password
const hash = Utils.hashPassword(password);
// new user info
const vals = [
username,
firstname || null,
lastname || null,
hash,
privilegeDefault,
activeDefault,
createdOn,
];
authDB
.adminSignup(req, res, vals)
.then(async (rows) => {
console.log("auth.controllers - signup - rows = ", rows);
// TODO: check and remove password if exists in response
// dbResponse = rows[0];
// delete dbResponse.password;
return Response.sendResponse({
res,
responseBody: { user: rows /*dbResponse*/ },
statusCode: 201,
message: "User successfully created",
});
})
.catch((err) => {
console.log(err, "error");
return Response.sendErrorResponse({
res,
message: err,
statusCode: 500,
});
});
};
/**
* Admin login action
*
* @param {*} req
* @param {*} res
* @returns
*/
exports.adminLoginAction = (req, res) => {
const { username, password } = req.body;
if (Validation.isEmpty(username) || !Validation.validateUsername(username)) {
return Response.sendErrorResponse({
res,
message: "Username is missing.",
statusCode: 400,
});
}
if (Validation.isEmpty(password) || !Validation.validatePassword(password)) {
return Response.sendErrorResponse({
res,
message: "Password is missing.",
statusCode: 400,
});
}
authDB
.adminLogin(req, res, username)
.then(async (results) => {
console.log("auth.controllers - login - results = ", results);
// results = [
// RowDataPacket {
// manager_id: 6,
// username: 'andy-01',
// firstname: 'Yayen',
// lastname: 'Lin',
// password: '$2a$10$LSBVvNc8IU.mA9lKHHKka.vmv./MpDVax.5XZRJoxqvqqPzFZJny6',
// privilege: '3',
// active: '1',
// createdOn: 2021-07-11T09:48:55.000Z
// }
// ]
const dbResponse = results[0];
if (!dbResponse)
return Response.sendErrorResponse({
res,
message: "Username does not exist.",
statusCode: 400,
});
if (!Validation.comparePassword(dbResponse.password, password))
return Response.sendErrorResponse({
res,
message: "The password you provided is incorrect",
statusCode: 400,
});
// login successfully
console.log("Logged in successfully!");
console.log("dbResponse", dbResponse);
// access token - give users access to protected resources
const token = Utils.generateJWT(dbResponse); // passing payload to jwt
// refresh token - allow users to request new tokens
const refreshExpiry = moment()
.utc()
.add(3, "days")
.endOf("day")
.format("X");
const refreshToken = Utils.generateJWT({
exp: parseInt(refreshExpiry),
data: dbResponse.manager_id,
});
// add cookies to the response
res.cookie(process.env.JWT_ACCESS, token, accessCookieOptions);
res.cookie(process.env.JWT_REFRESH, refreshToken, refreshCookieOptions);
// add refreshToken to our refreshToken obj
refreshTokensByID[dbResponse.manager_id] = refreshToken;
console.log("added to refershTokensByID: ", refreshTokensByID);
// FIXME: create session for logged in user.
let sess = req.session;
sess.user_id = dbResponse.manager_id;
console.log("session", sess);
delete dbResponse.password; // removed password before return
return Response.sendResponse({
res,
responseBody: {
user: dbResponse,
token,
refresh: refreshToken,
},
message: "Login successful.",
});
})
.catch((err) => {
console.log(err);
return Response.sendErrorResponse({
res,
message: err,
statusCode: 500,
});
});
};
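// Rough usage sketch (assumes Express-style routing wired up elsewhere):
//   router.post("/admin/login", adminLoginAction);
//   POST /admin/login {"username": "andy-01", "password": "..."}
//   -> 200 { user, token, refresh } plus httpOnly access/refresh cookies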
/**
* fetch logged in user info
*
* @param {*} req
* @param {*} res
* @returns current logged in user info
*/
exports.me = async (req, res) => {
const { user, token, tokenExp, toRefresh } = res;
console.log(res.user);
delete res.user.iat;
delete res.user.exp;
delete res.user.aud;
delete res.user.iss;
delete res.user.sub;
delete res.user.password;
try {
return Response.sendResponse({
res,
message: "User details successfully fetched",
responseBody: {
user: user,
token: token,
refresh: refreshTokensByID[user.manager_id],
tokenExp: tokenExp,
toRefresh: toRefresh,
},
});
} catch (error) {
console.log(error);
return Response.sendErrorResponse({
res,
message: "Unable to fetch currently logged in user information.",
statusCode: 400,
});
}
};
/**
* if access token has expired, renew the access token and call next();
* if not, call next() directly.
*
* @param {*} req
* @param {*} res
* @param {*} next
*/
exports.refreshTokenAction = async (req, res) => {
// console.log("----------------------------------------- req");
// console.log(req.token);
// console.log(req.user);
// console.log(req.sessionID);
// console.log("----------------------------------------- res");
// console.log(res.token);
// console.log(res.user);
const { user } = req.body;
// console.log("res", res.user.manager_id);
const refresh = refreshTokensByID[user.manager_id];
// FIXME: refresh turns undefined sometimes
console.log("refresh", refresh);
// if refresh token missing
if (!refresh)
return Response.sendErrorResponse({
res,
message: "No refresh token provided.", | statusCode: 403,
});
// if refresh token expires
if (refresh) {
try {
const decoded = Utils.verifyJWT(refresh);
// {
// exp: 1626825599,
// data: 6,
// iat: 1626500973,
// aud: 'jwt-node',
// iss: 'jwt-node',
// sub: 'jwt-node'
// }
const exp = decoded.exp || null;
const now = new Date(Date.now()).getTime() / 1000;
// if no exp in decoded or id doesn't match
if (!exp || decoded.data !== user.manager_id)
return Response.sendErrorResponse({
res,
message: "Invalid refresh token.",
statusCode: 403,
});
console.log("Got here - 1");
// if refresh token expires
if (now > exp)
return Response.sendErrorResponse({
res,
message: "Refresh token expired, please log back in again.",
statusCode: 403,
});
console.log("Got here - 2");
// generate new access token using logged in user's info
const newToken = Utils.generateJWT(user);
// clear the old cookie
res.clearCookie(process.env.JWT_ACCESS);
// add the new cookie to response
res.cookie(process.env.JWT_ACCESS, newToken, accessCookieOptions);
console.log("Got here - 3");
console.log(user); | random_line_split | |
auth.controllers.js | */
let refreshTokensByID = {}; // { manager_id: refreshToken }
// cookie setting w/ access token
const accessCookieOptions = {
// cookie expires 90 minutes after it is set.
expires: new Date(
Date.now() + process.env.JWT_COOKIE_ACCESS_EXPIRES * 60 * 1000
),
httpOnly: true, // for security reasons it's recommended to set httpOnly to true
sameSite: true,
};
// cookie setting w/ refresh token
const refreshCookieOptions = {
// cookie expires 3 days after it is set.
expires: new Date(
Date.now() + process.env.JWT_COOKIE_REFRESH_EXPIRES * 60 * 60 * 1000
),
httpOnly: true, // for security reasons it's recommended to set httpOnly to true
sameSite: true,
};
/**
* Admin sign up action
*
* @param {*} req
* @param {*} res
* @returns
*/
exports.adminSignupAction = (req, res) => {
const { username, firstname, lastname, password } = req.body;
const privilegeDefault = "3";
const activeDefault = "1";
const createdOn = datetimeConverter.toMySqlDateTime(moment(new Date()));
// check username
if (!Validation.validateUsername(username)) {
return Response.sendErrorResponse({
res,
message: "Please provide a valid username",
statusCode: 400,
});
}
// check password
if (!Validation.validatePassword(password)) {
return Response.sendErrorResponse({
res,
message: "Please provide a valid password",
statusCode: 400,
});
}
// hash + salt password
const hash = Utils.hashPassword(password);
// new user info
const vals = [
username,
firstname || null,
lastname || null,
hash,
privilegeDefault,
activeDefault,
createdOn,
];
authDB
.adminSignup(req, res, vals)
.then(async (rows) => {
console.log("auth.controllers - signup - rows = ", rows);
// TODO: check and remove password if exists in response
// dbResponse = rows[0];
// delete dbResponse.password;
return Response.sendResponse({
res,
responseBody: { user: rows /*dbResponse*/ },
statusCode: 201,
message: "User successfully created",
});
})
.catch((err) => {
console.log(err, "error");
return Response.sendErrorResponse({
res,
message: err,
statusCode: 500,
});
});
};
/**
* Admin login action
*
* @param {*} req
* @param {*} res
* @returns
*/
exports.adminLoginAction = (req, res) => {
const { username, password } = req.body;
if (Validation.isEmpty(username) || !Validation.validateUsername(username)) |
if (Validation.isEmpty(password) || !Validation.validatePassword(password)) {
return Response.sendErrorResponse({
res,
message: "Password is missing.",
statusCode: 400,
});
}
authDB
.adminLogin(req, res, username)
.then(async (results) => {
console.log("auth.controllers - login - results = ", results);
// results = [
// RowDataPacket {
// manager_id: 6,
// username: 'andy-01',
// firstname: 'Yayen',
// lastname: 'Lin',
// password: '$2a$10$LSBVvNc8IU.mA9lKHHKka.vmv./MpDVax.5XZRJoxqvqqPzFZJny6',
// privilege: '3',
// active: '1',
// createdOn: 2021-07-11T09:48:55.000Z
// }
// ]
const dbResponse = results[0];
if (!dbResponse)
return Response.sendErrorResponse({
res,
message: "Username does not exist.",
statusCode: 400,
});
if (!Validation.comparePassword(dbResponse.password, password))
return Response.sendErrorResponse({
res,
message: "The password you provided is incorrect",
statusCode: 400,
});
// login successfully
console.log("Logged in successfully!");
console.log("dbResponse", dbResponse);
// access token - give users access to protected resources
const token = Utils.generateJWT(dbResponse); // passing payload to jwt
// refresh token - allow users to request new tokens
const refreshExpiry = moment()
.utc()
.add(3, "days")
.endOf("day")
.format("X");
const refreshToken = Utils.generateJWT({
exp: parseInt(refreshExpiry),
data: dbResponse.manager_id,
});
// add cookies to the response
res.cookie(process.env.JWT_ACCESS, token, accessCookieOptions);
res.cookie(process.env.JWT_REFRESH, refreshToken, refreshCookieOptions);
// add refreshToken to our refreshToken obj
refreshTokensByID[dbResponse.manager_id] = refreshToken;
console.log("added to refershTokensByID: ", refreshTokensByID);
// FIXME: create session for logged in user.
let sess = req.session;
sess.user_id = dbResponse.manager_id;
console.log("session", sess);
delete dbResponse.password; // removed password before return
return Response.sendResponse({
res,
responseBody: {
user: dbResponse,
token,
refresh: refreshToken,
},
message: "Login successful.",
});
})
.catch((err) => {
console.log(err);
return Response.sendErrorResponse({
res,
message: err,
statusCode: 500,
});
});
};
/**
* fetch logged in user info
*
* @param {*} req
* @param {*} res
* @returns current logged in user info
*/
exports.me = async (req, res) => {
const { user, token, tokenExp, toRefresh } = res;
console.log(res.user);
delete res.user.iat;
delete res.user.exp;
delete res.user.aud;
delete res.user.iss;
delete res.user.sub;
delete res.user.password;
try {
return Response.sendResponse({
res,
message: "User details successfully fetched",
responseBody: {
user: user,
token: token,
refresh: refreshTokensByID[user.manager_id],
tokenExp: tokenExp,
toRefresh: toRefresh,
},
});
} catch (error) {
console.log(error);
return Response.sendErrorResponse({
res,
message: "Unable to fetch currently logged in user information.",
statusCode: 400,
});
}
};
/**
* if access token has expired, renew the access token and call next();
* if not, call next() directly.
*
* @param {*} req
* @param {*} res
* @param {*} next
*/
exports.refreshTokenAction = async (req, res) => {
// console.log("----------------------------------------- req");
// console.log(req.token);
// console.log(req.user);
// console.log(req.sessionID);
// console.log("----------------------------------------- res");
// console.log(res.token);
// console.log(res.user);
const { user } = req.body;
// console.log("res", res.user.manager_id);
const refresh = refreshTokensByID[user.manager_id];
// FIXME: refresh turns undefined sometimes
console.log("refresh", refresh);
// if refresh token missing
if (!refresh)
return Response.sendErrorResponse({
res,
message: "No refresh token provided.",
statusCode: 403,
});
// if refresh token expires
if (refresh) {
try {
const decoded = Utils.verifyJWT(refresh);
// {
// exp: 1626825599,
// data: 6,
// iat: 1626500973,
// aud: 'jwt-node',
// iss: 'jwt-node',
// sub: 'jwt-node'
// }
const exp = decoded.exp || null;
const now = new Date(Date.now()).getTime() / 1000;
// if no exp in decoded or id doesn't match
if (!exp || decoded.data !== user.manager_id)
return Response.sendErrorResponse({
res,
message: "Invalid refresh token.",
statusCode: 403,
});
console.log("Got here - 1");
// if refresh token expires
if (now > exp)
return Response.sendErrorResponse({
res,
message: "Refresh token expired, please log back in again.",
statusCode: 403,
});
console.log("Got here - 2");
// generate new access token using logged in user's info
const newToken = Utils.generateJWT(user);
// clear the old cookie
res.clearCookie(process.env.JWT_ACCESS);
// add the new cookie to response
res.cookie(process.env.JWT_ACCESS, newToken, accessCookieOptions);
console.log("Got here - 3");
console.log | {
return Response.sendErrorResponse({
res,
message: "Username is missing.",
statusCode: 400,
});
} | conditional_block |
main.py | H:%M:%S',time.localtime(time.time()))
self.textinput_action=TextInput(multiline=False)
self.layout_num.add_widget(self.textinput_num)
self.layout_time.add_widget(self.textinput_time)
self.layout_action.add_widget(self.textinput_action)
self.add_widget(self.layout_num)
self.add_widget(self.layout_time)
self.add_widget(self.layout_action)
self.add_widget(Button(text='确认',font_size='20sp',color=(0,0,0,1),font_name=font_name,on_press=self.ct_on_press))
def ct_on_press(self,event):
self.app_=App.get_running_app()
self.model_=self.app_.calendar_.dates_.model_
self.num=self.textinput_num.text
self.time=self.textinput_time.text
self.action=self.textinput_action.text
elevators=self.model_.samples
if self.num != '':
idx=np.where(elevators['equip_no']==int(self.num))[0]
body=self.num+'\t'+self.time+'\t'+self.action+'\t'
if len(idx)>0:
infos=elevators[['Business type','Description','City','速度','设备型号','T']].iloc[idx[0]]
for info in infos:
body=body+str(info)+'\t'
body=body+'\n'
file_name='Log-New-Breakdown.tsv'
f=open(file_name,'a',encoding="utf-8")
f.write(body)
f.close()
print('成功输入新数据!')
else:
pass
self.textinput_num.text=''
self.textinput_time.text=time.strftime('%Y-%m-%d %H:%M:%S',time.localtime(time.time()))
self.textinput_action.text=''
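# A hypothetical Log-New-Breakdown.tsv row written above (fields are tab-separated):
#   10086  2020-01-01 10:00:00  reset door sensor  Commercial  door fault  Shanghai  1.75  KONE-X  42
# i.e. equip_no, breakdown time, action taken, then the six columns pulled from samples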
# class for status.kv file
class Status(BoxLayout,EventDispatcher):
def __init__(self,**kwargs):
super().__init__(**kwargs)
self.orientation='horizontal'
self.add_widget(Button(on_press=self.on_press,text='输入走修数据',font_name=font_name,background_color=color_deep_blue,background_normal='',font_size='20sp',color=(1,1,1,1)))
def on_dismiss(self, arg):
pass
def on_press(self,event):
self.popup = Popup(title='Input Data',
content = Content(),
size_hint=(0.8,0.8))
self.popup.bind(on_dismiss=self.on_dismiss)
self.popup.open()
# ------------------------------------------------------------------------------------------------#
# class for Days.kv file
class Days(GridLayout):
def __init__(self,**kwargs):
super().__init__(**kwargs)
# ------------------------------------------------------------------------------------------------#
# class for select.kv file
class Select(BoxLayout):
lbl_ = ObjectProperty(None)
def __init__(self,**kwargs):
super().__init__(**kwargs)
self.lbl_.text='通力电梯公司维保可视化产品'
self.lbl_.font_name=font_name
# class for Reminder in Dates
class Reminder(BoxLayout):
def __init__(self,**kwargs):
super().__init__(**kwargs)
self.app_=App.get_running_app()
# Get the dates clicked
self.model_=self.app_.calendar_.dates_.model_
# Information to be added
self.body=self.model_.printSelectedResults()
# Layout arrangement
self.orientation = 'vertical'
# Elevators information
self.layout_comp=BoxLayout(orientation = 'horizontal' , size_hint = (1,1))
self.layout_map=BoxLayout(orientation = 'horizontal' , size_hint = (1,1))
self.img_comp_1=Image(source='component_1.png',keep_ratio=False,size_hint=(1,1),allow_stretch=False,mipmap=True)
self.img_comp_2=Image(source='component_2.png',keep_ratio=False,size_hint=(1,1),allow_stretch=False,mipmap=True)
self.layout_comp.add_widget(self.img_comp_1)
self.layout_comp.add_widget(self.img_comp_2)
self.img_map_1=Image(source='map_kunshan.png',keep_ratio=False,size_hint=(1,1),allow_stretch=True)
self.img_map_2=Image(source='map_sh.png',keep_ratio=False,size_hint=(1,1),allow_stretch=True)
self.layout_map.add_widget(self.img_map_1)
self.layout_map.add_widget(Label(size_hint=(0.1,1),text='昆\n山\n地\n区\n电\n梯\n分\n布\n图',font_name=font_name,font_size='20sp',color=(0,0,0,1)))
self.layout_map.add_widget(self.img_map_2)
self.layout_map.add_widget(Label(size_hint=(0.1,1),text='上\n海\n地\n区\n电\n梯\n分\n布\n图',font_name=font_name,font_size='20sp',color=(0,0,0,1)))
self.layout_fig=BoxLayout(orientation = 'vertical' , size_hint = (1,0.7))
#self.layout_fig.add_widget(self.layout_comp)
self.layout_fig.add_widget(self.layout_map)
self.add_widget(self.layout_fig)
#self.layout_scroll_lb=Label(text=self.body,size_hint=(1,None))
#self.layout_scroll_lb.height=self.layout_scroll_lb.texture_size[1]
#self.layout_scroll=ScrollableLabel(text=self.body)
#self.layout_scroll.add_widget(self.layout_scroll_lb)
#self.layout1_title=Label(text='以下电梯预测将在30天内发生故障:\n'+self.body,font_name=font_name)
#self.layout1_title.size=self.layout1_title.texture_size
#self.layout1.add_widget(self.layout1_title)
#self.layout1.add_widget(self.layout_scroll)
# Plots
self.graph_theme = {
'label_options': {
'color': (0,0,0,1), # color of tick labels and titles
'bold': True},
'tick_color': (0,0,0,1)} # ticks and grid
self.graph=Graph(xlabel='Current time',ylabel='Maintenance date',
x_ticks_major=5, y_ticks_major=5,
y_grid_label=True, x_grid_label=True, padding=5,
x_grid=True, y_grid=True, xmin=-0, xmax=31, ymin=-0, ymax=31,**self.graph_theme)
self.plot = MeshLinePlot(color=[1, 0, 0, 1])
self.best_maint_dates=joblib.load('mon_best_int_np.asv')
self.best_maint_dates=self.best_maint_dates[self.model_.month-1]
self.plot.points = [(x+1, x+1+self.best_maint_dates[x]) for x in range(len(self.best_maint_dates))]
self.graph.add_plot(self.plot)
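# each point is (current day, predicted best maintenance day), e.g. a point
# at (10, 17) would mean: as of the 10th, plan this month's maintenance on the 17th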
self.layout_graph=BoxLayout(orientation = 'vertical' , size_hint = (0.7,1))
self.layout_graph.add_widget(Label(text='本月最优维保日期随时间变化图',font_name=font_name,size_hint=(1,0.1),font_size='16sp',color=(0,0,0,1)))
self.layout_graph.add_widget(self.graph)
# Note for user
self.layout_info=BoxLayout(orientation = 'vertical' , size_hint = (0.3,1))
self.layout_info.add_widget(Label(text='待预防性维护电梯信息:\n设备编号\n设备所在区域类型\n故障信息\n所在城市\n电梯运行速度\n设备型号\n距离上一次维修天数',font_name=font_name,pos_hint={'x': 0.5, 'center_y': .5},font_size='16sp',color=(0,0,0,1)))
self.layout_note=BoxLayout(orientation = 'vertical' , size_hint = (0.5,0.8))
self.layout_note.add_widget(Button(on_press = self.on_press,text='输出\n电梯\n信息',font_name=font_name,pos_hint={'x': .5, 'y': 1},size_hint=(0.4,0.2),font_size='20sp',color=(0,0,0,1),background_color=color_shadow_blue))
self.layout_graph_note=BoxLayout(orientation = 'horizontal' , size_hint = (1,0.5))
self.layout_graph_note.add_widget(self.layout_graph)
self.layout_graph_note.add_widget(self.layout_info)
self.layout_graph_note.add_widget(self.layout_note)
self.add_widget(self.layout_graph_note)
self.layout2 = BoxLayout(orientation = 'horizontal' , size_hint = (1,.15))
self.add_widget(self.layout2)
self.layout2.add_widget(Label(text = "请按 'ESC'键或点击窗外以关闭窗口",font_name=font_name,font_size='20sp',color=(1,0,0,1)))
def on_release(self,event):
print ("Reminder OK Clicked!")
def on_press(self,event):
file_name='Log-{:}-{:}-{:}.tsv'.format(self.model_.year,self.model_.month,self.model_.day)
f=open(file_name,'w',encoding="utf-8")
f.write(self.body)
f.close()
# ------------------------------------------------------------------------------------------------#
# class for dates.kv file
class Dates(GridLayo | ut):
def __init__(self,**kwargs):
super().__init__(**kwargs)
self.cols = 7
self.month_=Months()# In order to get current month and day
| identifier_body | |
main.py | self.layout_4=BoxLayout(size_hint=(1,.1))
self.layout_4.add_widget(self.status_)
self.add_widget(self.layout_1)
self.add_widget(self.layout_2)
self.add_widget(self.layout_4)
# ------------------------------------------------------------------------------------------------# | def __init__(self,**kwargs):
super().__init__(**kwargs)
self.orientation='horizontal'
self.layout_num=BoxLayout(orientation='vertical',size_hint=(1,1))
self.layout_time=BoxLayout(orientation='vertical',size_hint=(1,1))
self.layout_action=BoxLayout(orientation='vertical',size_hint=(1,1))
self.layout_num.add_widget(Label(text='故障电梯编号',font_name=font_name,font_size='20sp'))
self.layout_time.add_widget(Label(text='故障时间',font_name=font_name,font_size='20sp'))
self.layout_action.add_widget(Label(text='采取操作',font_name=font_name,font_size='20sp'))
self.textinput_num=TextInput(multiline=False)
self.textinput_time=TextInput(multiline=False)
self.textinput_time.text=time.strftime('%Y-%m-%d %H:%M:%S',time.localtime(time.time()))
self.textinput_action=TextInput(multiline=False)
self.layout_num.add_widget(self.textinput_num)
self.layout_time.add_widget(self.textinput_time)
self.layout_action.add_widget(self.textinput_action)
self.add_widget(self.layout_num)
self.add_widget(self.layout_time)
self.add_widget(self.layout_action)
self.add_widget(Button(text='确认',font_size='20sp',color=(0,0,0,1),font_name=font_name,on_press=self.ct_on_press))
def ct_on_press(self,event):
self.app_=App.get_running_app()
self.model_=self.app_.calendar_.dates_.model_
self.num=self.textinput_num.text
self.time=self.textinput_time.text
self.action=self.textinput_action.text
elevators=self.model_.samples
if self.num is not '':
idx=np.where(elevators['equip_no']==int(self.num))[0]
body=self.num+'\t'+self.time+'\t'+self.action+'\t'
if len(idx)>0:
infos=elevators[['Business type','Description','City','速度','设备型号','T']].iloc[idx[0]]
for info in infos:
body=body+str(info)+'\t'
body=body+'\n'
file_name='Log-New-Breakdown.tsv'
f=open(file_name,'a',encoding="utf-8")
f.write(body)
f.close()
print('成功输入新数据!')
else:
pass
self.textinput_num.text=''
self.textinput_time.text=time.strftime('%Y-%m-%d %H:%M:%S',time.localtime(time.time()))
self.textinput_action.text=''
# class for status.kv file
class Status(BoxLayout,EventDispatcher):
def __init__(self,**kwargs):
super().__init__(**kwargs)
self.orientation='horizontal'
self.add_widget(Button(on_press=self.on_press,text='输入走修数据',font_name=font_name,background_color=color_deep_blue,background_normal='',font_size='20sp',color=(1,1,1,1)))
def on_dismiss(self, arg):
pass
def on_press(self,event):
self.popup = Popup(title='Input Data',
content = Content(),
size_hint=(0.8,0.8))
self.popup.bind(on_dismiss=self.on_dismiss)
self.popup.open()
# ------------------------------------------------------------------------------------------------#
# class for Days.kv file
class Days(GridLayout):
def __init__(self,**kwargs):
super().__init__(**kwargs)
# ------------------------------------------------------------------------------------------------#
# class for select.kv file
class Select(BoxLayout):
lbl_ = ObjectProperty(None)
def __init__(self,**kwargs):
super().__init__(**kwargs)
self.lbl_.text='通力电梯公司维保可视化产品'
self.lbl_.font_name=font_name
# class for Reminder in Dates
class Reminder(BoxLayout):
def __init__(self,**kwargs):
super().__init__(**kwargs)
self.app_=App.get_running_app()
# Get the dates clicked
self.model_=self.app_.calendar_.dates_.model_
# Information to be added
self.body=self.model_.printSelectedResults()
# Layout arrangement
self.orientation = 'vertical'
# Elevators information
self.layout_comp=BoxLayout(orientation = 'horizontal' , size_hint = (1,1))
self.layout_map=BoxLayout(orientation = 'horizontal' , size_hint = (1,1))
self.img_comp_1=Image(source='component_1.png',keep_ratio=False,size_hint=(1,1),allow_stretch=False,mipmap=True)
self.img_comp_2=Image(source='component_2.png',keep_ratio=False,size_hint=(1,1),allow_stretch=False,mipmap=True)
self.layout_comp.add_widget(self.img_comp_1)
self.layout_comp.add_widget(self.img_comp_2)
self.img_map_1=Image(source='map_kunshan.png',keep_ratio=False,size_hint=(1,1),allow_stretch=True)
self.img_map_2=Image(source='map_sh.png',keep_ratio=False,size_hint=(1,1),allow_stretch=True)
self.layout_map.add_widget(self.img_map_1)
self.layout_map.add_widget(Label(size_hint=(0.1,1),text='昆\n山\n地\n区\n电\n梯\n分\n布\n图',font_name=font_name,font_size='20sp',color=(0,0,0,1)))
self.layout_map.add_widget(self.img_map_2)
self.layout_map.add_widget(Label(size_hint=(0.1,1),text='上\n海\n地\n区\n电\n梯\n分\n布\n图',font_name=font_name,font_size='20sp',color=(0,0,0,1)))
self.layout_fig=BoxLayout(orientation = 'vertical' , size_hint = (1,0.7))
#self.layout_fig.add_widget(self.layout_comp)
self.layout_fig.add_widget(self.layout_map)
self.add_widget(self.layout_fig)
#self.layout_scroll_lb=Label(text=self.body,size_hint=(1,None))
#self.layout_scroll_lb.height=self.layout_scroll_lb.texture_size[1]
#self.layout_scroll=ScrollableLabel(text=self.body)
#self.layout_scroll.add_widget(self.layout_scroll_lb)
#self.layout1_title=Label(text='以下电梯预测将在30天内发生故障:\n'+self.body,font_name=font_name)
#self.layout1_title.size=self.layout1_title.texture_size
#self.layout1.add_widget(self.layout1_title)
#self.layout1.add_widget(self.layout_scroll)
# Plots
self.graph_theme = {
'label_options': {
'color': (0,0,0,1), # color of tick labels and titles
'bold': True},
'tick_color': (0,0,0,1)} # ticks and grid
self.graph=Graph(xlabel='Current time',ylabel='Maintenance date',
x_ticks_major=5, y_ticks_major=5,
y_grid_label=True, x_grid_label=True, padding=5,
x_grid=True, y_grid=True, xmin=-0, xmax=31, ymin=-0, ymax=31,**self.graph_theme)
self.plot = MeshLinePlot(color=[1, 0, 0, 1])
self.best_maint_dates=joblib.load('mon_best_int_np.asv')
self.best_maint_dates=self.best_maint_dates[self.model_.month-1]
self.plot.points = [(x+1, x+1+self.best_maint_dates[x]) for x in range(len(self.best_maint_dates))]
self.graph.add_plot(self.plot)
self.layout_graph=BoxLayout(orientation = 'vertical' , size_hint = (0.7,1))
self.layout_graph.add_widget(Label(text='本月最优维保日期随时间变化图',font_name=font_name,size_hint=(1,0.1),font_size='16sp',color=(0,0,0,1)))
self.layout_graph.add_widget(self.graph)
# Note for user
self.layout_info=BoxLayout(orientation = 'vertical' , size_hint = (0.3,1))
self.layout_info.add_widget(Label(text='待预防性维护电梯信息:\n设备编号\n设备所在区域类型\n故障信息\n所在城市\n电梯运行速度\n设备型号\n距离上一次维修天数',font_name=font_name,pos_hint={'x': 0.5, 'center_y': .5},font_size='16sp',color=(0,0,0,1)))
self.layout_note=BoxLayout(orientation = 'vertical' , size_hint = (0.5,0.8))
self.layout_note.add_widget(Button(on_press = self.on_press,text='输出\n电梯\n信息',font_name=font_name,pos_hint={'x': .5, 'y': 1},size_hint=(0.4,0.2),font_size='20sp',color=(0,0,0,1),background_color=color_shadow_blue))
self.layout_graph_note=BoxLayout(orientation = 'horizontal' , size_hint = (1,0.5))
self.layout | class Content(BoxLayout): | random_line_split |
main.py | # class for select.kv file
class Select(BoxLayout):
lbl_ = ObjectProperty(None)
def __init__(self,**kwargs):
super().__init__(**kwargs)
self.lbl_.text='通力电梯公司维保可视化产品'
self.lbl_.font_name=font_name
# class for Reminder in Dates
class Reminder(BoxLayout):
def __init__(self,**kwargs):
super().__init__(**kwargs)
self.app_=App.get_running_app()
# Get the dates clicked
self.model_=self.app_.calendar_.dates_.model_
# Information to be added
self.body=self.model_.printSelectedResults()
# Layout arrangementw
self.orientation = 'vertical'
# Elevators information
self.layout_comp=BoxLayout(orientation = 'horizontal' , size_hint = (1,1))
self.layout_map=BoxLayout(orientation = 'horizontal' , size_hint = (1,1))
self.img_comp_1=Image(source='component_1.png',keep_ratio=False,size_hint=(1,1),allow_stretch=False,mipmap=True)
self.img_comp_2=Image(source='component_2.png',keep_ratio=False,size_hint=(1,1),allow_stretch=False,mipmap=True)
self.layout_comp.add_widget(self.img_comp_1)
self.layout_comp.add_widget(self.img_comp_2)
self.img_map_1=Image(source='map_kunshan.png',keep_ratio=False,size_hint=(1,1),allow_stretch=True)
self.img_map_2=Image(source='map_sh.png',keep_ratio=False,size_hint=(1,1),allow_stretch=True)
self.layout_map.add_widget(self.img_map_1)
self.layout_map.add_widget(Label(size_hint=(0.1,1),text='昆\n山\n地\n区\n电\n梯\n分\n布\n图',font_name=font_name,font_size='20sp',color=(0,0,0,1)))
self.layout_map.add_widget(self.img_map_2)
self.layout_map.add_widget(Label(size_hint=(0.1,1),text='上\n海\n地\n区\n电\n梯\n分\n布\n图',font_name=font_name,font_size='20sp',color=(0,0,0,1)))
self.layout_fig=BoxLayout(orientation = 'vertical' , size_hint = (1,0.7))
#self.layout_fig.add_widget(self.layout_comp)
self.layout_fig.add_widget(self.layout_map)
self.add_widget(self.layout_fig)
#self.layout_scroll_lb=Label(text=self.body,size_hint=(1,None))
#self.layout_scroll_lb.height=self.layout_scroll_lb.texture_size[1]
#self.layout_scroll=ScrollableLabel(text=self.body)
#self.layout_scroll.add_widget(self.layout_scroll_lb)
#self.layout1_title=Label(text='以下电梯预测将在30天内发生故障:\n'+self.body,font_name=font_name)
#self.layout1_title.size=self.layout1_title.texture_size
#self.layout1.add_widget(self.layout1_title)
#self.layout1.add_widget(self.layout_scroll)
# Plots
self.graph_theme = {
'label_options': {
'color': (0,0,0,1), # color of tick labels and titles
'bold': True},
'tick_color': (0,0,0,1)} # ticks and grid
self.graph=Graph(xlabel='Current time',ylabel='Maintenance date',
x_ticks_major=5, y_ticks_major=5,
y_grid_label=True, x_grid_label=True, padding=5,
x_grid=True, y_grid=True, xmin=-0, xmax=31, ymin=-0, ymax=31,**self.graph_theme)
self.plot = MeshLinePlot(color=[1, 0, 0, 1])
self.best_maint_dates=joblib.load('mon_best_int_np.asv')
self.best_maint_dates=self.best_maint_dates[self.model_.month-1]
self.plot.points = [(x+1, x+1+self.best_maint_dates[x]) for x in range(len(self.best_maint_dates))]
self.graph.add_plot(self.plot)
self.layout_graph=BoxLayout(orientation = 'vertical' , size_hint = (0.7,1))
self.layout_graph.add_widget(Label(text='本月最优维保日期随时间变化图',font_name=font_name,size_hint=(1,0.1),font_size='16sp',color=(0,0,0,1)))
self.layout_graph.add_widget(self.graph)
# Note for user
self.layout_info=BoxLayout(orientation = 'vertical' , size_hint = (0.3,1))
self.layout_info.add_widget(Label(text='待预防性维护电梯信息:\n设备编号\n设备所在区域类型\n故障信息\n所在城市\n电梯运行速度\n设备型号\n距离上一次维修天数',font_name=font_name,pos_hint={'x': 0.5, 'center_y': .5},font_size='16sp',color=(0,0,0,1)))
self.layout_note=BoxLayout(orientation = 'vertical' , size_hint = (0.5,0.8))
self.layout_note.add_widget(Button(on_press = self.on_press,text='输出\n电梯\n信息',font_name=font_name,pos_hint={'x': .5, 'y': 1},size_hint=(0.4,0.2),font_size='20sp',color=(0,0,0,1),background_color=color_shadow_blue))
self.layout_graph_note=BoxLayout(orientation = 'horizontal' , size_hint = (1,0.5))
self.layout_graph_note.add_widget(self.layout_graph)
self.layout_graph_note.add_widget(self.layout_info)
self.layout_graph_note.add_widget(self.layout_note)
self.add_widget(self.layout_graph_note)
self.layout2 = BoxLayout(orientation = 'horizontal' , size_hint = (1,.15))
self.add_widget(self.layout2)
self.layout2.add_widget(Label(text = "请按 'ESC'键或点击窗外以关闭窗口",font_name=font_name,font_size='20sp',color=(1,0,0,1)))
def on_release(self,event):
print ("Reminder OK Clicked!")
def on_press(self,event):
file_name='Log-{:}-{:}-{:}.tsv'.format(self.model_.year,self.model_.month,self.model_.day)
f=open(file_name,'w',encoding="utf-8")
f.write(self.body)
f.close()
# ------------------------------------------------------------------------------------------------#
# class for dates.kv file
class Dates(GridLayout):
def __init__(self,**kwargs):
super().__init__(**kwargs)
self.cols = 7
self.month_=Months()# In order to get current month and day
self.model_=Model(year=self.month_.year,month=self.month_.month,day=self.month_.day)
# Best maintainance date
self.maintainance_timedelta=datetime.timedelta(days=self.model_.findBestMaintInterval())
self.best_maint_date=datetime.datetime(self.month_.year,self.month_.month,self.month_.day)
self.best_maint_date=self.best_maint_date+self.maintainance_timedelta
print('Best maintenance interval: {:} and best maintenance date: {:}'.format(self.maintainance_timedelta,self.best_maint_date))
# Update dates paddle when choose different months
self.update_dates(self.month_.year,self.month_.month)
def update_dates(self,year,month):
print('Update dates!')
self.clear_widgets()
c = calendar.monthcalendar(year,month)
# Show the best maintenance date if current month is clicked
if self.best_maint_date.month is month:
for i in c:
for j in i:
if j == 0:
self.add_widget(Button(on_press = self.on_press,on_release=self.on_release,text = '{j}'.format(j=''),font_size='20sp',color=(0,0,0,1)))
elif j==self.best_maint_date.day:
self.add_widget(Button(on_press = self.on_press,on_release=self.on_release,text = '{j}'.format(j=j),background_color=(1,0,0,1),font_size='20sp',color=(0,0,0,1)))
else:
self.add_widget(Button(on_press = self.on_press, on_release=self.on_release,text = '{j}'.format(j=j),font_size='20sp',color=(0,0,0,1)))
else:
for i in c:
for j in i:
if j == 0:
self.add_widget(Button(on_press = self.on_press,on_release=self.on_release,text = '{j}'.format(j=''),font_size='20sp',color=(0,0,0,1)))
else:
self.add_widget(Button(on_press = self.on_press, | on_release=self.on_release,text = '{j}'.format(j=j),font_size='20sp',color=(0,0,0,1)))
def on_dismiss(self, arg):
# Do something on close of popup
print('Popup dismiss')
pass
def on_release(self,event):
event.background_color = 154/256,226/256,248/256,1
def on_press(self,event):
print ("date clicked :" + event.text)
event.background_color | conditional_block | |
main.py | font_name,font_size='20sp'))
self.textinput_num=TextInput(multiline=False)
self.textinput_time=TextInput(multiline=False)
self.textinput_time.text=time.strftime('%Y-%m-%d %H:%M:%S',time.localtime(time.time()))
self.textinput_action=TextInput(multiline=False)
self.layout_num.add_widget(self.textinput_num)
self.layout_time.add_widget(self.textinput_time)
self.layout_action.add_widget(self.textinput_action)
self.add_widget(self.layout_num)
self.add_widget(self.layout_time)
self.add_widget(self.layout_action)
self.add_widget(Button(text='确认',font_size='20sp',color=(0,0,0,1),font_name=font_name,on_press=self.ct_on_press))
def ct_on_press(self,event):
self.app_=App.get_running_app()
self.model_=self.app_.calendar_.dates_.model_
self.num=self.textinput_num.text
self.time=self.textinput_time.text
self.action=self.textinput_action.text
elevators=self.model_.samples
if self.num != '':
idx=np.where(elevators['equip_no']==int(self.num))[0]
body=self.num+'\t'+self.time+'\t'+self.action+'\t'
if len(idx)>0:
infos=elevators[['Business type','Description','City','速度','设备型号','T']].iloc[idx[0]]
for info in infos:
body=body+str(info)+'\t'
body=body+'\n'
file_name='Log-New-Breakdown.tsv'
f=open(file_name,'a',encoding="utf-8")
f.write(body)
f.close()
print('成功输入新数据!')
else:
pass
self.textinput_num.text=''
self.textinput_time.text=time.strftime('%Y-%m-%d %H:%M:%S',time.localtime(time.time()))
self.textinput_action.text=''
# class for status.kv file
class Status(BoxLayout,EventDispatcher):
def __init__(self,**kwargs):
super().__init__(**kwargs)
self.orientation='horizontal'
self.add_widget(Button(on_press=self.on_press,text='输入走修数据',font_name=font_name,background_color=color_deep_blue,background_normal='',font_size='20sp',color=(1,1,1,1)))
def on_dismiss(self, arg):
pass
def on_press(self,event):
self.popup = Popup(title='Input Data',
content = Content(),
size_hint=(0.8,0.8))
self.popup.bind(on_dismiss=self.on_dismiss)
self.popup.open()
# ------------------------------------------------------------------------------------------------#
# class for Days.kv file
class Days(GridLayout):
def __init__(self,**kwargs):
super().__init__(**kwargs)
# ------------------------------------------------------------------------------------------------#
# class for select.kv file
class Select(BoxLayout):
lbl_ = ObjectProperty(None)
def __init__(self,**kwargs):
super().__init__(**kwargs)
self.lbl_.text='通力电梯公司维保可视化产品'
self.lbl_.font_name=font_name
# class for Reminder in Dates
class Reminder(BoxLayout):
def __init__(self,**kwargs):
super().__init__(**kwargs)
self.app_=App.get_running_app()
# Get the dates clicked
self.model_=self.app_.calendar_.dates_.model_
# Information to be added
self.body=self.model_.printSelectedResults()
# Layout arrangement
self.orientation = 'vertical'
# Elevators information
self.layout_comp=BoxLayout(orientation = 'horizontal' , size_hint = (1,1))
self.layout_map=BoxLayout(orientation = 'horizontal' , size_hint = (1,1))
self.img_comp_1=Image(source='component_1.png',keep_ratio=False,size_hint=(1,1),allow_stretch=False,mipmap=True)
self.img_comp_2=Image(source='component_2.png',keep_ratio=False,size_hint=(1,1),allow_stretch=False,mipmap=True)
self.layout_comp.add_widget(self.img_comp_1)
self.layout_comp.add_widget(self.img_comp_2)
self.img_map_1=Image(source='map_kunshan.png',keep_ratio=False,size_hint=(1,1),allow_stretch=True)
self.img_map_2=Image(source='map_sh.png',keep_ratio=False,size_hint=(1,1),allow_stretch=True)
self.layout_map.add_widget(self.img_map_1)
self.layout_map.add_widget(Label(size_hint=(0.1,1),text='昆\n山\n地\n区\n电\n梯\n分\n布\n图',font_name=font_name,font_size='20sp',color=(0,0,0,1)))
self.layout_map.add_widget(self.img_map_2)
self.layout_map.add_widget(Label(size_hint=(0.1,1),text='上\n海\n地\n区\n电\n梯\n分\n布\n图',font_name=font_name,font_size='20sp',color=(0,0,0,1)))
self.layout_fig=BoxLayout(orientation = 'vertical' , size_hint = (1,0.7))
#self.layout_fig.add_widget(self.layout_comp)
self.layout_fig.add_widget(self.layout_map)
self.add_widget(self.layout_fig)
#self.layout_scroll_lb=Label(text=self.body,size_hint=(1,None))
#self.layout_scroll_lb.height=self.layout_scroll_lb.texture_size[1]
#self.layout_scroll=ScrollableLabel(text=self.body)
#self.layout_scroll.add_widget(self.layout_scroll_lb)
#self.layout1_title=Label(text='以下电梯预测将在30天内发生故障:\n'+self.body,font_name=font_name)
#self.layout1_title.size=self.layout1_title.texture_size
#self.layout1.add_widget(self.layout1_title)
#self.layout1.add_widget(self.layout_scroll)
# Plots
self.graph_theme = {
'label_options': {
'color': (0,0,0,1), # color of tick labels and titles
'bold': True},
'tick_color': (0,0,0,1)} # ticks and grid
self.graph=Graph(xlabel='Current time',ylabel='Maintenance date',
x_ticks_major=5, y_ticks_major=5,
y_grid_label=True, x_grid_label=True, padding=5,
x_grid=True, y_grid=True, xmin=-0, xmax=31, ymin=-0, ymax=31,**self.graph_theme)
self.plot = MeshLinePlot(color=[1, 0, 0, 1])
self.best_maint_dates=joblib.load('mon_best_int_np.asv')
self.best_maint_dates=self.best_maint_dates[self.model_.month-1]
self.plot.points = [(x+1, x+1+self.best_maint_dates[x]) for x in range(len(self.best_maint_dates))]
self.graph.add_plot(self.plot)
self.layout_graph=BoxLayout(orientation = 'vertical' , size_hint = (0.7,1))
self.layout_graph.add_widget(Label(text='本月最优维保日期随时间变化图',font_name=font_name,size_hint=(1,0.1),font_size='16sp',color=(0,0,0,1)))
self.layout_graph.add_widget(self.graph)
# Note for user
self.layout_info=BoxLayout(orientation = 'vertical' , size_hint = (0.3,1))
self.layout_info.add_widget(Label(text='待预防性维护电梯信息:\n设备编号\n设备所在区域类型\n故障信息\n所在城市\n电梯运行速度\n设备型号\n距离上一次维修天数',font_name=font_name,pos_hint={'x': 0.5, 'center_y': .5},font_size='16sp',color=(0,0,0,1)))
self.layout_note=BoxLayout(orientation = 'vertical' , size_hint = (0.5,0.8))
self.layout_note.add_widget(Button(on_press = self.on_press,text='输出\n电梯\n信息',font_name=font_name,pos_hint={'x': .5, 'y': 1},size_hint=(0.4,0.2),font_size='20sp',color=(0,0,0,1),background_color=color_shadow_blue))
self.layout_graph_note=BoxLayout(orientation = 'horizontal' , size_hint = (1,0.5))
self.layout_graph_note.add_widget(self.layout_graph)
self.layout_graph_note.add_widget(self.layout_info)
self.layout_graph_note.add_widget(self.layout_note)
self.add_widget(self.layout_graph_note)
self.layout2 = BoxLayout(orientation = 'horizontal' , size_hint = (1,.15))
self.add_widget(self.layout2)
self.layout2.add_widget(Label(text = "请按 'ESC'键或点击窗外以关闭窗口",font_name=font_name,font_size='20sp',color=(1,0,0,1)))
def on_release(self,event):
print ("Reminder OK Clicked!")
def on_press(self,event):
file_name='Log-{:}-{:}-{:}.tsv'.format(self.model_.year,self.model_.month,self.model_.day)
f=open(file_name,'w',encoding="utf-8")
f.write(self.body)
f.close()
# ------------------------------------------------------------------------------------------------#
# class for date | s.kv fil | identifier_name | |
gossip.rs | Gossip for GossipServer {
fn exchange(&mut self, ctx: RpcContext, req: GossipData, sink: UnarySink<GossipData>) {
ctx.spawn(
self.sender
.clone()
.send(GossipEvent::GossipReceived(req))
.map(|_| ())
.map_err(|_| error!("Failed to update gossip state")),
);
let out = self.state.get_current();
ctx.spawn(
sink.success(out)
.map_err(|err| error!("Error exhanging gossip: {:?}", err)),
);
}
}
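// The exchange RPC above is a symmetric push/pull: the caller's GossipData is
// forwarded to the event loop for merging, while this node's current view
// (stamped with a fresh hybrid timestamp) is returned on the same call.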
#[derive(Eq, PartialEq)]
enum ClientEvent {
GossipTick,
Done,
}
#[derive(Clone)]
pub enum PeerStateEvent {
PeerJoined(u64),
}
impl GossipServer {
pub fn new(node_id: u64, bootstrap: &[String], self_address: &str, clock: Clock) -> Self {
let (sender, receiver) = mpsc::channel(32);
let state = GossipState::new(node_id, self_address, bootstrap, sender.clone(), clock);
run_gossip_event_handler(receiver, state.new_ref(), node_id);
GossipServer { state, sender }
}
pub fn build_service(&self) -> Service {
create_gossip(self.clone())
}
pub fn state(&self) -> GossipState {
self.state.clone()
}
pub fn update_meta_leader(&self, id: u64) -> impl Future<Item = (), Error = ()> {
self.event(GossipEvent::MetaLeaderChanged(id))
}
pub fn update_node_liveness(&self, peer: PeerState) -> impl Future<Item = (), Error = ()> {
self.event(GossipEvent::PeerUpdate(peer))
}
fn event(&self, event: GossipEvent) -> impl Future<Item = (), Error = ()> {
self.sender.clone().send(event).map(|_| ()).map_err(|_| ())
}
}
fn run_gossip_event_handler(
receiver: mpsc::Receiver<GossipEvent>,
state: GossipStateRef,
self_id: u64,
) {
let f = receiver.for_each(move |event| {
match event {
GossipEvent::NewPeerDiscovered(address) => {
connect_to_client(state.upgrade(), self_id, &address);
}
GossipEvent::GossipReceived(data) => {
state.upgrade().merge_gossip(data);
}
GossipEvent::MetaLeaderChanged(id) => {
state.upgrade().update_meta_leader(id);
}
GossipEvent::PeerUpdate(peer) => {
state.upgrade().update_node_liveness(&peer);
}
};
Ok(())
});
tokio::spawn(f);
}
#[derive(Clone)]
pub struct GossipState {
inner: Arc<RwLock<InnerGossipState>>,
}
#[derive(Clone)]
pub struct GossipStateRef {
inner: Weak<RwLock<InnerGossipState>>,
}
struct InnerGossipState {
clock: Clock,
current: GossipData,
connections: HashMap<String, oneshot::Sender<()>>,
clients: HashMap<String, RpcClient>,
peers: HashMap<u64, GossipData>,
event_publisher: mpsc::Sender<GossipEvent>,
event_emitter: EventEmitter<PeerStateEvent>,
}
enum GossipEvent {
GossipReceived(GossipData),
NewPeerDiscovered(String),
MetaLeaderChanged(u64),
PeerUpdate(PeerState),
}
impl GossipState {
fn new(
node_id: u64,
self_address: &str,
bootstrap: &[String],
event_publisher: mpsc::Sender<GossipEvent>,
clock: Clock,
) -> Self {
let mut current = GossipData::new();
current.set_node_id(node_id);
current.set_address(self_address.to_string());
let event_emitter = EventEmitter::new(32);
let inner = InnerGossipState {
current,
event_publisher,
event_emitter,
clock,
connections: HashMap::new(),
clients: HashMap::new(),
peers: HashMap::new(),
};
inner.publish_peer_discovered(self_address);
bootstrap
.iter()
.for_each(|address| inner.publish_peer_discovered(address));
Self {
inner: Arc::new(RwLock::new(inner)),
}
}
fn get_current(&self) -> GossipData {
let locked = self.inner.read().unwrap();
let mut gossip = locked.current.clone();
gossip.set_updated_at(locked.clock.now().into());
gossip
}
pub fn get_client(&self, node_id: u64) -> Result<RpcClient, Error> {
self.inner.read().unwrap().get_client(node_id)
}
pub fn get_meta_leader_client(&self) -> Result<RpcClient, Error> {
let locked = self.inner.read().unwrap();
locked
.meta_leader_id()
.ok_or_else(|| err_msg("Leader not available"))
.and_then(|node_id| self.get_client(node_id))
}
fn merge_gossip(&self, gossip: GossipData) {
self.inner.write().unwrap().merge_gossip(gossip)
}
pub fn update_meta_leader(&self, node_id: u64) {
self.inner.write().unwrap().update_meta_leader(node_id)
}
fn update_node_liveness(&self, peer_state: &PeerState) {
self.inner.write().unwrap().update_node_liveness(peer_state)
}
fn new_ref(&self) -> GossipStateRef {
GossipStateRef {
inner: Arc::downgrade(&self.inner),
}
}
fn update_clock(&self, peer_sent_at: HybridTimestamp) {
self.inner
.read()
.unwrap()
.clock
.update(&peer_sent_at)
.unwrap_or_else(|err| error!("Failed to update clock: {:?}", err));
}
}
impl GossipStateRef {
fn upgrade(&self) -> GossipState {
GossipState {
inner: self.inner.upgrade().unwrap(),
}
}
}
impl InnerGossipState {
fn get_client(&self, node_id: u64) -> Result<RpcClient, Error> {
self.peers
.get(&node_id)
.and_then(|gossip| self.clients.get(gossip.get_address()))
.cloned()
.ok_or_else(|| format_err!("Not connected to '{}'", node_id))
}
fn add_connection(&mut self, addr: &str, sender: oneshot::Sender<()>, client: RpcClient) {
self.connections.insert(addr.to_string(), sender);
self.clients.insert(addr.to_string(), client);
}
fn merge_gossip(&mut self, gossip: GossipData) {
let peer_id = gossip.get_node_id();
let current_addrs = self.current.mut_peer_addresses();
if current_addrs.get(&peer_id).is_none() {
let address = gossip.get_address();
current_addrs.insert(peer_id, address.to_string());
self.publish_peer_discovered(address);
}
gossip
.get_node_liveness()
.values()
.for_each(|peer| self.update_node_liveness(peer));
gossip
.get_peer_addresses()
.iter()
.filter(|(id, _)| !self.peers.contains_key(id))
.for_each(|(_, address)| self.publish_peer_discovered(address));
self.peers.insert(peer_id, gossip);
}
fn publish_event(&self, event: GossipEvent) {
let f = self.event_publisher.clone().send(event);
tokio::spawn(f.map(|_| ()).map_err(|_| ()));
}
fn | (&self, address: &str) {
self.publish_event(GossipEvent::NewPeerDiscovered(address.to_string()));
}
fn update_meta_leader(&mut self, node_id: u64) {
self.current.set_meta_leader_id(node_id);
}
fn meta_leader_id(&self) -> Option<u64> {
if self.current.meta_leader_id != 0 {
return Some(self.current.meta_leader_id);
}
self.peers
.values()
.filter(|peer| peer.meta_leader_id != 0)
.max_by_key(|peer| -> HybridTimestamp { peer.get_updated_at().into() })
.map(|peer| peer.meta_leader_id)
}
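// Sketch of the fallback above with hypothetical peer state: if the local
// meta_leader_id is 0 and peers report {node 2: leader 5 @ t=100, node 3:
// leader 7 @ t=250}, the freshest report wins and Some(7) is returned.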
fn update_node_liveness(&mut self, peer: &PeerState) {
let peer_id = peer.get_peer().id;
if self.current.get_node_liveness().get(&peer_id).is_none() {
self.emit_new_live_node(peer_id)
}
self.current
.mut_node_liveness()
.insert(peer_id, peer.clone());
}
fn emit_new_live_node(&self, peer_id: u64) {
self.event_emitter.emit(PeerStateEvent::PeerJoined(peer_id))
}
}
struct ClientContext {
state: GossipStateRef,
client: RpcClient,
}
fn connect_to_client(state: GossipState, self_id: u64, address: &str) {
let mut locked_state = state.inner.write().unwrap();
if locked_state.connections.contains_key(address) {
return;
}
info!("Discovered: {}", address);
let client = RpcClient::new(self_id, address);
let (sender, receiver) = oneshot::channel();
locked_state.add_connection(address, sender, | publish_peer_discovered | identifier_name |
gossip.rs | Gossip for GossipServer {
fn exchange(&mut self, ctx: RpcContext, req: GossipData, sink: UnarySink<GossipData>) {
ctx.spawn(
self.sender
.clone()
.send(GossipEvent::GossipReceived(req))
.map(|_| ())
.map_err(|_| error!("Failed to update gossip state")),
);
let out = self.state.get_current();
ctx.spawn(
sink.success(out)
.map_err(|err| error!("Error exhanging gossip: {:?}", err)),
);
}
}
#[derive(Eq, PartialEq)]
enum ClientEvent {
GossipTick,
Done,
}
#[derive(Clone)]
pub enum PeerStateEvent {
PeerJoined(u64),
}
impl GossipServer {
pub fn new(node_id: u64, bootstrap: &[String], self_address: &str, clock: Clock) -> Self |
pub fn build_service(&self) -> Service {
create_gossip(self.clone())
}
pub fn state(&self) -> GossipState {
self.state.clone()
}
pub fn update_meta_leader(&self, id: u64) -> impl Future<Item = (), Error = ()> {
self.event(GossipEvent::MetaLeaderChanged(id))
}
pub fn update_node_liveness(&self, peer: PeerState) -> impl Future<Item = (), Error = ()> {
self.event(GossipEvent::PeerUpdate(peer))
}
fn event(&self, event: GossipEvent) -> impl Future<Item = (), Error = ()> {
self.sender.clone().send(event).map(|_| ()).map_err(|_| ())
}
}
fn run_gossip_event_handler(
receiver: mpsc::Receiver<GossipEvent>,
state: GossipStateRef,
self_id: u64,
) {
let f = receiver.for_each(move |event| {
match event {
GossipEvent::NewPeerDiscovered(address) => {
connect_to_client(state.upgrade(), self_id, &address);
}
GossipEvent::GossipReceived(data) => {
state.upgrade().merge_gossip(data);
}
GossipEvent::MetaLeaderChanged(id) => {
state.upgrade().update_meta_leader(id);
}
GossipEvent::PeerUpdate(peer) => {
state.upgrade().update_node_liveness(&peer);
}
};
Ok(())
});
tokio::spawn(f);
}
#[derive(Clone)]
pub struct GossipState {
inner: Arc<RwLock<InnerGossipState>>,
}
#[derive(Clone)]
pub struct GossipStateRef {
inner: Weak<RwLock<InnerGossipState>>,
}
struct InnerGossipState {
clock: Clock,
current: GossipData,
connections: HashMap<String, oneshot::Sender<()>>,
clients: HashMap<String, RpcClient>,
peers: HashMap<u64, GossipData>,
event_publisher: mpsc::Sender<GossipEvent>,
event_emitter: EventEmitter<PeerStateEvent>,
}
enum GossipEvent {
GossipReceived(GossipData),
NewPeerDiscovered(String),
MetaLeaderChanged(u64),
PeerUpdate(PeerState),
}
impl GossipState {
fn new(
node_id: u64,
self_address: &str,
bootstrap: &[String],
event_publisher: mpsc::Sender<GossipEvent>,
clock: Clock,
) -> Self {
let mut current = GossipData::new();
current.set_node_id(node_id);
current.set_address(self_address.to_string());
let event_emitter = EventEmitter::new(32);
let inner = InnerGossipState {
current,
event_publisher,
event_emitter,
clock,
connections: HashMap::new(),
clients: HashMap::new(),
peers: HashMap::new(),
};
inner.publish_peer_discovered(self_address);
bootstrap
.iter()
.for_each(|address| inner.publish_peer_discovered(address));
Self {
inner: Arc::new(RwLock::new(inner)),
}
}
fn get_current(&self) -> GossipData {
let locked = self.inner.read().unwrap();
let mut gossip = locked.current.clone();
gossip.set_updated_at(locked.clock.now().into());
gossip
}
pub fn get_client(&self, node_id: u64) -> Result<RpcClient, Error> {
self.inner.read().unwrap().get_client(node_id)
}
pub fn get_meta_leader_client(&self) -> Result<RpcClient, Error> {
let locked = self.inner.read().unwrap();
locked
.meta_leader_id()
.ok_or_else(|| err_msg("Leader not available"))
.and_then(|node_id| self.get_client(node_id))
}
fn merge_gossip(&self, gossip: GossipData) {
self.inner.write().unwrap().merge_gossip(gossip)
}
pub fn update_meta_leader(&self, node_id: u64) {
self.inner.write().unwrap().update_meta_leader(node_id)
}
fn update_node_liveness(&self, peer_state: &PeerState) {
self.inner.write().unwrap().update_node_liveness(peer_state)
}
fn new_ref(&self) -> GossipStateRef {
GossipStateRef {
inner: Arc::downgrade(&self.inner),
}
}
fn update_clock(&self, peer_sent_at: HybridTimestamp) {
self.inner
.read()
.unwrap()
.clock
.update(&peer_sent_at)
.unwrap_or_else(|err| error!("Failed to update clock: {:?}", err));
}
}
impl GossipStateRef {
fn upgrade(&self) -> GossipState {
GossipState {
inner: self.inner.upgrade().unwrap(),
}
}
}
impl InnerGossipState {
fn get_client(&self, node_id: u64) -> Result<RpcClient, Error> {
self.peers
.get(&node_id)
.and_then(|gossip| self.clients.get(gossip.get_address()))
.cloned()
.ok_or_else(|| format_err!("Not connected to '{}'", node_id))
}
fn add_connection(&mut self, addr: &str, sender: oneshot::Sender<()>, client: RpcClient) {
self.connections.insert(addr.to_string(), sender);
self.clients.insert(addr.to_string(), client);
}
fn merge_gossip(&mut self, gossip: GossipData) {
let peer_id = gossip.get_node_id();
let current_addrs = self.current.mut_peer_addresses();
if current_addrs.get(&peer_id).is_none() {
let address = gossip.get_address();
current_addrs.insert(peer_id, address.to_string());
self.publish_peer_discovered(address);
}
gossip
.get_node_liveness()
.values()
.for_each(|peer| self.update_node_liveness(peer));
gossip
.get_peer_addresses()
.iter()
.filter(|(id, _)| !self.peers.contains_key(id))
.for_each(|(_, address)| self.publish_peer_discovered(address));
self.peers.insert(peer_id, gossip);
}
fn publish_event(&self, event: GossipEvent) {
let f = self.event_publisher.clone().send(event);
tokio::spawn(f.map(|_| ()).map_err(|_| ()));
}
fn publish_peer_discovered(&self, address: &str) {
self.publish_event(GossipEvent::NewPeerDiscovered(address.to_string()));
}
fn update_meta_leader(&mut self, node_id: u64) {
self.current.set_meta_leader_id(node_id);
}
fn meta_leader_id(&self) -> Option<u64> {
if self.current.meta_leader_id != 0 {
return Some(self.current.meta_leader_id);
}
self.peers
.values()
.filter(|peer| peer.meta_leader_id != 0)
.max_by_key(|peer| -> HybridTimestamp { peer.get_updated_at().into() })
.map(|peer| peer.meta_leader_id)
}
fn update_node_liveness(&mut self, peer: &PeerState) {
let peer_id = peer.get_peer().id;
if self.current.get_node_liveness().get(&peer_id).is_none() {
self.emit_new_live_node(peer_id)
}
self.current
.mut_node_liveness()
.insert(peer_id, peer.clone());
}
fn emit_new_live_node(&self, peer_id: u64) {
self.event_emitter.emit(PeerStateEvent::PeerJoined(peer_id))
}
}
struct ClientContext {
state: GossipStateRef,
client: RpcClient,
}
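// Connects at most once per address; the RpcClient and a oneshot sender
// (whose receiving end lies outside this excerpt) are registered while the
// write lock is held.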
fn connect_to_client(state: GossipState, self_id: u64, address: &str) {
let mut locked_state = state.inner.write().unwrap();
if locked_state.connections.contains_key(address) {
return;
}
info!("Discovered: {}", address);
let client = RpcClient::new(self_id, address);
let (sender, receiver) = oneshot::channel();
locked_state.add_connection(address, sender | {
let (sender, receiver) = mpsc::channel(32);
let state = GossipState::new(node_id, self_address, bootstrap, sender.clone(), clock);
run_gossip_event_handler(receiver, state.new_ref(), node_id);
GossipServer { state, sender }
} | identifier_body |
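Since the rows above and below repeat the same module, the core idiom is worth one standalone illustration. Below is a minimal, hypothetical sketch, not part of the dataset, of the futures-0.1 event-channel pattern that GossipServer::new wires up: producers clone an mpsc Sender, and a single task consumes the Receiver and applies events in order. The Event enum, the sample values, and main are invented for the example; the sketch assumes the futures 0.1 and tokio 0.1 crates.

extern crate futures; // futures 0.1, matching the Future<Item, Error> style above
extern crate tokio; // tokio 0.1

use futures::sync::mpsc;
use futures::{Future, Sink, Stream};

#[derive(Debug)]
enum Event {
    PeerDiscovered(String),
    LeaderChanged(u64),
}

fn main() {
    let (sender, receiver) = mpsc::channel::<Event>(32);

    // The handler is the sole consumer, so event application is serialized
    // without any producer ever holding a lock.
    let handler = receiver.for_each(|event| {
        match event {
            Event::PeerDiscovered(addr) => println!("discovered {}", addr),
            Event::LeaderChanged(id) => println!("leader is now {}", id),
        }
        Ok(())
    });

    // Sending consumes and returns the Sender; dropping it at the end closes
    // the stream so the handler (and the runtime) can shut down.
    let produce = sender
        .send(Event::PeerDiscovered("127.0.0.1:7000".to_string()))
        .and_then(|s| s.send(Event::LeaderChanged(1)))
        .map(|_| ())
        .map_err(|_| ());

    tokio::run(produce.join(handler).map(|_| ()));
}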
gossip.rs | Gossip for GossipServer {
fn exchange(&mut self, ctx: RpcContext, req: GossipData, sink: UnarySink<GossipData>) {
ctx.spawn(
self.sender
.clone()
.send(GossipEvent::GossipReceived(req))
.map(|_| ())
.map_err(|_| error!("Failed to update gossip state")),
);
let out = self.state.get_current();
ctx.spawn(
sink.success(out)
.map_err(|err| error!("Error exhanging gossip: {:?}", err)),
);
}
}
#[derive(Eq, PartialEq)]
enum ClientEvent {
GossipTick,
Done,
}
#[derive(Clone)]
pub enum PeerStateEvent {
PeerJoined(u64),
}
impl GossipServer {
pub fn new(node_id: u64, bootstrap: &[String], self_address: &str, clock: Clock) -> Self {
let (sender, receiver) = mpsc::channel(32);
let state = GossipState::new(node_id, self_address, bootstrap, sender.clone(), clock);
run_gossip_event_handler(receiver, state.new_ref(), node_id);
GossipServer { state, sender }
}
pub fn build_service(&self) -> Service {
create_gossip(self.clone())
}
pub fn state(&self) -> GossipState {
self.state.clone()
}
pub fn update_meta_leader(&self, id: u64) -> impl Future<Item = (), Error = ()> {
self.event(GossipEvent::MetaLeaderChanged(id))
}
pub fn update_node_liveness(&self, peer: PeerState) -> impl Future<Item = (), Error = ()> {
self.event(GossipEvent::PeerUpdate(peer))
}
fn event(&self, event: GossipEvent) -> impl Future<Item = (), Error = ()> {
self.sender.clone().send(event).map(|_| ()).map_err(|_| ())
}
}
fn run_gossip_event_handler(
receiver: mpsc::Receiver<GossipEvent>,
state: GossipStateRef,
self_id: u64,
) {
let f = receiver.for_each(move |event| {
match event {
GossipEvent::NewPeerDiscovered(address) => {
connect_to_client(state.upgrade(), self_id, &address);
}
GossipEvent::GossipReceived(data) => {
state.upgrade().merge_gossip(data);
}
GossipEvent::MetaLeaderChanged(id) => {
state.upgrade().update_meta_leader(id);
}
GossipEvent::PeerUpdate(peer) => {
state.upgrade().update_node_liveness(&peer);
}
};
Ok(())
});
tokio::spawn(f);
}
#[derive(Clone)]
pub struct GossipState {
inner: Arc<RwLock<InnerGossipState>>,
}
#[derive(Clone)]
pub struct GossipStateRef {
inner: Weak<RwLock<InnerGossipState>>,
}
struct InnerGossipState {
clock: Clock,
current: GossipData,
connections: HashMap<String, oneshot::Sender<()>>,
clients: HashMap<String, RpcClient>,
peers: HashMap<u64, GossipData>,
event_publisher: mpsc::Sender<GossipEvent>,
event_emitter: EventEmitter<PeerStateEvent>,
}
enum GossipEvent {
GossipReceived(GossipData),
NewPeerDiscovered(String),
MetaLeaderChanged(u64),
PeerUpdate(PeerState),
}
impl GossipState {
fn new(
node_id: u64,
self_address: &str,
bootstrap: &[String],
event_publisher: mpsc::Sender<GossipEvent>,
clock: Clock,
) -> Self {
let mut current = GossipData::new();
current.set_node_id(node_id);
current.set_address(self_address.to_string());
let event_emitter = EventEmitter::new(32);
let inner = InnerGossipState {
current,
event_publisher,
event_emitter,
clock,
connections: HashMap::new(),
clients: HashMap::new(),
peers: HashMap::new(),
};
inner.publish_peer_discovered(self_address);
bootstrap
.iter()
.for_each(|address| inner.publish_peer_discovered(address));
Self {
inner: Arc::new(RwLock::new(inner)),
}
}
fn get_current(&self) -> GossipData {
let locked = self.inner.read().unwrap();
let mut gossip = locked.current.clone();
gossip.set_updated_at(locked.clock.now().into());
gossip
}
pub fn get_client(&self, node_id: u64) -> Result<RpcClient, Error> {
self.inner.read().unwrap().get_client(node_id)
}
pub fn get_meta_leader_client(&self) -> Result<RpcClient, Error> {
let locked = self.inner.read().unwrap();
locked
.meta_leader_id()
.ok_or_else(|| err_msg("Leader not available"))
.and_then(|node_id| self.get_client(node_id))
}
fn merge_gossip(&self, gossip: GossipData) {
self.inner.write().unwrap().merge_gossip(gossip)
}
pub fn update_meta_leader(&self, node_id: u64) {
self.inner.write().unwrap().update_meta_leader(node_id)
}
fn update_node_liveness(&self, peer_state: &PeerState) {
self.inner.write().unwrap().update_node_liveness(peer_state)
}
fn new_ref(&self) -> GossipStateRef {
GossipStateRef {
inner: Arc::downgrade(&self.inner),
}
}
fn update_clock(&self, peer_sent_at: HybridTimestamp) {
self.inner
.read()
.unwrap()
.clock
.update(&peer_sent_at)
.unwrap_or_else(|err| error!("Failed to update clock: {:?}", err));
}
}
impl GossipStateRef {
fn upgrade(&self) -> GossipState {
GossipState {
inner: self.inner.upgrade().unwrap(),
}
}
}
impl InnerGossipState {
fn get_client(&self, node_id: u64) -> Result<RpcClient, Error> {
self.peers
.get(&node_id)
.and_then(|gossip| self.clients.get(gossip.get_address()))
.cloned()
.ok_or_else(|| format_err!("Not connected to '{}'", node_id))
}
fn add_connection(&mut self, addr: &str, sender: oneshot::Sender<()>, client: RpcClient) {
self.connections.insert(addr.to_string(), sender);
self.clients.insert(addr.to_string(), client);
}
fn merge_gossip(&mut self, gossip: GossipData) {
let peer_id = gossip.get_node_id();
let current_addrs = self.current.mut_peer_addresses();
if current_addrs.get(&peer_id).is_none() |
gossip
.get_node_liveness()
.values()
.for_each(|peer| self.update_node_liveness(peer));
gossip
.get_peer_addresses()
.iter()
.filter(|(id, _)| !self.peers.contains_key(id))
.for_each(|(_, address)| self.publish_peer_discovered(address));
self.peers.insert(peer_id, gossip);
}
fn publish_event(&self, event: GossipEvent) {
let f = self.event_publisher.clone().send(event);
tokio::spawn(f.map(|_| ()).map_err(|_| ()));
}
fn publish_peer_discovered(&self, address: &str) {
self.publish_event(GossipEvent::NewPeerDiscovered(address.to_string()));
}
fn update_meta_leader(&mut self, node_id: u64) {
self.current.set_meta_leader_id(node_id);
}
fn meta_leader_id(&self) -> Option<u64> {
if self.current.meta_leader_id != 0 {
return Some(self.current.meta_leader_id);
}
self.peers
.values()
.filter(|peer| peer.meta_leader_id != 0)
.max_by_key(|peer| -> HybridTimestamp { peer.get_updated_at().into() })
.map(|peer| peer.meta_leader_id)
}
fn update_node_liveness(&mut self, peer: &PeerState) {
let peer_id = peer.get_peer().id;
if self.current.get_node_liveness().get(&peer_id).is_none() {
self.emit_new_live_node(peer_id)
}
self.current
.mut_node_liveness()
.insert(peer_id, peer.clone());
}
fn emit_new_live_node(&self, peer_id: u64) {
self.event_emitter.emit(PeerStateEvent::PeerJoined(peer_id))
}
}
struct ClientContext {
state: GossipStateRef,
client: RpcClient,
}
fn connect_to_client(state: GossipState, self_id: u64, address: &str) {
let mut locked_state = state.inner.write().unwrap();
if locked_state.connections.contains_key(address) {
return;
}
info!("Discovered: {}", address);
let client = RpcClient::new(self_id, address);
let (sender, receiver) = oneshot::channel();
locked_state.add_connection(address, sender | {
let address = gossip.get_address();
current_addrs.insert(peer_id, address.to_string());
self.publish_peer_discovered(address);
} | conditional_block |
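The conditional_block middle restored just above guards peer-address bookkeeping, and the same first-sight-only shape is easy to show in isolation. The sketch below is invented for illustration and assumes only the standard library; note_peer and the sample values are not from the dataset.

use std::collections::HashMap;

// First-sight-only bookkeeping: record an address for a node id once and fire
// the discovery hook exactly once, mirroring the restored block's shape.
fn note_peer(addrs: &mut HashMap<u64, String>, id: u64, addr: &str, on_new: impl FnOnce(&str)) {
    if !addrs.contains_key(&id) {
        addrs.insert(id, addr.to_string());
        on_new(addr);
    }
}

fn main() {
    let mut addrs = HashMap::new();
    note_peer(&mut addrs, 7, "10.0.0.7:9000", |a| println!("discovered {}", a));
    // A second sighting of node 7 is silent: no duplicate discovery event.
    note_peer(&mut addrs, 7, "10.0.0.7:9000", |a| println!("discovered {}", a));
}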
gossip.rs | Gossip for GossipServer {
fn exchange(&mut self, ctx: RpcContext, req: GossipData, sink: UnarySink<GossipData>) {
ctx.spawn(
self.sender
.clone()
.send(GossipEvent::GossipReceived(req))
.map(|_| ())
.map_err(|_| error!("Failed to update gossip state")),
);
let out = self.state.get_current();
ctx.spawn(
sink.success(out)
.map_err(|err| error!("Error exhanging gossip: {:?}", err)),
);
}
}
#[derive(Eq, PartialEq)]
enum ClientEvent {
GossipTick,
Done,
}
#[derive(Clone)]
pub enum PeerStateEvent {
PeerJoined(u64),
}
impl GossipServer {
pub fn new(node_id: u64, bootstrap: &[String], self_address: &str, clock: Clock) -> Self {
let (sender, receiver) = mpsc::channel(32);
let state = GossipState::new(node_id, self_address, bootstrap, sender.clone(), clock);
run_gossip_event_handler(receiver, state.new_ref(), node_id);
GossipServer { state, sender }
}
pub fn build_service(&self) -> Service {
create_gossip(self.clone())
}
pub fn state(&self) -> GossipState {
self.state.clone()
}
pub fn update_meta_leader(&self, id: u64) -> impl Future<Item = (), Error = ()> {
self.event(GossipEvent::MetaLeaderChanged(id))
}
pub fn update_node_liveness(&self, peer: PeerState) -> impl Future<Item = (), Error = ()> {
self.event(GossipEvent::PeerUpdate(peer))
}
fn event(&self, event: GossipEvent) -> impl Future<Item = (), Error = ()> {
self.sender.clone().send(event).map(|_| ()).map_err(|_| ())
}
}
fn run_gossip_event_handler(
receiver: mpsc::Receiver<GossipEvent>,
state: GossipStateRef,
self_id: u64,
) {
let f = receiver.for_each(move |event| {
match event {
GossipEvent::NewPeerDiscovered(address) => {
connect_to_client(state.upgrade(), self_id, &address);
}
GossipEvent::GossipReceived(data) => {
state.upgrade().merge_gossip(data);
}
GossipEvent::MetaLeaderChanged(id) => {
state.upgrade().update_meta_leader(id);
}
GossipEvent::PeerUpdate(peer) => {
state.upgrade().update_node_liveness(&peer);
}
};
Ok(())
});
tokio::spawn(f);
}
#[derive(Clone)]
pub struct GossipState {
inner: Arc<RwLock<InnerGossipState>>,
}
#[derive(Clone)]
pub struct GossipStateRef {
inner: Weak<RwLock<InnerGossipState>>,
}
struct InnerGossipState {
clock: Clock,
current: GossipData,
connections: HashMap<String, oneshot::Sender<()>>,
clients: HashMap<String, RpcClient>,
peers: HashMap<u64, GossipData>,
event_publisher: mpsc::Sender<GossipEvent>,
event_emitter: EventEmitter<PeerStateEvent>,
}
enum GossipEvent {
GossipReceived(GossipData),
NewPeerDiscovered(String),
MetaLeaderChanged(u64),
PeerUpdate(PeerState),
}
impl GossipState {
fn new(
node_id: u64,
self_address: &str,
bootstrap: &[String],
event_publisher: mpsc::Sender<GossipEvent>,
clock: Clock,
) -> Self {
let mut current = GossipData::new();
current.set_node_id(node_id);
current.set_address(self_address.to_string());
let event_emitter = EventEmitter::new(32);
let inner = InnerGossipState {
current,
event_publisher, | peers: HashMap::new(),
};
inner.publish_peer_discovered(self_address);
bootstrap
.iter()
.for_each(|address| inner.publish_peer_discovered(address));
Self {
inner: Arc::new(RwLock::new(inner)),
}
}
fn get_current(&self) -> GossipData {
let locked = self.inner.read().unwrap();
let mut gossip = locked.current.clone();
gossip.set_updated_at(locked.clock.now().into());
gossip
}
pub fn get_client(&self, node_id: u64) -> Result<RpcClient, Error> {
self.inner.read().unwrap().get_client(node_id)
}
pub fn get_meta_leader_client(&self) -> Result<RpcClient, Error> {
let locked = self.inner.read().unwrap();
locked
.meta_leader_id()
.ok_or_else(|| err_msg("Leader not available"))
.and_then(|node_id| self.get_client(node_id))
}
fn merge_gossip(&self, gossip: GossipData) {
self.inner.write().unwrap().merge_gossip(gossip)
}
pub fn update_meta_leader(&self, node_id: u64) {
self.inner.write().unwrap().update_meta_leader(node_id)
}
fn update_node_liveness(&self, peer_state: &PeerState) {
self.inner.write().unwrap().update_node_liveness(peer_state)
}
fn new_ref(&self) -> GossipStateRef {
GossipStateRef {
inner: Arc::downgrade(&self.inner),
}
}
fn update_clock(&self, peer_sent_at: HybridTimestamp) {
self.inner
.read()
.unwrap()
.clock
.update(&peer_sent_at)
.unwrap_or_else(|err| error!("Failed to update clock: {:?}", err));
}
}
impl GossipStateRef {
fn upgrade(&self) -> GossipState {
GossipState {
inner: self.inner.upgrade().unwrap(),
}
}
}
impl InnerGossipState {
fn get_client(&self, node_id: u64) -> Result<RpcClient, Error> {
self.peers
.get(&node_id)
.and_then(|gossip| self.clients.get(gossip.get_address()))
.cloned()
.ok_or_else(|| format_err!("Not connected to '{}'", node_id))
}
fn add_connection(&mut self, addr: &str, sender: oneshot::Sender<()>, client: RpcClient) {
self.connections.insert(addr.to_string(), sender);
self.clients.insert(addr.to_string(), client);
}
fn merge_gossip(&mut self, gossip: GossipData) {
let peer_id = gossip.get_node_id();
let current_addrs = self.current.mut_peer_addresses();
if current_addrs.get(&peer_id).is_none() {
let address = gossip.get_address();
current_addrs.insert(peer_id, address.to_string());
self.publish_peer_discovered(address);
}
gossip
.get_node_liveness()
.values()
.for_each(|peer| self.update_node_liveness(peer));
gossip
.get_peer_addresses()
.iter()
.filter(|(id, _)| !self.peers.contains_key(id))
.for_each(|(_, address)| self.publish_peer_discovered(address));
self.peers.insert(peer_id, gossip);
}
fn publish_event(&self, event: GossipEvent) {
let f = self.event_publisher.clone().send(event);
tokio::spawn(f.map(|_| ()).map_err(|_| ()));
}
fn publish_peer_discovered(&self, address: &str) {
self.publish_event(GossipEvent::NewPeerDiscovered(address.to_string()));
}
fn update_meta_leader(&mut self, node_id: u64) {
self.current.set_meta_leader_id(node_id);
}
fn meta_leader_id(&self) -> Option<u64> {
if self.current.meta_leader_id != 0 {
return Some(self.current.meta_leader_id);
}
self.peers
.values()
.filter(|peer| peer.meta_leader_id != 0)
.max_by_key(|peer| -> HybridTimestamp { peer.get_updated_at().into() })
.map(|peer| peer.meta_leader_id)
}
fn update_node_liveness(&mut self, peer: &PeerState) {
let peer_id = peer.get_peer().id;
if self.current.get_node_liveness().get(&peer_id).is_none() {
self.emit_new_live_node(peer_id)
}
self.current
.mut_node_liveness()
.insert(peer_id, peer.clone());
}
fn emit_new_live_node(&self, peer_id: u64) {
self.event_emitter.emit(PeerStateEvent::PeerJoined(peer_id))
}
}
struct ClientContext {
state: GossipStateRef,
client: RpcClient,
}
fn connect_to_client(state: GossipState, self_id: u64, address: &str) {
let mut locked_state = state.inner.write().unwrap();
if locked_state.connections.contains_key(address) {
return;
}
info!("Discovered: {}", address);
let client = RpcClient::new(self_id, address);
let (sender, receiver) = oneshot::channel();
locked_state.add_connection(address, sender, client | event_emitter,
clock,
connections: HashMap::new(),
clients: HashMap::new(), | random_line_split |
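One pattern that recurs in every row is the GossipState/GossipStateRef pairing. The standalone sketch below, invented for illustration and assuming only the standard library, shows why the split matters: tasks holding a Weak cannot keep the shared state alive, and upgrading is the explicit, fallible step.

use std::sync::{Arc, RwLock, Weak};

// Owner holds the strong handle; Ref is what background tasks would keep.
struct Owner(Arc<RwLock<u64>>);
struct Ref(Weak<RwLock<u64>>);

impl Owner {
    fn new_ref(&self) -> Ref {
        Ref(Arc::downgrade(&self.0))
    }
}

fn main() {
    let owner = Owner(Arc::new(RwLock::new(0)));
    let weak = owner.new_ref();
    *owner.0.write().unwrap() = 42;
    // upgrade() yields None once the last Arc is dropped; unwrapping here is
    // what GossipStateRef::upgrade does, trading a panic for brevity.
    if let Some(state) = weak.0.upgrade() {
        println!("value = {}", *state.read().unwrap());
    }
    drop(owner);
    assert!(weak.0.upgrade().is_none());
}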