| file_name (large_string, lengths 4-140) | prefix (large_string, lengths 0-39k) | suffix (large_string, lengths 0-36.1k) | middle (large_string, lengths 0-29.4k) | fim_type (large_string, 4 classes) |
|---|---|---|---|---|
server.go | /*
Copyright 2015 Cesanta Software Ltd.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package server
import (
"encoding/base64"
"encoding/json"
"fmt"
"math/rand"
"net"
"net/http"
"regexp"
"sort"
"strings"
"time"
"github.com/casbin/casbin/v2"
"github.com/cesanta/glog"
"github.com/docker/distribution/registry/auth/token"
"github.com/cesanta/docker_auth/auth_server/api"
"github.com/cesanta/docker_auth/auth_server/authn"
"github.com/cesanta/docker_auth/auth_server/authz"
)
var (
hostPortRegex = regexp.MustCompile(`^(?:\[(.+)\]:\d+|([^:]+):\d+)$`)
scopeRegex = regexp.MustCompile(`([a-z0-9]+)(\([a-z0-9]+\))?`)
)
type AuthServer struct {
config *Config
authenticators []api.Authenticator
authorizers []api.Authorizer
ga *authn.GoogleAuth
gha *authn.GitHubAuth
oidc *authn.OIDCAuth
glab *authn.GitlabAuth
}
func NewAuthServer(c *Config) (*AuthServer, error) {
as := &AuthServer{
config: c,
authorizers: []api.Authorizer{},
}
if c.ACL != nil {
staticAuthorizer, err := authz.NewACLAuthorizer(c.ACL)
if err != nil {
return nil, err
}
as.authorizers = append(as.authorizers, staticAuthorizer)
}
if c.ACLMongo != nil {
mongoAuthorizer, err := authz.NewACLMongoAuthorizer(c.ACLMongo)
if err != nil {
return nil, err
}
as.authorizers = append(as.authorizers, mongoAuthorizer)
}
if c.ACLXorm != nil {
xormAuthorizer, err := authz.NewACLXormAuthz(c.ACLXorm)
if err != nil {
return nil, err
}
as.authorizers = append(as.authorizers, xormAuthorizer)
}
if c.ExtAuthz != nil {
extAuthorizer := authz.NewExtAuthzAuthorizer(c.ExtAuthz)
as.authorizers = append(as.authorizers, extAuthorizer)
}
if c.Users != nil {
as.authenticators = append(as.authenticators, authn.NewStaticUserAuth(c.Users))
}
if c.ExtAuth != nil {
as.authenticators = append(as.authenticators, authn.NewExtAuth(c.ExtAuth))
}
if c.GoogleAuth != nil {
ga, err := authn.NewGoogleAuth(c.GoogleAuth)
if err != nil {
return nil, err
}
as.authenticators = append(as.authenticators, ga)
as.ga = ga
}
if c.GitHubAuth != nil {
gha, err := authn.NewGitHubAuth(c.GitHubAuth)
if err != nil {
return nil, err
}
as.authenticators = append(as.authenticators, gha)
as.gha = gha
}
if c.OIDCAuth != nil {
oidc, err := authn.NewOIDCAuth(c.OIDCAuth)
if err != nil {
return nil, err
}
as.authenticators = append(as.authenticators, oidc)
as.oidc = oidc
}
if c.GitlabAuth != nil {
glab, err := authn.NewGitlabAuth(c.GitlabAuth)
if err != nil {
return nil, err
}
as.authenticators = append(as.authenticators, glab)
as.glab = glab
}
if c.LDAPAuth != nil {
la, err := authn.NewLDAPAuth(c.LDAPAuth)
if err != nil {
return nil, err
}
as.authenticators = append(as.authenticators, la)
}
if c.MongoAuth != nil {
ma, err := authn.NewMongoAuth(c.MongoAuth)
if err != nil {
return nil, err
}
as.authenticators = append(as.authenticators, ma)
}
if c.XormAuthn != nil {
xa, err := authn.NewXormAuth(c.XormAuthn)
if err != nil {
return nil, err
}
as.authenticators = append(as.authenticators, xa)
}
if c.PluginAuthn != nil {
pluginAuthn, err := authn.NewPluginAuthn(c.PluginAuthn)
if err != nil {
return nil, err
}
as.authenticators = append(as.authenticators, pluginAuthn)
}
if c.PluginAuthz != nil {
pluginAuthz, err := authz.NewPluginAuthzAuthorizer(c.PluginAuthz)
if err != nil {
return nil, err
}
as.authorizers = append(as.authorizers, pluginAuthz)
}
if c.CasbinAuthz != nil {
enforcer, err := casbin.NewEnforcer(c.CasbinAuthz.ModelFilePath, c.CasbinAuthz.PolicyFilePath)
if err != nil {
return nil, err
}
casbinAuthz, err := authz.NewCasbinAuthorizer(enforcer)
if err != nil {
return nil, err
}
as.authorizers = append(as.authorizers, casbinAuthz)
}
return as, nil
}
type authRequest struct {
RemoteConnAddr string
RemoteAddr string
RemoteIP net.IP
User string
Password api.PasswordString
Account string
Service string
Scopes []authScope
Labels api.Labels
}
type authScope struct {
Type string
Class string
Name string
Actions []string
}
type authzResult struct {
scope authScope
autorizedActions []string
}
func (ar authRequest) String() string {
return fmt.Sprintf("{%s:%s@%s %s}", ar.User, ar.Password, ar.RemoteAddr, ar.Scopes)
}
func parseRemoteAddr(ra string) net.IP {
hp := hostPortRegex.FindStringSubmatch(ra)
if hp != nil {
if hp[1] != "" {
ra = hp[1]
} else if hp[2] != "" {
ra = hp[2]
}
}
res := net.ParseIP(ra)
return res
}
func parseScope(scope string) (string, string, error) {
parts := scopeRegex.FindStringSubmatch(scope)
if parts == nil {
return "", "", fmt.Errorf("malformed scope request")
}
switch len(parts) {
case 3:
return parts[1], "", nil
case 4:
return parts[1], parts[3], nil
default:
return "", "", fmt.Errorf("malformed scope request")
}
}
func (as *AuthServer) ParseRequest(req *http.Request) (*authRequest, error) {
ar := &authRequest{RemoteConnAddr: req.RemoteAddr, RemoteAddr: req.RemoteAddr}
if as.config.Server.RealIPHeader != "" {
hv := req.Header.Get(as.config.Server.RealIPHeader)
ips := strings.Split(hv, ",")
realIPPos := as.config.Server.RealIPPos
if realIPPos < 0 {
realIPPos = len(ips) + realIPPos
if realIPPos < 0 {
realIPPos = 0
}
}
ar.RemoteAddr = strings.TrimSpace(ips[realIPPos])
glog.V(3).Infof("Conn ip %s, %s: %s, addr: %s", ar.RemoteAddr, as.config.Server.RealIPHeader, hv, ar.RemoteAddr)
if ar.RemoteAddr == "" {
return nil, fmt.Errorf("client address not provided")
}
}
ar.RemoteIP = parseRemoteAddr(ar.RemoteAddr)
if ar.RemoteIP == nil {
return nil, fmt.Errorf("unable to parse remote addr %s", ar.RemoteAddr)
}
user, password, haveBasicAuth := req.BasicAuth()
if haveBasicAuth {
ar.User = user
ar.Password = api.PasswordString(password)
} else if req.Method == "POST" {
// username and password could be part of form data
username := req.FormValue("username")
password := req.FormValue("password")
if username != "" && password != "" {
ar.User = username
ar.Password = api.PasswordString(password)
}
}
ar.Account = req.FormValue("account")
if ar.Account == "" {
ar.Account = ar.User
} else if haveBasicAuth && ar.Account != ar.User {
return nil, fmt.Errorf("user and account are not the same (%q vs %q)", ar.User, ar.Account)
}
ar.Service = req.FormValue("service")
if err := req.ParseForm(); err != nil {
return nil, fmt.Errorf("invalid form value")
}
// https://github.com/docker/distribution/blob/1b9ab303a477ded9bdd3fc97e9119fa8f9e58fca/docs/spec/auth/scope.md#resource-scope-grammar
if req.FormValue("scope") != "" {
for _, scopeValue := range req.Form["scope"] {
for _, scopeStr := range strings.Split(scopeValue, " ") {
parts := strings.Split(scopeStr, ":")
var scope authScope
scopeType, scopeClass, err := parseScope(parts[0])
if err != nil {
return nil, err
}
switch len(parts) {
case 3:
scope = authScope{
Type: scopeType,
Class: scopeClass,
Name: parts[1],
Actions: strings.Split(parts[2], ","),
}
case 4:
scope = authScope{
Type: scopeType,
Class: scopeClass,
Name: parts[1] + ":" + parts[2],
Actions: strings.Split(parts[3], ","),
}
default:
return nil, fmt.Errorf("invalid scope: %q", scopeStr)
}
sort.Strings(scope.Actions)
ar.Scopes = append(ar.Scopes, scope)
}
}
}
return ar, nil
}
func (as *AuthServer) Authenticate(ar *authRequest) (bool, api.Labels, error) {
for i, a := range as.authenticators {
result, labels, err := a.Authenticate(ar.Account, ar.Password)
glog.V(2).Infof("Authn %s %s -> %t, %+v, %v", a.Name(), ar.Account, result, labels, err)
if err != nil |
return result, labels, nil
}
// Deny by default.
glog.Warningf("%s did not match any authn rule", ar)
return false, nil, nil
}
func (as *AuthServer) authorizeScope(ai *api.AuthRequestInfo) ([]string, error) {
for i, a := range as.authorizers {
result, err := a.Authorize(ai)
glog.V(2).Infof("Authz %s %s -> %s, %s", a.Name(), *ai, result, err)
if err != nil {
if err == api.NoMatch {
continue
}
err = fmt.Errorf("authz #%d returned error: %s", i+1, err)
glog.Errorf("%s: %s", *ai, err)
return nil, err
}
return result, nil
}
// Deny by default.
glog.Warningf("%s did not match any authz rule", *ai)
return nil, nil
}
func (as *AuthServer) Authorize(ar *authRequest) ([]authzResult, error) {
ares := []authzResult{}
for _, scope := range ar.Scopes {
ai := &api.AuthRequestInfo{
Account: ar.Account,
Type: scope.Type,
Name: scope.Name,
Service: ar.Service,
IP: ar.RemoteIP,
Actions: scope.Actions,
Labels: ar.Labels,
}
actions, err := as.authorizeScope(ai)
if err != nil {
return nil, err
}
ares = append(ares, authzResult{scope: scope, autorizedActions: actions})
}
return ares, nil
}
// https://github.com/docker/distribution/blob/master/docs/spec/auth/token.md#example
func (as *AuthServer) CreateToken(ar *authRequest, ares []authzResult) (string, error) {
now := time.Now().Unix()
tc := &as.config.Token
// Sign something dummy to find out which algorithm is used.
_, sigAlg, err := tc.privateKey.Sign(strings.NewReader("dummy"), 0)
if err != nil {
return "", fmt.Errorf("failed to sign: %s", err)
}
header := token.Header{
Type: "JWT",
SigningAlg: sigAlg,
KeyID: tc.publicKey.KeyID(),
}
headerJSON, err := json.Marshal(header)
if err != nil {
return "", fmt.Errorf("failed to marshal header: %s", err)
}
claims := token.ClaimSet{
Issuer: tc.Issuer,
Subject: ar.Account,
Audience: ar.Service,
NotBefore: now - 10,
IssuedAt: now,
Expiration: now + tc.Expiration,
JWTID: fmt.Sprintf("%d", rand.Int63()),
Access: []*token.ResourceActions{},
}
for _, a := range ares {
ra := &token.ResourceActions{
Type: a.scope.Type,
Name: a.scope.Name,
Actions: a.autorizedActions,
}
if ra.Actions == nil {
ra.Actions = []string{}
}
sort.Strings(ra.Actions)
claims.Access = append(claims.Access, ra)
}
claimsJSON, err := json.Marshal(claims)
if err != nil {
return "", fmt.Errorf("failed to marshal claims: %s", err)
}
payload := fmt.Sprintf("%s%s%s", joseBase64UrlEncode(headerJSON), token.TokenSeparator, joseBase64UrlEncode(claimsJSON))
sig, sigAlg2, err := tc.privateKey.Sign(strings.NewReader(payload), 0)
if err != nil || sigAlg2 != sigAlg {
return "", fmt.Errorf("failed to sign token: %s", err)
}
glog.Infof("New token for %s %+v: %s", *ar, ar.Labels, claimsJSON)
return fmt.Sprintf("%s%s%s", payload, token.TokenSeparator, joseBase64UrlEncode(sig)), nil
}
func (as *AuthServer) ServeHTTP(rw http.ResponseWriter, req *http.Request) {
glog.V(3).Infof("Request: %+v", req)
path_prefix := as.config.Server.PathPrefix
if as.config.Server.HSTS {
rw.Header().Add("Strict-Transport-Security", "max-age=63072000; includeSubDomains")
}
switch {
case req.URL.Path == path_prefix+"/":
as.doIndex(rw, req)
case req.URL.Path == path_prefix+"/auth":
as.doAuth(rw, req)
case req.URL.Path == path_prefix+"/google_auth" && as.ga != nil:
as.ga.DoGoogleAuth(rw, req)
case req.URL.Path == path_prefix+"/github_auth" && as.gha != nil:
as.gha.DoGitHubAuth(rw, req)
case req.URL.Path == path_prefix+"/oidc_auth" && as.oidc != nil:
as.oidc.DoOIDCAuth(rw, req)
case req.URL.Path == path_prefix+"/gitlab_auth" && as.glab != nil:
as.glab.DoGitlabAuth(rw, req)
default:
http.Error(rw, "Not found", http.StatusNotFound)
return
}
}
// https://developers.google.com/identity/sign-in/web/server-side-flow
func (as *AuthServer) doIndex(rw http.ResponseWriter, req *http.Request) {
switch {
case as.ga != nil:
rw.Header().Set("Content-Type", "text/html; charset=utf-8")
fmt.Fprintf(rw, "<h1>%s</h1>\n", as.config.Token.Issuer)
fmt.Fprint(rw, `<p><a href="/google_auth">Login with Google account</a></p>`)
case as.gha != nil:
url := as.config.Server.PathPrefix + "/github_auth"
http.Redirect(rw, req, url, 301)
case as.oidc != nil:
url := as.config.Server.PathPrefix + "/oidc_auth"
http.Redirect(rw, req, url, 301)
case as.glab != nil:
url := as.config.Server.PathPrefix + "/gitlab_auth"
http.Redirect(rw, req, url, 301)
default:
rw.Header().Set("Content-Type", "text/html; charset=utf-8")
fmt.Fprintf(rw, "<h1>%s</h1>\n", as.config.Token.Issuer)
}
}
func (as *AuthServer) doAuth(rw http.ResponseWriter, req *http.Request) {
ar, err := as.ParseRequest(req)
ares := []authzResult{}
if err != nil {
glog.Warningf("Bad request: %s", err)
http.Error(rw, fmt.Sprintf("Bad request: %s", err), http.StatusBadRequest)
return
}
glog.V(2).Infof("Auth request: %+v", ar)
{
authnResult, labels, err := as.Authenticate(ar)
if err != nil {
http.Error(rw, fmt.Sprintf("Authentication failed (%s)", err), http.StatusInternalServerError)
return
}
if !authnResult {
glog.Warningf("Auth failed: %s", *ar)
rw.Header()["WWW-Authenticate"] = []string{fmt.Sprintf(`Basic realm="%s"`, as.config.Token.Issuer)}
http.Error(rw, "Auth failed.", http.StatusUnauthorized)
return
}
ar.Labels = labels
}
if len(ar.Scopes) > 0 {
ares, err = as.Authorize(ar)
if err != nil {
http.Error(rw, fmt.Sprintf("Authorization failed (%s)", err), http.StatusInternalServerError)
return
}
} else {
// Authentication-only request ("docker login"), pass through.
}
token, err := as.CreateToken(ar, ares)
if err != nil {
msg := fmt.Sprintf("Failed to generate token %s", err)
http.Error(rw, msg, http.StatusInternalServerError)
glog.Errorf("%s: %s", ar, msg)
return
}
// https://www.oauth.com/oauth2-servers/access-tokens/access-token-response/
// describes that the response should have the token in `access_token`
// https://docs.docker.com/registry/spec/auth/token/#token-response-fields
// the token should also be in `token` to support older clients
result, _ := json.Marshal(&map[string]string{"access_token": token, "token": token})
glog.V(3).Infof("%s", result)
rw.Header().Set("Content-Type", "application/json")
rw.Write(result)
}
func (as *AuthServer) Stop() {
for _, an := range as.authenticators {
an.Stop()
}
for _, az := range as.authorizers {
az.Stop()
}
glog.Infof("Server stopped")
}
// Copy-pasted from libtrust where it is private.
func joseBase64UrlEncode(b []byte) string {
return strings.TrimRight(base64.URLEncoding.EncodeToString(b), "=")
}
| {
if err == api.NoMatch {
continue
} else if err == api.WrongPass {
glog.Warningf("Failed authentication with %s: %s", err, ar.Account)
return false, nil, nil
}
err = fmt.Errorf("authn #%d returned error: %s", i+1, err)
glog.Errorf("%s: %s", ar, err)
return false, nil, err
} | conditional_block |
server.go | /*
Copyright 2015 Cesanta Software Ltd.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package server
import (
"encoding/base64"
"encoding/json"
"fmt"
"math/rand"
"net"
"net/http"
"regexp"
"sort"
"strings"
"time"
"github.com/casbin/casbin/v2"
"github.com/cesanta/glog"
"github.com/docker/distribution/registry/auth/token"
"github.com/cesanta/docker_auth/auth_server/api"
"github.com/cesanta/docker_auth/auth_server/authn"
"github.com/cesanta/docker_auth/auth_server/authz"
)
var (
hostPortRegex = regexp.MustCompile(`^(?:\[(.+)\]:\d+|([^:]+):\d+)$`)
scopeRegex = regexp.MustCompile(`([a-z0-9]+)(\([a-z0-9]+\))?`)
)
type AuthServer struct {
config *Config
authenticators []api.Authenticator
authorizers []api.Authorizer
ga *authn.GoogleAuth
gha *authn.GitHubAuth
oidc *authn.OIDCAuth
glab *authn.GitlabAuth
}
func NewAuthServer(c *Config) (*AuthServer, error) {
as := &AuthServer{
config: c,
authorizers: []api.Authorizer{},
}
if c.ACL != nil {
staticAuthorizer, err := authz.NewACLAuthorizer(c.ACL)
if err != nil {
return nil, err
}
as.authorizers = append(as.authorizers, staticAuthorizer)
}
if c.ACLMongo != nil {
mongoAuthorizer, err := authz.NewACLMongoAuthorizer(c.ACLMongo)
if err != nil {
return nil, err
}
as.authorizers = append(as.authorizers, mongoAuthorizer)
}
if c.ACLXorm != nil {
xormAuthorizer, err := authz.NewACLXormAuthz(c.ACLXorm)
if err != nil {
return nil, err
}
as.authorizers = append(as.authorizers, xormAuthorizer)
}
if c.ExtAuthz != nil {
extAuthorizer := authz.NewExtAuthzAuthorizer(c.ExtAuthz)
as.authorizers = append(as.authorizers, extAuthorizer)
}
if c.Users != nil {
as.authenticators = append(as.authenticators, authn.NewStaticUserAuth(c.Users))
}
if c.ExtAuth != nil {
as.authenticators = append(as.authenticators, authn.NewExtAuth(c.ExtAuth))
}
if c.GoogleAuth != nil {
ga, err := authn.NewGoogleAuth(c.GoogleAuth)
if err != nil {
return nil, err
}
as.authenticators = append(as.authenticators, ga)
as.ga = ga
}
if c.GitHubAuth != nil {
gha, err := authn.NewGitHubAuth(c.GitHubAuth)
if err != nil {
return nil, err
}
as.authenticators = append(as.authenticators, gha)
as.gha = gha
}
if c.OIDCAuth != nil {
oidc, err := authn.NewOIDCAuth(c.OIDCAuth)
if err != nil {
return nil, err
}
as.authenticators = append(as.authenticators, oidc)
as.oidc = oidc
}
if c.GitlabAuth != nil {
glab, err := authn.NewGitlabAuth(c.GitlabAuth)
if err != nil {
return nil, err
}
as.authenticators = append(as.authenticators, glab)
as.glab = glab
}
if c.LDAPAuth != nil {
la, err := authn.NewLDAPAuth(c.LDAPAuth)
if err != nil {
return nil, err
}
as.authenticators = append(as.authenticators, la)
}
if c.MongoAuth != nil {
ma, err := authn.NewMongoAuth(c.MongoAuth)
if err != nil {
return nil, err
}
as.authenticators = append(as.authenticators, ma)
}
if c.XormAuthn != nil {
xa, err := authn.NewXormAuth(c.XormAuthn)
if err != nil {
return nil, err
}
as.authenticators = append(as.authenticators, xa)
}
if c.PluginAuthn != nil {
pluginAuthn, err := authn.NewPluginAuthn(c.PluginAuthn)
if err != nil {
return nil, err
}
as.authenticators = append(as.authenticators, pluginAuthn)
}
if c.PluginAuthz != nil {
pluginAuthz, err := authz.NewPluginAuthzAuthorizer(c.PluginAuthz)
if err != nil {
return nil, err
}
as.authorizers = append(as.authorizers, pluginAuthz)
}
if c.CasbinAuthz != nil {
enforcer, err := casbin.NewEnforcer(c.CasbinAuthz.ModelFilePath, c.CasbinAuthz.PolicyFilePath)
if err != nil {
return nil, err
}
casbinAuthz, err := authz.NewCasbinAuthorizer(enforcer)
if err != nil {
return nil, err
}
as.authorizers = append(as.authorizers, casbinAuthz)
}
return as, nil
}
type authRequest struct {
RemoteConnAddr string
RemoteAddr string
RemoteIP net.IP
User string
Password api.PasswordString
Account string
Service string
Scopes []authScope
Labels api.Labels
}
type authScope struct {
Type string
Class string
Name string
Actions []string
}
type authzResult struct {
scope authScope
autorizedActions []string
}
func (ar authRequest) String() string {
return fmt.Sprintf("{%s:%s@%s %s}", ar.User, ar.Password, ar.RemoteAddr, ar.Scopes)
}
func parseRemoteAddr(ra string) net.IP {
hp := hostPortRegex.FindStringSubmatch(ra)
if hp != nil {
if hp[1] != "" {
ra = hp[1]
} else if hp[2] != "" {
ra = hp[2]
}
}
res := net.ParseIP(ra)
return res
}
func parseScope(scope string) (string, string, error) {
parts := scopeRegex.FindStringSubmatch(scope)
if parts == nil {
return "", "", fmt.Errorf("malformed scope request")
}
switch len(parts) {
case 3:
return parts[1], "", nil
case 4:
return parts[1], parts[3], nil
default:
return "", "", fmt.Errorf("malformed scope request")
}
}
func (as *AuthServer) ParseRequest(req *http.Request) (*authRequest, error) {
ar := &authRequest{RemoteConnAddr: req.RemoteAddr, RemoteAddr: req.RemoteAddr}
if as.config.Server.RealIPHeader != "" {
hv := req.Header.Get(as.config.Server.RealIPHeader)
ips := strings.Split(hv, ",")
realIPPos := as.config.Server.RealIPPos
if realIPPos < 0 {
realIPPos = len(ips) + realIPPos
if realIPPos < 0 {
realIPPos = 0
}
}
ar.RemoteAddr = strings.TrimSpace(ips[realIPPos])
glog.V(3).Infof("Conn ip %s, %s: %s, addr: %s", ar.RemoteAddr, as.config.Server.RealIPHeader, hv, ar.RemoteAddr)
if ar.RemoteAddr == "" {
return nil, fmt.Errorf("client address not provided")
}
}
ar.RemoteIP = parseRemoteAddr(ar.RemoteAddr)
if ar.RemoteIP == nil {
return nil, fmt.Errorf("unable to parse remote addr %s", ar.RemoteAddr)
}
user, password, haveBasicAuth := req.BasicAuth()
if haveBasicAuth {
ar.User = user
ar.Password = api.PasswordString(password)
} else if req.Method == "POST" {
// username and password could be part of form data
username := req.FormValue("username")
password := req.FormValue("password")
if username != "" && password != "" {
ar.User = username
ar.Password = api.PasswordString(password)
}
}
ar.Account = req.FormValue("account")
if ar.Account == "" {
ar.Account = ar.User
} else if haveBasicAuth && ar.Account != ar.User {
return nil, fmt.Errorf("user and account are not the same (%q vs %q)", ar.User, ar.Account)
}
ar.Service = req.FormValue("service")
if err := req.ParseForm(); err != nil {
return nil, fmt.Errorf("invalid form value")
}
// https://github.com/docker/distribution/blob/1b9ab303a477ded9bdd3fc97e9119fa8f9e58fca/docs/spec/auth/scope.md#resource-scope-grammar
if req.FormValue("scope") != "" {
for _, scopeValue := range req.Form["scope"] {
for _, scopeStr := range strings.Split(scopeValue, " ") {
parts := strings.Split(scopeStr, ":")
var scope authScope
scopeType, scopeClass, err := parseScope(parts[0])
if err != nil {
return nil, err
}
switch len(parts) {
case 3:
scope = authScope{
Type: scopeType,
Class: scopeClass,
Name: parts[1],
Actions: strings.Split(parts[2], ","),
}
case 4:
scope = authScope{
Type: scopeType,
Class: scopeClass,
Name: parts[1] + ":" + parts[2],
Actions: strings.Split(parts[3], ","),
}
default:
return nil, fmt.Errorf("invalid scope: %q", scopeStr)
}
sort.Strings(scope.Actions)
ar.Scopes = append(ar.Scopes, scope)
}
}
}
return ar, nil
}
func (as *AuthServer) Authenticate(ar *authRequest) (bool, api.Labels, error) {
for i, a := range as.authenticators {
result, labels, err := a.Authenticate(ar.Account, ar.Password)
glog.V(2).Infof("Authn %s %s -> %t, %+v, %v", a.Name(), ar.Account, result, labels, err)
if err != nil {
if err == api.NoMatch {
continue
} else if err == api.WrongPass {
glog.Warningf("Failed authentication with %s: %s", err, ar.Account)
return false, nil, nil
}
err = fmt.Errorf("authn #%d returned error: %s", i+1, err)
glog.Errorf("%s: %s", ar, err)
return false, nil, err
}
return result, labels, nil
}
// Deny by default.
glog.Warningf("%s did not match any authn rule", ar)
return false, nil, nil
}
func (as *AuthServer) authorizeScope(ai *api.AuthRequestInfo) ([]string, error) {
for i, a := range as.authorizers {
result, err := a.Authorize(ai)
glog.V(2).Infof("Authz %s %s -> %s, %s", a.Name(), *ai, result, err)
if err != nil {
if err == api.NoMatch {
continue
}
err = fmt.Errorf("authz #%d returned error: %s", i+1, err)
glog.Errorf("%s: %s", *ai, err)
return nil, err
}
return result, nil
}
// Deny by default.
glog.Warningf("%s did not match any authz rule", *ai)
return nil, nil
}
func (as *AuthServer) | (ar *authRequest) ([]authzResult, error) {
ares := []authzResult{}
for _, scope := range ar.Scopes {
ai := &api.AuthRequestInfo{
Account: ar.Account,
Type: scope.Type,
Name: scope.Name,
Service: ar.Service,
IP: ar.RemoteIP,
Actions: scope.Actions,
Labels: ar.Labels,
}
actions, err := as.authorizeScope(ai)
if err != nil {
return nil, err
}
ares = append(ares, authzResult{scope: scope, autorizedActions: actions})
}
return ares, nil
}
// https://github.com/docker/distribution/blob/master/docs/spec/auth/token.md#example
func (as *AuthServer) CreateToken(ar *authRequest, ares []authzResult) (string, error) {
now := time.Now().Unix()
tc := &as.config.Token
// Sign something dummy to find out which algorithm is used.
_, sigAlg, err := tc.privateKey.Sign(strings.NewReader("dummy"), 0)
if err != nil {
return "", fmt.Errorf("failed to sign: %s", err)
}
header := token.Header{
Type: "JWT",
SigningAlg: sigAlg,
KeyID: tc.publicKey.KeyID(),
}
headerJSON, err := json.Marshal(header)
if err != nil {
return "", fmt.Errorf("failed to marshal header: %s", err)
}
claims := token.ClaimSet{
Issuer: tc.Issuer,
Subject: ar.Account,
Audience: ar.Service,
NotBefore: now - 10,
IssuedAt: now,
Expiration: now + tc.Expiration,
JWTID: fmt.Sprintf("%d", rand.Int63()),
Access: []*token.ResourceActions{},
}
for _, a := range ares {
ra := &token.ResourceActions{
Type: a.scope.Type,
Name: a.scope.Name,
Actions: a.autorizedActions,
}
if ra.Actions == nil {
ra.Actions = []string{}
}
sort.Strings(ra.Actions)
claims.Access = append(claims.Access, ra)
}
claimsJSON, err := json.Marshal(claims)
if err != nil {
return "", fmt.Errorf("failed to marshal claims: %s", err)
}
payload := fmt.Sprintf("%s%s%s", joseBase64UrlEncode(headerJSON), token.TokenSeparator, joseBase64UrlEncode(claimsJSON))
sig, sigAlg2, err := tc.privateKey.Sign(strings.NewReader(payload), 0)
if err != nil || sigAlg2 != sigAlg {
return "", fmt.Errorf("failed to sign token: %s", err)
}
glog.Infof("New token for %s %+v: %s", *ar, ar.Labels, claimsJSON)
return fmt.Sprintf("%s%s%s", payload, token.TokenSeparator, joseBase64UrlEncode(sig)), nil
}
func (as *AuthServer) ServeHTTP(rw http.ResponseWriter, req *http.Request) {
glog.V(3).Infof("Request: %+v", req)
path_prefix := as.config.Server.PathPrefix
if as.config.Server.HSTS {
rw.Header().Add("Strict-Transport-Security", "max-age=63072000; includeSubDomains")
}
switch {
case req.URL.Path == path_prefix+"/":
as.doIndex(rw, req)
case req.URL.Path == path_prefix+"/auth":
as.doAuth(rw, req)
case req.URL.Path == path_prefix+"/google_auth" && as.ga != nil:
as.ga.DoGoogleAuth(rw, req)
case req.URL.Path == path_prefix+"/github_auth" && as.gha != nil:
as.gha.DoGitHubAuth(rw, req)
case req.URL.Path == path_prefix+"/oidc_auth" && as.oidc != nil:
as.oidc.DoOIDCAuth(rw, req)
case req.URL.Path == path_prefix+"/gitlab_auth" && as.glab != nil:
as.glab.DoGitlabAuth(rw, req)
default:
http.Error(rw, "Not found", http.StatusNotFound)
return
}
}
// https://developers.google.com/identity/sign-in/web/server-side-flow
func (as *AuthServer) doIndex(rw http.ResponseWriter, req *http.Request) {
switch {
case as.ga != nil:
rw.Header().Set("Content-Type", "text/html; charset=utf-8")
fmt.Fprintf(rw, "<h1>%s</h1>\n", as.config.Token.Issuer)
fmt.Fprint(rw, `<p><a href="/google_auth">Login with Google account</a></p>`)
case as.gha != nil:
url := as.config.Server.PathPrefix + "/github_auth"
http.Redirect(rw, req, url, 301)
case as.oidc != nil:
url := as.config.Server.PathPrefix + "/oidc_auth"
http.Redirect(rw, req, url, 301)
case as.glab != nil:
url := as.config.Server.PathPrefix + "/gitlab_auth"
http.Redirect(rw, req, url, 301)
default:
rw.Header().Set("Content-Type", "text/html; charset=utf-8")
fmt.Fprintf(rw, "<h1>%s</h1>\n", as.config.Token.Issuer)
}
}
func (as *AuthServer) doAuth(rw http.ResponseWriter, req *http.Request) {
ar, err := as.ParseRequest(req)
ares := []authzResult{}
if err != nil {
glog.Warningf("Bad request: %s", err)
http.Error(rw, fmt.Sprintf("Bad request: %s", err), http.StatusBadRequest)
return
}
glog.V(2).Infof("Auth request: %+v", ar)
{
authnResult, labels, err := as.Authenticate(ar)
if err != nil {
http.Error(rw, fmt.Sprintf("Authentication failed (%s)", err), http.StatusInternalServerError)
return
}
if !authnResult {
glog.Warningf("Auth failed: %s", *ar)
rw.Header()["WWW-Authenticate"] = []string{fmt.Sprintf(`Basic realm="%s"`, as.config.Token.Issuer)}
http.Error(rw, "Auth failed.", http.StatusUnauthorized)
return
}
ar.Labels = labels
}
if len(ar.Scopes) > 0 {
ares, err = as.Authorize(ar)
if err != nil {
http.Error(rw, fmt.Sprintf("Authorization failed (%s)", err), http.StatusInternalServerError)
return
}
} else {
// Authentication-only request ("docker login"), pass through.
}
token, err := as.CreateToken(ar, ares)
if err != nil {
msg := fmt.Sprintf("Failed to generate token %s", err)
http.Error(rw, msg, http.StatusInternalServerError)
glog.Errorf("%s: %s", ar, msg)
return
}
// https://www.oauth.com/oauth2-servers/access-tokens/access-token-response/
// describes that the response should have the token in `access_token`
// https://docs.docker.com/registry/spec/auth/token/#token-response-fields
// the token should also be in `token` to support older clients
result, _ := json.Marshal(&map[string]string{"access_token": token, "token": token})
glog.V(3).Infof("%s", result)
rw.Header().Set("Content-Type", "application/json")
rw.Write(result)
}
func (as *AuthServer) Stop() {
for _, an := range as.authenticators {
an.Stop()
}
for _, az := range as.authorizers {
az.Stop()
}
glog.Infof("Server stopped")
}
// Copy-pasted from libtrust where it is private.
func joseBase64UrlEncode(b []byte) string {
return strings.TrimRight(base64.URLEncoding.EncodeToString(b), "=")
}
| Authorize | identifier_name |
main.rs | extern crate argparse;
extern crate chrono;
extern crate colored;
extern crate rand;
extern crate serde;
extern crate serde_json;
extern crate time;
#[macro_use]
extern crate serde_derive;
use argparse::{ArgumentParser, Store, StoreOption};
use rand::Rng;
use std::fs::File;
use std::fs::OpenOptions;
use std::io::BufRead;
use std::io::BufReader;
use std::path::PathBuf;
use std::str::FromStr;
use time::Duration;
use std::env;
use std::io::Read;
use std::io::Result as IOResult;
use std::io::Write;
use colored::*;
use chrono::prelude::*;
enum VagueTime {
Tomorrow,
Today,
Evening,
NextWeek,
Day(u8),
}
impl FromStr for VagueTime {
type Err = ();
fn from_str(s: &str) -> Result<Self, Self::Err> {
use VagueTime::*;
match s {
"tomorrow" => Ok(Tomorrow),
"today" => Ok(Today),
"tonight" => Ok(Today),
"evening" => Ok(Evening),
"week" => Ok(NextWeek),
"next week" => Ok(NextWeek),
d => Ok(match u8::from_str(d) {
Ok(x) => Day(x),
Err(_e) => {
panic!("I don't understand the date you asked for!");
}
}),
}
}
}
impl VagueTime {
fn concretise(&self) -> DateTime<Local> {
use VagueTime::*;
let t0 = Local::now();
match self {
Tomorrow => t0 + Duration::days(1),
Today => Local::today().and_hms(23, 30, 0),
Evening => Local::today().and_hms(23, 00, 0),
NextWeek => t0 + Duration::days(7),
Day(d) => Local::today()
.with_day(u32::from(*d))
.unwrap()
.and_hms(15, 00, 0),
}
}
}
enum Command {
List,
Add,
Started,
Resolve,
Remove,
None,
}
impl FromStr for Command {
type Err = ();
fn from_str(s: &str) -> Result<Self, Self::Err> {
use Command::*;
Ok(match s {
"" => List,
"list" => List,
"add" => Add,
"start" => Started,
"resolve" => Resolve,
"remove" => Remove,
_ => None,
})
}
}
fn main() {
let mut command = Command::List;
let mut arg = "".to_owned();
let mut deadline: Option<VagueTime> = None;
let mut priority: u8 = 0;
{
let mut ap = ArgumentParser::new();
ap.set_description(
"Something to help me organise\nSupports commands:
list\n - add \"Text of task\"\n - start taskname
- \nresolve taskname\n - remove taskname
Supports setting deadlines which can be of the form
tommorow, today, tonight, evening, nextweek, or a day of this month as a single
number",
);
ap.refer(&mut command)
.add_argument("command", Store, "Command to run");
ap.refer(&mut arg)
.add_argument("arg", Store, "arg for command");
ap.refer(&mut deadline)
.add_option(&["-d", "--deadline"], StoreOption, "Deadline of task");
ap.refer(&mut priority)
.add_option(&["-p", "--priority"], Store, "Priority of task");
ap.parse_args_or_exit();
}
match command {
Command::List => {
do_list();
}
Command::Add => {
do_add(arg, priority, &deadline);
}
Command::Started => {
do_set_progress(&arg, Status::Started);
}
Command::Resolve => {
do_set_progress(&arg, Status::Resolved);
}
Command::Remove => {
do_remove(&arg);
}
_ => {
println!("Unrecognised argument, try todo --help");
}
}
}
#[derive(Serialize, Deserialize)]
struct Data {
entries: Vec<Entry>,
last_updated: DateTime<Local>,
}
impl Data {
fn new() -> Self {
Data {
entries: Vec::new(),
last_updated: Local::now(),
}
}
fn add_entry(&mut self, entry: Entry) {
self.entries.push(entry);
}
fn find_entry<'t>(&'t self, id: &str) -> Option<&'t Entry> {
for x in &self.entries {
if x.id == id {
return Some(x);
}
}
None
}
fn find_entry_mut<'t>(&'t mut self, id: &str) -> Option<&'t mut Entry> {
for x in &mut self.entries {
if x.id == id {
return Some(x);
}
}
None
}
fn remove_by_id(&mut self, id: &str) {
self.entries.retain(|x| x.id != id);
}
fn print(&self) {
if self.entries.is_empty() {
println!("Nothing todo, woooooo!");
}
for entry in &self.entries {
println!("{}", entry.format());
}
}
}
#[derive(Serialize, Deserialize)]
enum Status {
NotStarted,
Started,
Resolved,
}
impl Status {
fn is_urgent(&self) -> bool {
use Status::*;
match self {
NotStarted => true,
Started => true,
Resolved => false,
}
}
fn to_colored(&self, urgent: &bool) -> ColoredString {
use Status::*;
match self {
NotStarted => {
let base = "Not Started";
if *urgent {
base.red()
} else {
base.dimmed()
}
}
Started => {
let base = "Started";
if *urgent {
base.red()
} else {
base.yellow()
}
}
Resolved => "Resolved".green(),
}
}
}
#[derive(Serialize, Deserialize)]
struct Entry {
id: String,
task: String,
deadline: Option<DateTime<Local>>,
status: Status,
priority: u8,
}
impl Entry {
fn new(id: String, task: String, priority: u8, mb_deadline: Option<DateTime<Local>>) -> Self {
Entry {
id,
task,
deadline: mb_deadline,
priority,
status: Status::NotStarted,
}
}
fn format(&self) -> String {
let deadline_urgent = match self.deadline {
Some(x) => x.date() <= Local::now().date(),
_ => false,
};
let status_urgent = self.status.is_urgent();
let urgent = deadline_urgent && status_urgent;
let deadline_str = match self.deadline {
Some(deadline) => {
let str = format!("{}", deadline.format("\n\t Deadline: %d-%m %H:%M")).to_owned();
if urgent {
str.red()
} else {
str.dimmed()
}
}
None => "".to_owned().dimmed(),
};
let priority_str = if self.priority > 0 {
format!("Priority: {}", self.priority).to_owned()
} else {
"".to_owned()
};
let status_str = self.status.to_colored(&urgent);
| "Task: {} {} | {} | {} {}",
self.id,
priority_str,
self.task.bold(),
status_str,
deadline_str
)
}
}
const DATA_FOLDER: &str = ".todo.d";
const DATA_FILENAME: &str = "data.json";
const NOUNS_FILENAME: &str = "nouns.txt";
fn data_folder() -> PathBuf {
match env::home_dir() {
Some(mut p) => {
p.push(DATA_FOLDER);
p
}
None => {
panic!("Couldn't find your home folder, setup will require some manual hacking");
}
}
}
fn data_path() -> PathBuf {
let mut p = data_folder();
p.push(DATA_FILENAME);
p
}
fn nouns_path() -> PathBuf {
let mut p = data_folder();
p.push(NOUNS_FILENAME);
p
}
fn load_data() -> IOResult<Data> {
let filename = data_path();
let mut file = File::open(filename)?;
let mut contents = String::new();
file.read_to_string(&mut contents)?;
Ok(match serde_json::from_str(&contents) {
Ok(d) => d,
Err(e) => {
panic!("Error, corrupted data!\n{}", e);
}
})
}
fn load_data_catch() -> Data {
load_data().unwrap_or_else(|_| Data::new())
}
fn load_nouns() -> IOResult<Vec<String>> {
let filename = nouns_path();
let f = File::open(filename)?;
let f = BufReader::new(f);
f.lines().collect()
}
fn save_data(data: &Data) -> Result<(), serde_json::Error> {
let j = serde_json::to_string(data)?;
let filename = data_path();
// TODO merge two result error types
let mut file = OpenOptions::new()
.write(true) // Overwrite whole file when writing
.create(true)
.truncate(true) // Remove any previous stuff
.open(filename).unwrap();
file.write_all(j.as_ref()).unwrap();
Ok(())
}
fn do_list() {
let data = load_data_catch();
data.print();
}
fn pick_name(data: &Data) -> String {
// TODO error handle
let nouns = load_nouns().unwrap();
let mut noun;
// We know this will probably terminate
// stop worrying guys
#[allow(while_immutable_condition)]
while {
noun = rand::thread_rng().choose(&nouns).unwrap();
// Repeat until we find one not already used
data.find_entry(noun).is_some()
} {}
noun.to_owned()
}
fn do_add(task: String, priority: u8, deadline_vague: &Option<VagueTime>) {
let mut data = load_data_catch();
let id = pick_name(&data);
println!("Adding {} - '{}'", id, task);
let deadline = deadline_vague.as_ref().map(|x| x.concretise());
let new_entry = Entry::new(id, task, priority, deadline);
data.add_entry(new_entry);
data.print();
save_data(&data).unwrap();
}
fn do_set_progress(id: &str, progress: Status) {
let mut data = load_data_catch();
println!("Resolving '{}'", id);
{
// Scope for mutable borrow
match data.find_entry_mut(id) {
Some(entry) => {
entry.status = progress;
}
None => {
println!("Could not find '{}' to update, exiting..", id);
return;
}
}
}
data.print();
save_data(&data).unwrap();
}
fn do_remove(id: &str) {
let mut data = load_data_catch();
println!("Removing '{}'", id);
data.remove_by_id(id);
data.print();
save_data(&data).unwrap();
} | format!( | random_line_split |
main.rs | extern crate argparse;
extern crate chrono;
extern crate colored;
extern crate rand;
extern crate serde;
extern crate serde_json;
extern crate time;
#[macro_use]
extern crate serde_derive;
use argparse::{ArgumentParser, Store, StoreOption};
use rand::Rng;
use std::fs::File;
use std::fs::OpenOptions;
use std::io::BufRead;
use std::io::BufReader;
use std::path::PathBuf;
use std::str::FromStr;
use time::Duration;
use std::env;
use std::io::Read;
use std::io::Result as IOResult;
use std::io::Write;
use colored::*;
use chrono::prelude::*;
enum VagueTime {
Tomorrow,
Today,
Evening,
NextWeek,
Day(u8),
}
impl FromStr for VagueTime {
type Err = ();
fn from_str(s: &str) -> Result<Self, Self::Err> {
use VagueTime::*;
match s {
"tomorrow" => Ok(Tomorrow),
"today" => Ok(Today),
"tonight" => Ok(Today),
"evening" => Ok(Evening),
"week" => Ok(NextWeek),
"next week" => Ok(NextWeek),
d => Ok(match u8::from_str(d) {
Ok(x) => Day(x),
Err(_e) => {
panic!("I don't understand the date you asked for!");
}
}),
}
}
}
impl VagueTime {
fn concretise(&self) -> DateTime<Local> {
use VagueTime::*;
let t0 = Local::now();
match self {
Tomorrow => t0 + Duration::days(1),
Today => Local::today().and_hms(23, 30, 0),
Evening => Local::today().and_hms(23, 00, 0),
NextWeek => t0 + Duration::days(7),
Day(d) => Local::today()
.with_day(u32::from(*d))
.unwrap()
.and_hms(15, 00, 0),
}
}
}
enum Command {
List,
Add,
Started,
Resolve,
Remove,
None,
}
impl FromStr for Command {
type Err = ();
fn from_str(s: &str) -> Result<Self, Self::Err> {
use Command::*;
Ok(match s {
"" => List,
"list" => List,
"add" => Add,
"start" => Started,
"resolve" => Resolve,
"remove" => Remove,
_ => None,
})
}
}
fn main() {
let mut command = Command::List;
let mut arg = "".to_owned();
let mut deadline: Option<VagueTime> = None;
let mut priority: u8 = 0;
{
let mut ap = ArgumentParser::new();
ap.set_description(
"Something to help me organise\nSupports commands:
list\n - add \"Text of task\"\n - start taskname
- \nresolve taskname\n - remove taskname
Supports setting deadlines which can be of the form
tommorow, today, tonight, evening, nextweek, or a day of this month as a single
number",
);
ap.refer(&mut command)
.add_argument("command", Store, "Command to run");
ap.refer(&mut arg)
.add_argument("arg", Store, "arg for command");
ap.refer(&mut deadline)
.add_option(&["-d", "--deadline"], StoreOption, "Deadline of task");
ap.refer(&mut priority)
.add_option(&["-p", "--priority"], Store, "Priority of task");
ap.parse_args_or_exit();
}
match command {
Command::List => {
do_list();
}
Command::Add => {
do_add(arg, priority, &deadline);
}
Command::Started => {
do_set_progress(&arg, Status::Started);
}
Command::Resolve => {
do_set_progress(&arg, Status::Resolved);
}
Command::Remove => {
do_remove(&arg);
}
_ => {
println!("Unrecognised argument, try todo --help");
}
}
}
#[derive(Serialize, Deserialize)]
struct Data {
entries: Vec<Entry>,
last_updated: DateTime<Local>,
}
impl Data {
fn new() -> Self {
Data {
entries: Vec::new(),
last_updated: Local::now(),
}
}
fn add_entry(&mut self, entry: Entry) {
self.entries.push(entry);
}
fn find_entry<'t>(&'t self, id: &str) -> Option<&'t Entry> {
for x in &self.entries {
if x.id == id {
return Some(x);
}
}
None
}
fn find_entry_mut<'t>(&'t mut self, id: &str) -> Option<&'t mut Entry> {
for x in &mut self.entries {
if x.id == id {
return Some(x);
}
}
None
}
fn remove_by_id(&mut self, id: &str) {
self.entries.retain(|x| x.id != id);
}
fn print(&self) {
if self.entries.is_empty() {
println!("Nothing todo, woooooo!");
}
for entry in &self.entries {
println!("{}", entry.format());
}
}
}
#[derive(Serialize, Deserialize)]
enum Status {
NotStarted,
Started,
Resolved,
}
impl Status {
fn is_urgent(&self) -> bool {
use Status::*;
match self {
NotStarted => true,
Started => true,
Resolved => false,
}
}
fn to_colored(&self, urgent: &bool) -> ColoredString {
use Status::*;
match self {
NotStarted => {
let base = "Not Started";
if *urgent {
base.red()
} else {
base.dimmed()
}
}
Started => {
let base = "Started";
if *urgent {
base.red()
} else {
base.yellow()
}
}
Resolved => "Resolved".green(),
}
}
}
#[derive(Serialize, Deserialize)]
struct Entry {
id: String,
task: String,
deadline: Option<DateTime<Local>>,
status: Status,
priority: u8,
}
impl Entry {
fn new(id: String, task: String, priority: u8, mb_deadline: Option<DateTime<Local>>) -> Self {
Entry {
id,
task,
deadline: mb_deadline,
priority,
status: Status::NotStarted,
}
}
fn format(&self) -> String {
let deadline_urgent = match self.deadline {
Some(x) => x.date() <= Local::now().date(),
_ => false,
};
let status_urgent = self.status.is_urgent();
let urgent = deadline_urgent && status_urgent;
let deadline_str = match self.deadline {
Some(deadline) => {
let str = format!("{}", deadline.format("\n\t Deadline: %d-%m %H:%M")).to_owned();
if urgent {
str.red()
} else {
str.dimmed()
}
}
None => "".to_owned().dimmed(),
};
let priority_str = if self.priority > 0 {
format!("Priority: {}", self.priority).to_owned()
} else {
"".to_owned()
};
let status_str = self.status.to_colored(&urgent);
format!(
"Task: {} {} | {} | {} {}",
self.id,
priority_str,
self.task.bold(),
status_str,
deadline_str
)
}
}
const DATA_FOLDER: &str = ".todo.d";
const DATA_FILENAME: &str = "data.json";
const NOUNS_FILENAME: &str = "nouns.txt";
fn data_folder() -> PathBuf {
match env::home_dir() {
Some(mut p) => {
p.push(DATA_FOLDER);
p
}
None => {
panic!("Couldn't find your home folder, setup will require some manual hacking");
}
}
}
fn data_path() -> PathBuf {
let mut p = data_folder();
p.push(DATA_FILENAME);
p
}
fn nouns_path() -> PathBuf {
let mut p = data_folder();
p.push(NOUNS_FILENAME);
p
}
fn load_data() -> IOResult<Data> {
let filename = data_path();
let mut file = File::open(filename)?;
let mut contents = String::new();
file.read_to_string(&mut contents)?;
Ok(match serde_json::from_str(&contents) {
Ok(d) => d,
Err(e) => {
panic!("Error, corrupted data!\n{}", e);
}
})
}
fn load_data_catch() -> Data {
load_data().unwrap_or_else(|_| Data::new())
}
fn load_nouns() -> IOResult<Vec<String>> {
let filename = nouns_path();
let f = File::open(filename)?;
let f = BufReader::new(f);
f.lines().collect()
}
fn save_data(data: &Data) -> Result<(), serde_json::Error> {
let j = serde_json::to_string(data)?;
let filename = data_path();
// TODO merge two result error types
let mut file = OpenOptions::new()
.write(true) // Overwrite whole file when writing
.create(true)
.truncate(true) // Remove any previous stuff
.open(filename).unwrap();
file.write_all(j.as_ref()).unwrap();
Ok(())
}
fn do_list() {
let data = load_data_catch();
data.print();
}
fn pick_name(data: &Data) -> String {
// TODO error handle
let nouns = load_nouns().unwrap();
let mut noun;
// We know this will probably terminate
// stop worrying guys
#[allow(while_immutable_condition)]
while {
noun = rand::thread_rng().choose(&nouns).unwrap();
// Repeat until we find one not already used
data.find_entry(noun).is_some()
} {}
noun.to_owned()
}
fn | (task: String, priority: u8, deadline_vague: &Option<VagueTime>) {
let mut data = load_data_catch();
let id = pick_name(&data);
println!("Adding {} - '{}'", id, task);
let deadline = deadline_vague.as_ref().map(|x| x.concretise());
let new_entry = Entry::new(id, task, priority, deadline);
data.add_entry(new_entry);
data.print();
save_data(&data).unwrap();
}
fn do_set_progress(id: &str, progress: Status) {
let mut data = load_data_catch();
println!("Resolving '{}'", id);
{
// Scope for mutable borrow
match data.find_entry_mut(id) {
Some(entry) => {
entry.status = progress;
}
None => {
println!("Could not find '{}' to update, exiting..", id);
return;
}
}
}
data.print();
save_data(&data).unwrap();
}
fn do_remove(id: &str) {
let mut data = load_data_catch();
println!("Removing '{}'", id);
data.remove_by_id(id);
data.print();
save_data(&data).unwrap();
}
| do_add | identifier_name |
metrics.go | /*
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package metrics
import (
"crypto/sha256"
"errors"
"fmt"
"hash"
"sync"
"time"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
"k8s.io/component-base/metrics"
"k8s.io/component-base/metrics/legacyregistry"
"k8s.io/klog/v2"
"k8s.io/utils/lru"
)
const (
namespace = "apiserver"
subsystem = "envelope_encryption"
FromStorageLabel = "from_storage"
ToStorageLabel = "to_storage"
)
type metricLabels struct {
transformationType string
providerName string
keyIDHash string
}
/*
* By default, all the following metrics are defined as falling under
* ALPHA stability level https://github.com/kubernetes/enhancements/blob/master/keps/sig-instrumentation/1209-metrics-stability/kubernetes-control-plane-metrics-stability.md#stability-classes)
*
* Promoting the stability level of the metric is a responsibility of the component owner, since it
* involves explicitly acknowledging support for the metric across multiple releases, in accordance with
* the metric stability policy.
*/
var (
lockLastFromStorage sync.Mutex
lockLastToStorage sync.Mutex
lockRecordKeyID sync.Mutex
lockRecordKeyIDStatus sync.Mutex
lastFromStorage time.Time
lastToStorage time.Time
keyIDHashTotalMetricLabels *lru.Cache
keyIDHashStatusLastTimestampSecondsMetricLabels *lru.Cache
cacheSize = 100
// This metric is only used for KMS v1 API.
dekCacheFillPercent = metrics.NewGauge(
&metrics.GaugeOpts{
Namespace: namespace,
Subsystem: subsystem,
Name: "dek_cache_fill_percent",
Help: "Percent of the cache slots currently occupied by cached DEKs.",
StabilityLevel: metrics.ALPHA,
},
)
// This metric is only used for KMS v1 API.
dekCacheInterArrivals = metrics.NewHistogramVec(
&metrics.HistogramOpts{
Namespace: namespace,
Subsystem: subsystem,
Name: "dek_cache_inter_arrival_time_seconds",
Help: "Time (in seconds) of inter arrival of transformation requests.",
StabilityLevel: metrics.ALPHA,
Buckets: metrics.ExponentialBuckets(60, 2, 10),
},
[]string{"transformation_type"},
)
// These metrics are made public to be used by unit tests.
KMSOperationsLatencyMetric = metrics.NewHistogramVec(
&metrics.HistogramOpts{
Namespace: namespace,
Subsystem: subsystem,
Name: "kms_operations_latency_seconds",
Help: "KMS operation duration with gRPC error code status total.",
StabilityLevel: metrics.ALPHA,
// Use custom buckets to avoid the default buckets which are too small for KMS operations.
// Start 0.1ms with the last bucket being [~52s, +Inf)
Buckets: metrics.ExponentialBuckets(0.0001, 2, 20),
},
[]string{"provider_name", "method_name", "grpc_status_code"},
)
// keyIDHashTotal is the number of times a keyID is used
// e.g. apiserver_envelope_encryption_key_id_hash_total counter
// apiserver_envelope_encryption_key_id_hash_total{key_id_hash="sha256",
// provider_name="providerName",transformation_type="from_storage"} 1
KeyIDHashTotal = metrics.NewCounterVec(
&metrics.CounterOpts{
Namespace: namespace,
Subsystem: subsystem,
Name: "key_id_hash_total",
Help: "Number of times a keyID is used split by transformation type and provider.",
StabilityLevel: metrics.ALPHA,
},
[]string{"transformation_type", "provider_name", "key_id_hash"},
)
// keyIDHashLastTimestampSeconds is the last time in seconds when a keyID was used
// e.g. apiserver_envelope_encryption_key_id_hash_last_timestamp_seconds{key_id_hash="sha256", provider_name="providerName",transformation_type="from_storage"} 1.674865558833728e+09
KeyIDHashLastTimestampSeconds = metrics.NewGaugeVec(
&metrics.GaugeOpts{
Namespace: namespace,
Subsystem: subsystem,
Name: "key_id_hash_last_timestamp_seconds",
Help: "The last time in seconds when a keyID was used.",
StabilityLevel: metrics.ALPHA,
},
[]string{"transformation_type", "provider_name", "key_id_hash"},
)
// keyIDHashStatusLastTimestampSeconds is the last time in seconds when a keyID was returned by the Status RPC call.
// e.g. apiserver_envelope_encryption_key_id_hash_status_last_timestamp_seconds{key_id_hash="sha256", provider_name="providerName"} 1.674865558833728e+09
KeyIDHashStatusLastTimestampSeconds = metrics.NewGaugeVec(
&metrics.GaugeOpts{
Namespace: namespace,
Subsystem: subsystem,
Name: "key_id_hash_status_last_timestamp_seconds",
Help: "The last time in seconds when a keyID was returned by the Status RPC call.",
StabilityLevel: metrics.ALPHA,
},
[]string{"provider_name", "key_id_hash"},
)
InvalidKeyIDFromStatusTotal = metrics.NewCounterVec(
&metrics.CounterOpts{
Namespace: namespace,
Subsystem: subsystem,
Name: "invalid_key_id_from_status_total",
Help: "Number of times an invalid keyID is returned by the Status RPC call split by error.",
StabilityLevel: metrics.ALPHA,
},
[]string{"provider_name", "error"},
)
DekSourceCacheSize = metrics.NewGaugeVec(
&metrics.GaugeOpts{
Namespace: namespace,
Subsystem: subsystem,
Name: "dek_source_cache_size",
Help: "Number of records in data encryption key (DEK) source cache. On a restart, this value is an approximation of the number of decrypt RPC calls the server will make to the KMS plugin.",
StabilityLevel: metrics.ALPHA,
},
[]string{"provider_name"},
)
)
var registerMetricsFunc sync.Once
var hashPool *sync.Pool
func registerLRUMetrics() {
if keyIDHashTotalMetricLabels != nil {
keyIDHashTotalMetricLabels.Clear()
}
if keyIDHashStatusLastTimestampSecondsMetricLabels != nil {
keyIDHashStatusLastTimestampSecondsMetricLabels.Clear()
}
keyIDHashTotalMetricLabels = lru.NewWithEvictionFunc(cacheSize, func(key lru.Key, _ interface{}) {
item := key.(metricLabels)
if deleted := KeyIDHashTotal.DeleteLabelValues(item.transformationType, item.providerName, item.keyIDHash); deleted {
klog.InfoS("Deleted keyIDHashTotalMetricLabels", "transformationType", item.transformationType,
"providerName", item.providerName, "keyIDHash", item.keyIDHash)
}
if deleted := KeyIDHashLastTimestampSeconds.DeleteLabelValues(item.transformationType, item.providerName, item.keyIDHash); deleted {
klog.InfoS("Deleted keyIDHashLastTimestampSecondsMetricLabels", "transformationType", item.transformationType,
"providerName", item.providerName, "keyIDHash", item.keyIDHash)
}
})
keyIDHashStatusLastTimestampSecondsMetricLabels = lru.NewWithEvictionFunc(cacheSize, func(key lru.Key, _ interface{}) {
item := key.(metricLabels)
if deleted := KeyIDHashStatusLastTimestampSeconds.DeleteLabelValues(item.providerName, item.keyIDHash); deleted {
klog.InfoS("Deleted keyIDHashStatusLastTimestampSecondsMetricLabels", "providerName", item.providerName, "keyIDHash", item.keyIDHash)
}
})
}
func RegisterMetrics() {
registerMetricsFunc.Do(func() {
registerLRUMetrics()
hashPool = &sync.Pool{
New: func() interface{} {
return sha256.New()
},
}
legacyregistry.MustRegister(dekCacheFillPercent)
legacyregistry.MustRegister(dekCacheInterArrivals)
legacyregistry.MustRegister(DekSourceCacheSize)
legacyregistry.MustRegister(KeyIDHashTotal)
legacyregistry.MustRegister(KeyIDHashLastTimestampSeconds)
legacyregistry.MustRegister(KeyIDHashStatusLastTimestampSeconds)
legacyregistry.MustRegister(InvalidKeyIDFromStatusTotal)
legacyregistry.MustRegister(KMSOperationsLatencyMetric)
})
}
// RecordKeyID records total count and last time in seconds when a KeyID was used for TransformFromStorage and TransformToStorage operations
func RecordKeyID(transformationType, providerName, keyID string) {
lockRecordKeyID.Lock()
defer lockRecordKeyID.Unlock()
keyIDHash := addLabelToCache(keyIDHashTotalMetricLabels, transformationType, providerName, keyID)
KeyIDHashTotal.WithLabelValues(transformationType, providerName, keyIDHash).Inc()
KeyIDHashLastTimestampSeconds.WithLabelValues(transformationType, providerName, keyIDHash).SetToCurrentTime()
}
// RecordKeyIDFromStatus records last time in seconds when a KeyID was returned by the Status RPC call.
func RecordKeyIDFromStatus(providerName, keyID string) {
lockRecordKeyIDStatus.Lock()
defer lockRecordKeyIDStatus.Unlock()
keyIDHash := addLabelToCache(keyIDHashStatusLastTimestampSecondsMetricLabels, "", providerName, keyID)
KeyIDHashStatusLastTimestampSeconds.WithLabelValues(providerName, keyIDHash).SetToCurrentTime()
}
func RecordInvalidKeyIDFromStatus(providerName, errCode string) {
InvalidKeyIDFromStatusTotal.WithLabelValues(providerName, errCode).Inc()
}
func RecordArrival(transformationType string, start time.Time) {
switch transformationType {
case FromStorageLabel:
lockLastFromStorage.Lock()
defer lockLastFromStorage.Unlock()
if lastFromStorage.IsZero() {
lastFromStorage = start
}
dekCacheInterArrivals.WithLabelValues(transformationType).Observe(start.Sub(lastFromStorage).Seconds())
lastFromStorage = start
case ToStorageLabel:
lockLastToStorage.Lock()
defer lockLastToStorage.Unlock()
if lastToStorage.IsZero() {
lastToStorage = start
}
dekCacheInterArrivals.WithLabelValues(transformationType).Observe(start.Sub(lastToStorage).Seconds())
lastToStorage = start
}
}
func RecordDekCacheFillPercent(percent float64) {
dekCacheFillPercent.Set(percent)
}
func RecordDekSourceCacheSize(providerName string, size int) {
DekSourceCacheSize.WithLabelValues(providerName).Set(float64(size))
}
// RecordKMSOperationLatency records the latency of KMS operation.
func RecordKMSOperationLatency(providerName, methodName string, duration time.Duration, err error) {
KMSOperationsLatencyMetric.WithLabelValues(providerName, methodName, getErrorCode(err)).Observe(duration.Seconds())
}
type gRPCError interface {
GRPCStatus() *status.Status
}
func getErrorCode(err error) string {
if err == nil |
// handle errors wrapped with fmt.Errorf and similar
var s gRPCError
if errors.As(err, &s) {
return s.GRPCStatus().Code().String()
}
// This is not gRPC error. The operation must have failed before gRPC
// method was called, otherwise we would get gRPC error.
return "unknown-non-grpc"
}
func getHash(data string) string {
h := hashPool.Get().(hash.Hash)
h.Reset()
h.Write([]byte(data))
result := fmt.Sprintf("sha256:%x", h.Sum(nil))
hashPool.Put(h)
return result
}
func addLabelToCache(c *lru.Cache, transformationType, providerName, keyID string) string {
keyIDHash := ""
// only get hash if the keyID is not empty
if len(keyID) > 0 {
keyIDHash = getHash(keyID)
}
c.Add(metricLabels{
transformationType: transformationType,
providerName: providerName,
keyIDHash: keyIDHash,
}, nil) // value is irrelevant, this is a set and not a map
return keyIDHash
}
| {
return codes.OK.String()
} | conditional_block |
metrics.go | /*
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package metrics
import (
"crypto/sha256"
"errors"
"fmt"
"hash"
"sync"
"time"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
"k8s.io/component-base/metrics"
"k8s.io/component-base/metrics/legacyregistry"
"k8s.io/klog/v2"
"k8s.io/utils/lru"
)
const (
namespace = "apiserver"
subsystem = "envelope_encryption"
FromStorageLabel = "from_storage"
ToStorageLabel = "to_storage"
)
type metricLabels struct {
transformationType string
providerName string
keyIDHash string
}
/*
* By default, all the following metrics are defined as falling under
* ALPHA stability level https://github.com/kubernetes/enhancements/blob/master/keps/sig-instrumentation/1209-metrics-stability/kubernetes-control-plane-metrics-stability.md#stability-classes)
*
* Promoting the stability level of the metric is a responsibility of the component owner, since it
* involves explicitly acknowledging support for the metric across multiple releases, in accordance with
* the metric stability policy.
*/
var (
lockLastFromStorage sync.Mutex
lockLastToStorage sync.Mutex
lockRecordKeyID sync.Mutex
lockRecordKeyIDStatus sync.Mutex
lastFromStorage time.Time
lastToStorage time.Time
keyIDHashTotalMetricLabels *lru.Cache
keyIDHashStatusLastTimestampSecondsMetricLabels *lru.Cache
cacheSize = 100
// This metric is only used for KMS v1 API.
dekCacheFillPercent = metrics.NewGauge(
&metrics.GaugeOpts{
Namespace: namespace,
Subsystem: subsystem,
Name: "dek_cache_fill_percent",
Help: "Percent of the cache slots currently occupied by cached DEKs.",
StabilityLevel: metrics.ALPHA,
},
)
// This metric is only used for KMS v1 API.
dekCacheInterArrivals = metrics.NewHistogramVec(
&metrics.HistogramOpts{
Namespace: namespace,
Subsystem: subsystem,
Name: "dek_cache_inter_arrival_time_seconds",
Help: "Time (in seconds) of inter arrival of transformation requests.",
StabilityLevel: metrics.ALPHA,
Buckets: metrics.ExponentialBuckets(60, 2, 10),
},
[]string{"transformation_type"},
)
// These metrics are made public to be used by unit tests.
KMSOperationsLatencyMetric = metrics.NewHistogramVec(
&metrics.HistogramOpts{
Namespace: namespace,
Subsystem: subsystem,
Name: "kms_operations_latency_seconds",
Help: "KMS operation duration with gRPC error code status total.",
StabilityLevel: metrics.ALPHA,
// Use custom buckets to avoid the default buckets which are too small for KMS operations.
// Start 0.1ms with the last bucket being [~52s, +Inf)
Buckets: metrics.ExponentialBuckets(0.0001, 2, 20),
},
[]string{"provider_name", "method_name", "grpc_status_code"},
)
// keyIDHashTotal is the number of times a keyID is used
// e.g. apiserver_envelope_encryption_key_id_hash_total counter
// apiserver_envelope_encryption_key_id_hash_total{key_id_hash="sha256",
// provider_name="providerName",transformation_type="from_storage"} 1
KeyIDHashTotal = metrics.NewCounterVec(
&metrics.CounterOpts{
Namespace: namespace,
Subsystem: subsystem,
Name: "key_id_hash_total",
Help: "Number of times a keyID is used split by transformation type and provider.",
StabilityLevel: metrics.ALPHA,
},
[]string{"transformation_type", "provider_name", "key_id_hash"},
)
// keyIDHashLastTimestampSeconds is the last time in seconds when a keyID was used
// e.g. apiserver_envelope_encryption_key_id_hash_last_timestamp_seconds{key_id_hash="sha256", provider_name="providerName",transformation_type="from_storage"} 1.674865558833728e+09
KeyIDHashLastTimestampSeconds = metrics.NewGaugeVec(
&metrics.GaugeOpts{
Namespace: namespace,
Subsystem: subsystem,
Name: "key_id_hash_last_timestamp_seconds",
Help: "The last time in seconds when a keyID was used.",
StabilityLevel: metrics.ALPHA,
},
[]string{"transformation_type", "provider_name", "key_id_hash"},
)
// keyIDHashStatusLastTimestampSeconds is the last time in seconds when a keyID was returned by the Status RPC call.
// e.g. apiserver_envelope_encryption_key_id_hash_status_last_timestamp_seconds{key_id_hash="sha256", provider_name="providerName"} 1.674865558833728e+09
KeyIDHashStatusLastTimestampSeconds = metrics.NewGaugeVec(
&metrics.GaugeOpts{
Namespace: namespace,
Subsystem: subsystem,
Name: "key_id_hash_status_last_timestamp_seconds",
Help: "The last time in seconds when a keyID was returned by the Status RPC call.",
StabilityLevel: metrics.ALPHA,
},
[]string{"provider_name", "key_id_hash"},
)
InvalidKeyIDFromStatusTotal = metrics.NewCounterVec(
&metrics.CounterOpts{
Namespace: namespace,
Subsystem: subsystem,
Name: "invalid_key_id_from_status_total",
Help: "Number of times an invalid keyID is returned by the Status RPC call split by error.",
StabilityLevel: metrics.ALPHA,
},
[]string{"provider_name", "error"},
)
DekSourceCacheSize = metrics.NewGaugeVec(
&metrics.GaugeOpts{
Namespace: namespace,
Subsystem: subsystem,
Name: "dek_source_cache_size",
Help: "Number of records in data encryption key (DEK) source cache. On a restart, this value is an approximation of the number of decrypt RPC calls the server will make to the KMS plugin.",
StabilityLevel: metrics.ALPHA,
},
[]string{"provider_name"},
)
)
var registerMetricsFunc sync.Once
var hashPool *sync.Pool
func registerLRUMetrics() {
if keyIDHashTotalMetricLabels != nil {
keyIDHashTotalMetricLabels.Clear()
}
if keyIDHashStatusLastTimestampSecondsMetricLabels != nil {
keyIDHashStatusLastTimestampSecondsMetricLabels.Clear()
}
keyIDHashTotalMetricLabels = lru.NewWithEvictionFunc(cacheSize, func(key lru.Key, _ interface{}) { | if deleted := KeyIDHashTotal.DeleteLabelValues(item.transformationType, item.providerName, item.keyIDHash); deleted {
klog.InfoS("Deleted keyIDHashTotalMetricLabels", "transformationType", item.transformationType,
"providerName", item.providerName, "keyIDHash", item.keyIDHash)
}
if deleted := KeyIDHashLastTimestampSeconds.DeleteLabelValues(item.transformationType, item.providerName, item.keyIDHash); deleted {
klog.InfoS("Deleted keyIDHashLastTimestampSecondsMetricLabels", "transformationType", item.transformationType,
"providerName", item.providerName, "keyIDHash", item.keyIDHash)
}
})
keyIDHashStatusLastTimestampSecondsMetricLabels = lru.NewWithEvictionFunc(cacheSize, func(key lru.Key, _ interface{}) {
item := key.(metricLabels)
if deleted := KeyIDHashStatusLastTimestampSeconds.DeleteLabelValues(item.providerName, item.keyIDHash); deleted {
klog.InfoS("Deleted keyIDHashStatusLastTimestampSecondsMetricLabels", "providerName", item.providerName, "keyIDHash", item.keyIDHash)
}
})
}
func RegisterMetrics() {
registerMetricsFunc.Do(func() {
registerLRUMetrics()
hashPool = &sync.Pool{
New: func() interface{} {
return sha256.New()
},
}
legacyregistry.MustRegister(dekCacheFillPercent)
legacyregistry.MustRegister(dekCacheInterArrivals)
legacyregistry.MustRegister(DekSourceCacheSize)
legacyregistry.MustRegister(KeyIDHashTotal)
legacyregistry.MustRegister(KeyIDHashLastTimestampSeconds)
legacyregistry.MustRegister(KeyIDHashStatusLastTimestampSeconds)
legacyregistry.MustRegister(InvalidKeyIDFromStatusTotal)
legacyregistry.MustRegister(KMSOperationsLatencyMetric)
})
}
// RecordKeyID records total count and last time in seconds when a KeyID was used for TransformFromStorage and TransformToStorage operations
func RecordKeyID(transformationType, providerName, keyID string) {
lockRecordKeyID.Lock()
defer lockRecordKeyID.Unlock()
keyIDHash := addLabelToCache(keyIDHashTotalMetricLabels, transformationType, providerName, keyID)
KeyIDHashTotal.WithLabelValues(transformationType, providerName, keyIDHash).Inc()
KeyIDHashLastTimestampSeconds.WithLabelValues(transformationType, providerName, keyIDHash).SetToCurrentTime()
}
// RecordKeyIDFromStatus records last time in seconds when a KeyID was returned by the Status RPC call.
func RecordKeyIDFromStatus(providerName, keyID string) {
lockRecordKeyIDStatus.Lock()
defer lockRecordKeyIDStatus.Unlock()
keyIDHash := addLabelToCache(keyIDHashStatusLastTimestampSecondsMetricLabels, "", providerName, keyID)
KeyIDHashStatusLastTimestampSeconds.WithLabelValues(providerName, keyIDHash).SetToCurrentTime()
}
func RecordInvalidKeyIDFromStatus(providerName, errCode string) {
InvalidKeyIDFromStatusTotal.WithLabelValues(providerName, errCode).Inc()
}
func RecordArrival(transformationType string, start time.Time) {
switch transformationType {
case FromStorageLabel:
lockLastFromStorage.Lock()
defer lockLastFromStorage.Unlock()
if lastFromStorage.IsZero() {
lastFromStorage = start
}
dekCacheInterArrivals.WithLabelValues(transformationType).Observe(start.Sub(lastFromStorage).Seconds())
lastFromStorage = start
case ToStorageLabel:
lockLastToStorage.Lock()
defer lockLastToStorage.Unlock()
if lastToStorage.IsZero() {
lastToStorage = start
}
dekCacheInterArrivals.WithLabelValues(transformationType).Observe(start.Sub(lastToStorage).Seconds())
lastToStorage = start
}
}
func RecordDekCacheFillPercent(percent float64) {
dekCacheFillPercent.Set(percent)
}
func RecordDekSourceCacheSize(providerName string, size int) {
DekSourceCacheSize.WithLabelValues(providerName).Set(float64(size))
}
// RecordKMSOperationLatency records the latency of KMS operation.
func RecordKMSOperationLatency(providerName, methodName string, duration time.Duration, err error) {
KMSOperationsLatencyMetric.WithLabelValues(providerName, methodName, getErrorCode(err)).Observe(duration.Seconds())
}
type gRPCError interface {
GRPCStatus() *status.Status
}
func getErrorCode(err error) string {
if err == nil {
return codes.OK.String()
}
// handle errors wrapped with fmt.Errorf and similar
var s gRPCError
if errors.As(err, &s) {
return s.GRPCStatus().Code().String()
}
// This is not gRPC error. The operation must have failed before gRPC
// method was called, otherwise we would get gRPC error.
return "unknown-non-grpc"
}
func getHash(data string) string {
h := hashPool.Get().(hash.Hash)
h.Reset()
h.Write([]byte(data))
result := fmt.Sprintf("sha256:%x", h.Sum(nil))
hashPool.Put(h)
return result
}
func addLabelToCache(c *lru.Cache, transformationType, providerName, keyID string) string {
keyIDHash := ""
// only get hash if the keyID is not empty
if len(keyID) > 0 {
keyIDHash = getHash(keyID)
}
c.Add(metricLabels{
transformationType: transformationType,
providerName: providerName,
keyIDHash: keyIDHash,
}, nil) // value is irrelevant, this is a set and not a map
return keyIDHash
} | item := key.(metricLabels) | random_line_split |
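The eviction callbacks above are what keep the key_id_hash label space bounded: RecordKeyID pushes each (transformation type, provider, hash) triple into the LRU, and once more than cacheSize distinct triples exist, the oldest series is deleted from both metric vectors. A rough sketch of the visible effect, assuming RegisterMetrics has already been called; the provider and key names are made up:

func exampleKeyIDHashCardinality() {
	RecordKeyID(FromStorageLabel, "kms-provider", "key-2024-01")
	RecordKeyID(FromStorageLabel, "kms-provider", "key-2024-01")
	// Both calls hash the same keyID, so they increment one series, roughly:
	// apiserver_envelope_encryption_key_id_hash_total{transformation_type="from_storage",
	//     provider_name="kms-provider",key_id_hash="sha256:..."} 2
	// Once more than cacheSize (100) distinct label triples have been seen, the
	// eviction callback calls DeleteLabelValues and the oldest series is dropped.
}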
metrics.go | /*
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package metrics
import (
"crypto/sha256"
"errors"
"fmt"
"hash"
"sync"
"time"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
"k8s.io/component-base/metrics"
"k8s.io/component-base/metrics/legacyregistry"
"k8s.io/klog/v2"
"k8s.io/utils/lru"
)
const (
namespace = "apiserver"
subsystem = "envelope_encryption"
FromStorageLabel = "from_storage"
ToStorageLabel = "to_storage"
)
type metricLabels struct {
transformationType string
providerName string
keyIDHash string
}
/*
* By default, all the following metrics are defined as falling under
* ALPHA stability level https://github.com/kubernetes/enhancements/blob/master/keps/sig-instrumentation/1209-metrics-stability/kubernetes-control-plane-metrics-stability.md#stability-classes)
*
* Promoting the stability level of the metric is a responsibility of the component owner, since it
* involves explicitly acknowledging support for the metric across multiple releases, in accordance with
* the metric stability policy.
*/
var (
lockLastFromStorage sync.Mutex
lockLastToStorage sync.Mutex
lockRecordKeyID sync.Mutex
lockRecordKeyIDStatus sync.Mutex
lastFromStorage time.Time
lastToStorage time.Time
keyIDHashTotalMetricLabels *lru.Cache
keyIDHashStatusLastTimestampSecondsMetricLabels *lru.Cache
cacheSize = 100
// This metric is only used for KMS v1 API.
dekCacheFillPercent = metrics.NewGauge(
&metrics.GaugeOpts{
Namespace: namespace,
Subsystem: subsystem,
Name: "dek_cache_fill_percent",
Help: "Percent of the cache slots currently occupied by cached DEKs.",
StabilityLevel: metrics.ALPHA,
},
)
// This metric is only used for KMS v1 API.
dekCacheInterArrivals = metrics.NewHistogramVec(
&metrics.HistogramOpts{
Namespace: namespace,
Subsystem: subsystem,
Name: "dek_cache_inter_arrival_time_seconds",
Help: "Time (in seconds) of inter arrival of transformation requests.",
StabilityLevel: metrics.ALPHA,
Buckets: metrics.ExponentialBuckets(60, 2, 10),
},
[]string{"transformation_type"},
)
// These metrics are made public to be used by unit tests.
KMSOperationsLatencyMetric = metrics.NewHistogramVec(
&metrics.HistogramOpts{
Namespace: namespace,
Subsystem: subsystem,
Name: "kms_operations_latency_seconds",
Help: "KMS operation duration with gRPC error code status total.",
StabilityLevel: metrics.ALPHA,
// Use custom buckets to avoid the default buckets which are too small for KMS operations.
// Start 0.1ms with the last bucket being [~52s, +Inf)
Buckets: metrics.ExponentialBuckets(0.0001, 2, 20),
},
[]string{"provider_name", "method_name", "grpc_status_code"},
)
// keyIDHashTotal is the number of times a keyID is used
// e.g. apiserver_envelope_encryption_key_id_hash_total counter
// apiserver_envelope_encryption_key_id_hash_total{key_id_hash="sha256",
// provider_name="providerName",transformation_type="from_storage"} 1
KeyIDHashTotal = metrics.NewCounterVec(
&metrics.CounterOpts{
Namespace: namespace,
Subsystem: subsystem,
Name: "key_id_hash_total",
Help: "Number of times a keyID is used split by transformation type and provider.",
StabilityLevel: metrics.ALPHA,
},
[]string{"transformation_type", "provider_name", "key_id_hash"},
)
// keyIDHashLastTimestampSeconds is the last time in seconds when a keyID was used
// e.g. apiserver_envelope_encryption_key_id_hash_last_timestamp_seconds{key_id_hash="sha256", provider_name="providerName",transformation_type="from_storage"} 1.674865558833728e+09
KeyIDHashLastTimestampSeconds = metrics.NewGaugeVec(
&metrics.GaugeOpts{
Namespace: namespace,
Subsystem: subsystem,
Name: "key_id_hash_last_timestamp_seconds",
Help: "The last time in seconds when a keyID was used.",
StabilityLevel: metrics.ALPHA,
},
[]string{"transformation_type", "provider_name", "key_id_hash"},
)
// keyIDHashStatusLastTimestampSeconds is the last time in seconds when a keyID was returned by the Status RPC call.
// e.g. apiserver_envelope_encryption_key_id_hash_status_last_timestamp_seconds{key_id_hash="sha256", provider_name="providerName"} 1.674865558833728e+09
KeyIDHashStatusLastTimestampSeconds = metrics.NewGaugeVec(
&metrics.GaugeOpts{
Namespace: namespace,
Subsystem: subsystem,
Name: "key_id_hash_status_last_timestamp_seconds",
Help: "The last time in seconds when a keyID was returned by the Status RPC call.",
StabilityLevel: metrics.ALPHA,
},
[]string{"provider_name", "key_id_hash"},
)
InvalidKeyIDFromStatusTotal = metrics.NewCounterVec(
&metrics.CounterOpts{
Namespace: namespace,
Subsystem: subsystem,
Name: "invalid_key_id_from_status_total",
Help: "Number of times an invalid keyID is returned by the Status RPC call split by error.",
StabilityLevel: metrics.ALPHA,
},
[]string{"provider_name", "error"},
)
DekSourceCacheSize = metrics.NewGaugeVec(
&metrics.GaugeOpts{
Namespace: namespace,
Subsystem: subsystem,
Name: "dek_source_cache_size",
Help: "Number of records in data encryption key (DEK) source cache. On a restart, this value is an approximation of the number of decrypt RPC calls the server will make to the KMS plugin.",
StabilityLevel: metrics.ALPHA,
},
[]string{"provider_name"},
)
)
var registerMetricsFunc sync.Once
var hashPool *sync.Pool
func registerLRUMetrics() {
if keyIDHashTotalMetricLabels != nil {
keyIDHashTotalMetricLabels.Clear()
}
if keyIDHashStatusLastTimestampSecondsMetricLabels != nil {
keyIDHashStatusLastTimestampSecondsMetricLabels.Clear()
}
keyIDHashTotalMetricLabels = lru.NewWithEvictionFunc(cacheSize, func(key lru.Key, _ interface{}) {
item := key.(metricLabels)
if deleted := KeyIDHashTotal.DeleteLabelValues(item.transformationType, item.providerName, item.keyIDHash); deleted {
klog.InfoS("Deleted keyIDHashTotalMetricLabels", "transformationType", item.transformationType,
"providerName", item.providerName, "keyIDHash", item.keyIDHash)
}
if deleted := KeyIDHashLastTimestampSeconds.DeleteLabelValues(item.transformationType, item.providerName, item.keyIDHash); deleted {
klog.InfoS("Deleted keyIDHashLastTimestampSecondsMetricLabels", "transformationType", item.transformationType,
"providerName", item.providerName, "keyIDHash", item.keyIDHash)
}
})
keyIDHashStatusLastTimestampSecondsMetricLabels = lru.NewWithEvictionFunc(cacheSize, func(key lru.Key, _ interface{}) {
item := key.(metricLabels)
if deleted := KeyIDHashStatusLastTimestampSeconds.DeleteLabelValues(item.providerName, item.keyIDHash); deleted {
klog.InfoS("Deleted keyIDHashStatusLastTimestampSecondsMetricLabels", "providerName", item.providerName, "keyIDHash", item.keyIDHash)
}
})
}
func RegisterMetrics() {
registerMetricsFunc.Do(func() {
registerLRUMetrics()
hashPool = &sync.Pool{
New: func() interface{} {
return sha256.New()
},
}
legacyregistry.MustRegister(dekCacheFillPercent)
legacyregistry.MustRegister(dekCacheInterArrivals)
legacyregistry.MustRegister(DekSourceCacheSize)
legacyregistry.MustRegister(KeyIDHashTotal)
legacyregistry.MustRegister(KeyIDHashLastTimestampSeconds)
legacyregistry.MustRegister(KeyIDHashStatusLastTimestampSeconds)
legacyregistry.MustRegister(InvalidKeyIDFromStatusTotal)
legacyregistry.MustRegister(KMSOperationsLatencyMetric)
})
}
// RecordKeyID records total count and last time in seconds when a KeyID was used for TransformFromStorage and TransformToStorage operations
func RecordKeyID(transformationType, providerName, keyID string) {
lockRecordKeyID.Lock()
defer lockRecordKeyID.Unlock()
keyIDHash := addLabelToCache(keyIDHashTotalMetricLabels, transformationType, providerName, keyID)
KeyIDHashTotal.WithLabelValues(transformationType, providerName, keyIDHash).Inc()
KeyIDHashLastTimestampSeconds.WithLabelValues(transformationType, providerName, keyIDHash).SetToCurrentTime()
}
// RecordKeyIDFromStatus records last time in seconds when a KeyID was returned by the Status RPC call.
func RecordKeyIDFromStatus(providerName, keyID string) {
lockRecordKeyIDStatus.Lock()
defer lockRecordKeyIDStatus.Unlock()
keyIDHash := addLabelToCache(keyIDHashStatusLastTimestampSecondsMetricLabels, "", providerName, keyID)
KeyIDHashStatusLastTimestampSeconds.WithLabelValues(providerName, keyIDHash).SetToCurrentTime()
}
func RecordInvalidKeyIDFromStatus(providerName, errCode string) |
func RecordArrival(transformationType string, start time.Time) {
switch transformationType {
case FromStorageLabel:
lockLastFromStorage.Lock()
defer lockLastFromStorage.Unlock()
if lastFromStorage.IsZero() {
lastFromStorage = start
}
dekCacheInterArrivals.WithLabelValues(transformationType).Observe(start.Sub(lastFromStorage).Seconds())
lastFromStorage = start
case ToStorageLabel:
lockLastToStorage.Lock()
defer lockLastToStorage.Unlock()
if lastToStorage.IsZero() {
lastToStorage = start
}
dekCacheInterArrivals.WithLabelValues(transformationType).Observe(start.Sub(lastToStorage).Seconds())
lastToStorage = start
}
}
func RecordDekCacheFillPercent(percent float64) {
dekCacheFillPercent.Set(percent)
}
func RecordDekSourceCacheSize(providerName string, size int) {
DekSourceCacheSize.WithLabelValues(providerName).Set(float64(size))
}
// RecordKMSOperationLatency records the latency of KMS operation.
func RecordKMSOperationLatency(providerName, methodName string, duration time.Duration, err error) {
KMSOperationsLatencyMetric.WithLabelValues(providerName, methodName, getErrorCode(err)).Observe(duration.Seconds())
}
type gRPCError interface {
GRPCStatus() *status.Status
}
func getErrorCode(err error) string {
if err == nil {
return codes.OK.String()
}
// handle errors wrapped with fmt.Errorf and similar
var s gRPCError
if errors.As(err, &s) {
return s.GRPCStatus().Code().String()
}
// This is not gRPC error. The operation must have failed before gRPC
// method was called, otherwise we would get gRPC error.
return "unknown-non-grpc"
}
func getHash(data string) string {
h := hashPool.Get().(hash.Hash)
h.Reset()
h.Write([]byte(data))
result := fmt.Sprintf("sha256:%x", h.Sum(nil))
hashPool.Put(h)
return result
}
func addLabelToCache(c *lru.Cache, transformationType, providerName, keyID string) string {
keyIDHash := ""
// only get hash if the keyID is not empty
if len(keyID) > 0 {
keyIDHash = getHash(keyID)
}
c.Add(metricLabels{
transformationType: transformationType,
providerName: providerName,
keyIDHash: keyIDHash,
}, nil) // value is irrelevant, this is a set and not a map
return keyIDHash
}
| {
InvalidKeyIDFromStatusTotal.WithLabelValues(providerName, errCode).Inc()
} | identifier_body |
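The bucket comment on KMSOperationsLatencyMetric above can be sanity-checked by expanding ExponentialBuckets(0.0001, 2, 20) by hand. A throwaway sketch (fmt import assumed):

func printKMSLatencyBuckets() {
	bound := 0.0001 // 0.1 ms, the first upper bound
	for i := 0; i < 20; i++ {
		fmt.Printf("bucket %2d: le=%gs\n", i, bound)
		bound *= 2
	}
	// The last bound is 0.0001 * 2^19 = 52.4288s, so slower operations land in
	// the implicit +Inf bucket, i.e. the "[~52s, +Inf)" range from the comment.
}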
metrics.go | /*
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package metrics
import (
"crypto/sha256"
"errors"
"fmt"
"hash"
"sync"
"time"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
"k8s.io/component-base/metrics"
"k8s.io/component-base/metrics/legacyregistry"
"k8s.io/klog/v2"
"k8s.io/utils/lru"
)
const (
namespace = "apiserver"
subsystem = "envelope_encryption"
FromStorageLabel = "from_storage"
ToStorageLabel = "to_storage"
)
type metricLabels struct {
transformationType string
providerName string
keyIDHash string
}
/*
* By default, all the following metrics are defined as falling under
* ALPHA stability level https://github.com/kubernetes/enhancements/blob/master/keps/sig-instrumentation/1209-metrics-stability/kubernetes-control-plane-metrics-stability.md#stability-classes)
*
* Promoting the stability level of the metric is a responsibility of the component owner, since it
* involves explicitly acknowledging support for the metric across multiple releases, in accordance with
* the metric stability policy.
*/
var (
lockLastFromStorage sync.Mutex
lockLastToStorage sync.Mutex
lockRecordKeyID sync.Mutex
lockRecordKeyIDStatus sync.Mutex
lastFromStorage time.Time
lastToStorage time.Time
keyIDHashTotalMetricLabels *lru.Cache
keyIDHashStatusLastTimestampSecondsMetricLabels *lru.Cache
cacheSize = 100
// This metric is only used for KMS v1 API.
dekCacheFillPercent = metrics.NewGauge(
&metrics.GaugeOpts{
Namespace: namespace,
Subsystem: subsystem,
Name: "dek_cache_fill_percent",
Help: "Percent of the cache slots currently occupied by cached DEKs.",
StabilityLevel: metrics.ALPHA,
},
)
// This metric is only used for KMS v1 API.
dekCacheInterArrivals = metrics.NewHistogramVec(
&metrics.HistogramOpts{
Namespace: namespace,
Subsystem: subsystem,
Name: "dek_cache_inter_arrival_time_seconds",
Help: "Time (in seconds) of inter arrival of transformation requests.",
StabilityLevel: metrics.ALPHA,
Buckets: metrics.ExponentialBuckets(60, 2, 10),
},
[]string{"transformation_type"},
)
// These metrics are made public to be used by unit tests.
KMSOperationsLatencyMetric = metrics.NewHistogramVec(
&metrics.HistogramOpts{
Namespace: namespace,
Subsystem: subsystem,
Name: "kms_operations_latency_seconds",
Help: "KMS operation duration with gRPC error code status total.",
StabilityLevel: metrics.ALPHA,
// Use custom buckets to avoid the default buckets which are too small for KMS operations.
// Start 0.1ms with the last bucket being [~52s, +Inf)
Buckets: metrics.ExponentialBuckets(0.0001, 2, 20),
},
[]string{"provider_name", "method_name", "grpc_status_code"},
)
// keyIDHashTotal is the number of times a keyID is used
// e.g. apiserver_envelope_encryption_key_id_hash_total counter
// apiserver_envelope_encryption_key_id_hash_total{key_id_hash="sha256",
// provider_name="providerName",transformation_type="from_storage"} 1
KeyIDHashTotal = metrics.NewCounterVec(
&metrics.CounterOpts{
Namespace: namespace,
Subsystem: subsystem,
Name: "key_id_hash_total",
Help: "Number of times a keyID is used split by transformation type and provider.",
StabilityLevel: metrics.ALPHA,
},
[]string{"transformation_type", "provider_name", "key_id_hash"},
)
// keyIDHashLastTimestampSeconds is the last time in seconds when a keyID was used
// e.g. apiserver_envelope_encryption_key_id_hash_last_timestamp_seconds{key_id_hash="sha256", provider_name="providerName",transformation_type="from_storage"} 1.674865558833728e+09
KeyIDHashLastTimestampSeconds = metrics.NewGaugeVec(
&metrics.GaugeOpts{
Namespace: namespace,
Subsystem: subsystem,
Name: "key_id_hash_last_timestamp_seconds",
Help: "The last time in seconds when a keyID was used.",
StabilityLevel: metrics.ALPHA,
},
[]string{"transformation_type", "provider_name", "key_id_hash"},
)
// keyIDHashStatusLastTimestampSeconds is the last time in seconds when a keyID was returned by the Status RPC call.
// e.g. apiserver_envelope_encryption_key_id_hash_status_last_timestamp_seconds{key_id_hash="sha256", provider_name="providerName"} 1.674865558833728e+09
KeyIDHashStatusLastTimestampSeconds = metrics.NewGaugeVec(
&metrics.GaugeOpts{
Namespace: namespace,
Subsystem: subsystem,
Name: "key_id_hash_status_last_timestamp_seconds",
Help: "The last time in seconds when a keyID was returned by the Status RPC call.",
StabilityLevel: metrics.ALPHA,
},
[]string{"provider_name", "key_id_hash"},
)
InvalidKeyIDFromStatusTotal = metrics.NewCounterVec(
&metrics.CounterOpts{
Namespace: namespace,
Subsystem: subsystem,
Name: "invalid_key_id_from_status_total",
Help: "Number of times an invalid keyID is returned by the Status RPC call split by error.",
StabilityLevel: metrics.ALPHA,
},
[]string{"provider_name", "error"},
)
DekSourceCacheSize = metrics.NewGaugeVec(
&metrics.GaugeOpts{
Namespace: namespace,
Subsystem: subsystem,
Name: "dek_source_cache_size",
Help: "Number of records in data encryption key (DEK) source cache. On a restart, this value is an approximation of the number of decrypt RPC calls the server will make to the KMS plugin.",
StabilityLevel: metrics.ALPHA,
},
[]string{"provider_name"},
)
)
var registerMetricsFunc sync.Once
var hashPool *sync.Pool
func registerLRUMetrics() {
if keyIDHashTotalMetricLabels != nil {
keyIDHashTotalMetricLabels.Clear()
}
if keyIDHashStatusLastTimestampSecondsMetricLabels != nil {
keyIDHashStatusLastTimestampSecondsMetricLabels.Clear()
}
keyIDHashTotalMetricLabels = lru.NewWithEvictionFunc(cacheSize, func(key lru.Key, _ interface{}) {
item := key.(metricLabels)
if deleted := KeyIDHashTotal.DeleteLabelValues(item.transformationType, item.providerName, item.keyIDHash); deleted {
klog.InfoS("Deleted keyIDHashTotalMetricLabels", "transformationType", item.transformationType,
"providerName", item.providerName, "keyIDHash", item.keyIDHash)
}
if deleted := KeyIDHashLastTimestampSeconds.DeleteLabelValues(item.transformationType, item.providerName, item.keyIDHash); deleted {
klog.InfoS("Deleted keyIDHashLastTimestampSecondsMetricLabels", "transformationType", item.transformationType,
"providerName", item.providerName, "keyIDHash", item.keyIDHash)
}
})
keyIDHashStatusLastTimestampSecondsMetricLabels = lru.NewWithEvictionFunc(cacheSize, func(key lru.Key, _ interface{}) {
item := key.(metricLabels)
if deleted := KeyIDHashStatusLastTimestampSeconds.DeleteLabelValues(item.providerName, item.keyIDHash); deleted {
klog.InfoS("Deleted keyIDHashStatusLastTimestampSecondsMetricLabels", "providerName", item.providerName, "keyIDHash", item.keyIDHash)
}
})
}
func RegisterMetrics() {
registerMetricsFunc.Do(func() {
registerLRUMetrics()
hashPool = &sync.Pool{
New: func() interface{} {
return sha256.New()
},
}
legacyregistry.MustRegister(dekCacheFillPercent)
legacyregistry.MustRegister(dekCacheInterArrivals)
legacyregistry.MustRegister(DekSourceCacheSize)
legacyregistry.MustRegister(KeyIDHashTotal)
legacyregistry.MustRegister(KeyIDHashLastTimestampSeconds)
legacyregistry.MustRegister(KeyIDHashStatusLastTimestampSeconds)
legacyregistry.MustRegister(InvalidKeyIDFromStatusTotal)
legacyregistry.MustRegister(KMSOperationsLatencyMetric)
})
}
// RecordKeyID records total count and last time in seconds when a KeyID was used for TransformFromStorage and TransformToStorage operations
func RecordKeyID(transformationType, providerName, keyID string) {
lockRecordKeyID.Lock()
defer lockRecordKeyID.Unlock()
keyIDHash := addLabelToCache(keyIDHashTotalMetricLabels, transformationType, providerName, keyID)
KeyIDHashTotal.WithLabelValues(transformationType, providerName, keyIDHash).Inc()
KeyIDHashLastTimestampSeconds.WithLabelValues(transformationType, providerName, keyIDHash).SetToCurrentTime()
}
// RecordKeyIDFromStatus records last time in seconds when a KeyID was returned by the Status RPC call.
func RecordKeyIDFromStatus(providerName, keyID string) {
lockRecordKeyIDStatus.Lock()
defer lockRecordKeyIDStatus.Unlock()
keyIDHash := addLabelToCache(keyIDHashStatusLastTimestampSecondsMetricLabels, "", providerName, keyID)
KeyIDHashStatusLastTimestampSeconds.WithLabelValues(providerName, keyIDHash).SetToCurrentTime()
}
func RecordInvalidKeyIDFromStatus(providerName, errCode string) {
InvalidKeyIDFromStatusTotal.WithLabelValues(providerName, errCode).Inc()
}
func | (transformationType string, start time.Time) {
switch transformationType {
case FromStorageLabel:
lockLastFromStorage.Lock()
defer lockLastFromStorage.Unlock()
if lastFromStorage.IsZero() {
lastFromStorage = start
}
dekCacheInterArrivals.WithLabelValues(transformationType).Observe(start.Sub(lastFromStorage).Seconds())
lastFromStorage = start
case ToStorageLabel:
lockLastToStorage.Lock()
defer lockLastToStorage.Unlock()
if lastToStorage.IsZero() {
lastToStorage = start
}
dekCacheInterArrivals.WithLabelValues(transformationType).Observe(start.Sub(lastToStorage).Seconds())
lastToStorage = start
}
}
func RecordDekCacheFillPercent(percent float64) {
dekCacheFillPercent.Set(percent)
}
func RecordDekSourceCacheSize(providerName string, size int) {
DekSourceCacheSize.WithLabelValues(providerName).Set(float64(size))
}
// RecordKMSOperationLatency records the latency of KMS operation.
func RecordKMSOperationLatency(providerName, methodName string, duration time.Duration, err error) {
KMSOperationsLatencyMetric.WithLabelValues(providerName, methodName, getErrorCode(err)).Observe(duration.Seconds())
}
type gRPCError interface {
GRPCStatus() *status.Status
}
func getErrorCode(err error) string {
if err == nil {
return codes.OK.String()
}
// handle errors wrapped with fmt.Errorf and similar
var s gRPCError
if errors.As(err, &s) {
return s.GRPCStatus().Code().String()
}
// This is not gRPC error. The operation must have failed before gRPC
// method was called, otherwise we would get gRPC error.
return "unknown-non-grpc"
}
func getHash(data string) string {
h := hashPool.Get().(hash.Hash)
h.Reset()
h.Write([]byte(data))
result := fmt.Sprintf("sha256:%x", h.Sum(nil))
hashPool.Put(h)
return result
}
func addLabelToCache(c *lru.Cache, transformationType, providerName, keyID string) string {
keyIDHash := ""
// only get hash if the keyID is not empty
if len(keyID) > 0 {
keyIDHash = getHash(keyID)
}
c.Add(metricLabels{
transformationType: transformationType,
providerName: providerName,
keyIDHash: keyIDHash,
}, nil) // value is irrelevant, this is a set and not a map
return keyIDHash
}
| RecordArrival | identifier_name |
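RecordArrival above keeps a separate last-arrival timestamp per direction, and the first observation for each direction is always zero because the zero time is overwritten with the first arrival before the delta is taken. A small sketch of the observations it produces:

func exampleRecordArrival() {
	t0 := time.Now()
	RecordArrival(FromStorageLabel, t0)                     // first from_storage arrival: observes 0s
	RecordArrival(FromStorageLabel, t0.Add(90*time.Second)) // observes 90s since the previous arrival
	// to_storage arrivals are tracked independently under their own mutex.
}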
prepare_data_for_nlp.py | """
- Loads the (processed) visits tables for the given input and target years.
- Sorts the visits by krypht and visit date (tupva).
- Merges the visits table with the combined diagnosis table.
- Builds per-krypht sequences and sets the target (the number of physical
visits in the target years).
"""
import os
import sys
import time
import gc
import pandas as pd
import numpy as np
import dask
from dask import dataframe as dd
from tqdm import tqdm
from pathlib import Path
from sklearn.model_selection import train_test_split
import warnings
warnings.simplefilter("ignore")
import logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
from multiprocessing import cpu_count
n_cores = cpu_count()
RANDOM_STATE = 2
do_split = True
load_temp = False
target_years = [2018]
input_years = [2012, 2013, 2014, 2015, 2016, 2017]
all_years = input_years + target_years
starttime = time.time()
logging.info(f"between years {input_years[0]} to {target_years[-1]}")
assert len(target_years) > 0, "at least 1 target year needed"
assert len(input_years) > 0, "at least 1 input year needed"
VOLUME_PATH = Path("/home/ykuv/pummel_data/volume/")
df_basics = pd.concat(
[pd.read_csv(VOLUME_PATH / Path(f"basics_norm_{year}.csv")) for year in all_years]
)
df_basics.sort_values(["krypht", "ika"], ascending=False, inplace=True)
logging.info(f"basics total rows before removing duplicates is {df_basics.shape[0]}")
df_basics.drop_duplicates(["krypht", "sukup"], keep="first", inplace=True)
logging.info(f"basics total rows after removing duplicates is {df_basics.shape[0]}")
if load_temp == False:
combined_diag = pd.read_csv(VOLUME_PATH / "merged_diagnosis_grouped.csv")
logging.info("diagnosis data loaded...")
# Processing input tables
df_x = pd.read_csv(
VOLUME_PATH / Path(f"visits_norm_{input_years[0]}.csv"), parse_dates=["tupva"]
)
df_y = pd.read_csv(
VOLUME_PATH / Path(f"visits_norm_{target_years[0]}.csv"), parse_dates=["tupva"]
)
for year in target_years[1:]:
df_y = pd.concat(
[
df_y,
pd.read_csv(
VOLUME_PATH / Path(f"visits_norm_{year}.csv"), parse_dates=["tupva"]
),
]
)
for year in input_years[1:]:
|
logging.info("visits data loaded...")
logging.info("dropping rows with missing krypht id...")
df_x = df_x.query("krypht != -1")
df_y = df_y.query("krypht != -1")
logging.info("merging visits with diag..."),
df_x.set_index("isoid", inplace=True)
df_y.set_index("isoid", inplace=True)
combined_diag.set_index("isoid", inplace=True)
df_x = df_x.join(combined_diag, on="isoid", how="left")
df_y = df_y.join(combined_diag, on="isoid", how="left")
logging.info("done.")
del combined_diag
gc.collect()
# extract only physical visits for target
logging.info(f"number of rows in df_y considering all visits: {df_y.shape[0]}")
df_y = df_y.query(
"palvelumuoto == 'T11' and (yhteystapa == 'R10' or yhteystapa == 'R40')"
)
logging.info(
f"number of rows in df_y considering only physical visits: {df_y.shape[0]}"
)
# sorting needed for time shift calc
logging.info("sorting df_x by krypht and tupva to get time deltas...")
df_x.sort_values(["krypht", "tupva"], inplace=True)
df_x["days_from_prev"] = df_x["tupva"] - df_x["tupva"].shift(1)
df_x["days_from_prev"] = df_x["days_from_prev"].dt.total_seconds() / 86400.0
df_y.sort_values(["krypht", "tupva"], inplace=True)
df_y["days_from_prev"] = df_y["tupva"] - df_y["tupva"].shift(1)
df_y["days_from_prev"] = df_y["days_from_prev"].dt.total_seconds() / 86400.0
logging.info("done.")
df_x.reset_index(inplace=True)
df_x.set_index("krypht", inplace=True)
df_y.reset_index(inplace=True)
df_y.set_index("krypht", inplace=True)
# convert to str
df_x["diagnosis"].fillna("<UNK>", inplace=True)
df_y["diagnosis"].fillna("<UNK>", inplace=True)
df_x["palvelumuoto"].fillna("<UNK>", inplace=True)
df_x["yhteystapa"].fillna("<UNK>", inplace=True)
df_x["diagnosis"] = df_x["diagnosis"].apply(lambda row: str(row))
df_x["days_from_prev"] = df_x["days_from_prev"].apply(lambda row: str(row))
df_x["yhteystapa"] = df_x["yhteystapa"].apply(lambda row: str(row))
df_x["palvelumuoto"] = df_x["palvelumuoto"].apply(lambda row: str(row))
df_x["tupva"] = df_x["tupva"].apply(lambda row: str(row))
df_x["isoid"] = df_x["isoid"].apply(lambda row: str(row))
df_y["diagnosis"] = df_y["diagnosis"].apply(lambda row: str(row))
df_y["days_from_prev"] = df_y["days_from_prev"].apply(lambda row: str(row))
df_x["diag_length_per_visit"] = df_x["diagnosis"].apply(
lambda row: len(row.split())
)
tqdm.pandas()
logging.info("grouping on krypht... ")
def _diag_grouper(partition):
diag_seq = ";".join(partition["diagnosis"].values.tolist())
return diag_seq
def _time_delta_grouper(partition):
time_delta_seq = partition["days_from_prev"].values.tolist()
time_delta_seq[0] = "0.0"
return ";".join(time_delta_seq)
def _yh_grouper(partition):
yh_seq = ";".join(partition["yhteystapa"].values.tolist())
return yh_seq
def _pal_grouper(partition):
pal_seq = ";".join(partition["palvelumuoto"].values.tolist())
return pal_seq
def _admittime_grouper(partition):
pal_seq = ";".join(partition["tupva"].values.tolist())
return pal_seq
def _isoid_grouper(partition):
pal_seq = ";".join(partition["isoid"].values.tolist())
return pal_seq
print("grouping df_x...")
grouped_max_visit_length = df_x.groupby("krypht").progress_apply(
lambda partition: max(partition["diag_length_per_visit"].values)
)
grouped_diag = df_x.groupby("krypht").progress_apply(_diag_grouper)
grouped_time_delta = df_x.groupby("krypht").progress_apply(_time_delta_grouper)
grouped_yh = df_x.groupby("krypht").progress_apply(_yh_grouper)
grouped_pal = df_x.groupby("krypht").progress_apply(_pal_grouper)
grouped_admittime = df_x.groupby("krypht").progress_apply(_admittime_grouper)
grouped_isoid = df_x.groupby("krypht").progress_apply(_isoid_grouper)
grouped_df_x = pd.DataFrame(
list(
zip(
grouped_diag.index,
grouped_diag.values,
grouped_time_delta,
grouped_max_visit_length,
grouped_yh,
grouped_pal,
grouped_admittime,
grouped_isoid,
)
),
columns=[
"krypht",
"X_seq",
"days_from_prev",
"max_num_diagnoses_per_visit",
"yh",
"pal",
"admit_time",
"isoid",
],
)
print("writing df_x_temp...")
grouped_df_x.to_csv(VOLUME_PATH / "df_x_temp.csv", index=False)
print("grouping df_y...")
grouped_diag = df_y.groupby("krypht").progress_apply(_diag_grouper)
grouped_time_delta = df_y.groupby("krypht").progress_apply(_time_delta_grouper)
grouped_df_y = pd.DataFrame(
list(zip(grouped_diag.index, grouped_diag.values, grouped_time_delta)),
columns=["krypht", "Y_seq", "Y_days_from_prev"],
)
print("writing df_y_temp...")
grouped_df_y.to_csv(VOLUME_PATH / "df_y_temp.csv", index=False)
logging.info("grouping on krypht done... ")
else:
grouped_df_x = pd.read_csv(VOLUME_PATH / "df_x_temp.csv")
grouped_df_y = pd.read_csv(VOLUME_PATH / "df_y_temp.csv")
grouped_df_x.set_index("krypht", inplace=True)
grouped_df_y.set_index("krypht", inplace=True)
df_basics.set_index("krypht", inplace=True)
grouped_df_y["target_y"] = grouped_df_y["Y_seq"].apply(lambda row: len(row.split(";")))
merged_x_y = grouped_df_x.loc[
:,
[
"X_seq",
"days_from_prev",
"max_num_diagnoses_per_visit",
"yh",
"pal",
"admit_time",
"isoid",
],
].merge(
grouped_df_y.loc[:, ["Y_seq", "Y_days_from_prev", "target_y"]],
how="left",
on="krypht",
)
merged_x_y = merged_x_y.merge(df_basics, how="left", on="krypht")
merged_x_y.fillna(
{
"target_y": 0.0, # should be 0 because it means no physical visits this year
"sukup": merged_x_y.sukup.mode(),
"ika": merged_x_y.ika.median(),
},
inplace=True,
)
def _time_delta_mean(delta_seq):
delta_seq = np.array(delta_seq.split(";")).astype(float)
return np.sum(delta_seq) / delta_seq.shape[0]
merged_x_y["prev_num_visits"] = merged_x_y["X_seq"].apply(
lambda row: len(row.split(";"))
)
# merged_x_y = merged_x_y.query("prev_num_visits > 2 and prev_num_visits <= 100")
merged_x_y["time_delta_mean"] = merged_x_y["days_from_prev"].apply(
lambda row: _time_delta_mean(row)
)
merged_x_y["yh"] = merged_x_y["yh"].astype(str)
merged_x_y["yh"] = merged_x_y["yh"].apply(lambda row: row.strip())
merged_x_y["pal"] = merged_x_y["pal"].astype(str)
merged_x_y["pal"] = merged_x_y["pal"].apply(lambda row: row.strip())
print(merged_x_y.head(5))
logging.info("writing to file")
#### Validation split logic
if do_split:
print("doing split...")
# Add a column to the dataset for split
train_size = 0.8
test_size = 0.1
heldout_size = 0.1
train, test = train_test_split(
merged_x_y, test_size=(test_size + heldout_size), random_state=RANDOM_STATE
)
test, heldout = train_test_split(test, test_size=0.5, random_state=RANDOM_STATE)
train, val = train_test_split(train, test_size=0.1, random_state=RANDOM_STATE)
train["split"] = "train"
test["split"] = "test"
val["split"] = "val"
heldout["split"] = "heldout"
pd.concat([train, test, val, heldout]).to_csv(
VOLUME_PATH
/ Path(f"NLPized_data_{input_years[0]}_to_{target_years[-1]}_w_split_seq.csv"),
index=True,
)
else:
merged_x_y.to_csv(
VOLUME_PATH
/ Path(f"NLPized_data_{input_years[0]}_to_{target_years[-1]}_seq.csv"),
index=True,
) # index is krypht
print(f"done in {(time.time() - starttime)/60:.4f} minutes.")
| df_x = pd.concat(
[
df_x,
pd.read_csv(
VOLUME_PATH / Path(f"visits_norm_{year}.csv"), parse_dates=["tupva"]
),
]
) | conditional_block |
prepare_data_for_nlp.py | """
- Loads the (processed) visits tables for the given input and target years.
- Sorts the visits by krypht and visit date (tupva).
- Merges the visits table with the combined diagnosis table.
- Builds per-krypht sequences and sets the target (the number of physical
visits in the target years).
"""
import os
import sys
import time
import gc
import pandas as pd
import numpy as np
import dask
from dask import dataframe as dd
from tqdm import tqdm
from pathlib import Path
from sklearn.model_selection import train_test_split
import warnings
warnings.simplefilter("ignore")
import logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
from multiprocessing import cpu_count
n_cores = cpu_count()
RANDOM_STATE = 2
do_split = True
load_temp = False
target_years = [2018]
input_years = [2012, 2013, 2014, 2015, 2016, 2017]
all_years = input_years + target_years
starttime = time.time()
logging.info(f"between years {input_years[0]} to {target_years[-1]}")
assert len(target_years) > 0, "at least 1 target year needed"
assert len(input_years) > 0, "at least 1 input year needed"
VOLUME_PATH = Path("/home/ykuv/pummel_data/volume/")
df_basics = pd.concat(
[pd.read_csv(VOLUME_PATH / Path(f"basics_norm_{year}.csv")) for year in all_years]
)
df_basics.sort_values(["krypht", "ika"], ascending=False, inplace=True)
logging.info(f"basics total rows before removing duplicates is {df_basics.shape[0]}")
df_basics.drop_duplicates(["krypht", "sukup"], keep="first", inplace=True)
logging.info(f"basics total rows after removing duplicates is {df_basics.shape[0]}")
if load_temp == False:
combined_diag = pd.read_csv(VOLUME_PATH / "merged_diagnosis_grouped.csv")
logging.info("diagnosis data loaded...")
# Processing input tables
df_x = pd.read_csv(
VOLUME_PATH / Path(f"visits_norm_{input_years[0]}.csv"), parse_dates=["tupva"]
)
df_y = pd.read_csv(
VOLUME_PATH / Path(f"visits_norm_{target_years[0]}.csv"), parse_dates=["tupva"]
)
for year in target_years[1:]:
df_y = pd.concat(
[
df_y,
pd.read_csv(
VOLUME_PATH / Path(f"visits_norm_{year}.csv"), parse_dates=["tupva"]
),
]
)
for year in input_years[1:]:
df_x = pd.concat(
[
df_x,
pd.read_csv(
VOLUME_PATH / Path(f"visits_norm_{year}.csv"), parse_dates=["tupva"]
),
]
)
logging.info("visits data loaded...")
logging.info("dropping rows with missing krypht id...")
df_x = df_x.query("krypht != -1")
df_y = df_y.query("krypht != -1")
logging.info("merging visits with diag..."),
df_x.set_index("isoid", inplace=True)
df_y.set_index("isoid", inplace=True)
combined_diag.set_index("isoid", inplace=True)
df_x = df_x.join(combined_diag, on="isoid", how="left")
df_y = df_y.join(combined_diag, on="isoid", how="left")
logging.info("done.")
del combined_diag
gc.collect()
# extract only physical visits for target
logging.info(f"number of rows in df_y considering all visits: {df_y.shape[0]}")
df_y = df_y.query(
"palvelumuoto == 'T11' and (yhteystapa == 'R10' or yhteystapa == 'R40')"
)
logging.info(
f"number of rows in df_y considering only physical visits: {df_y.shape[0]}"
)
# sorting needed for time shift calc
logging.info("sorting df_x by krypht and tupva to get time deltas...")
df_x.sort_values(["krypht", "tupva"], inplace=True)
df_x["days_from_prev"] = df_x["tupva"] - df_x["tupva"].shift(1)
df_x["days_from_prev"] = df_x["days_from_prev"].dt.total_seconds() / 86400.0
df_y.sort_values(["krypht", "tupva"], inplace=True)
df_y["days_from_prev"] = df_y["tupva"] - df_y["tupva"].shift(1)
df_y["days_from_prev"] = df_y["days_from_prev"].dt.total_seconds() / 86400.0
logging.info("done.")
df_x.reset_index(inplace=True)
df_x.set_index("krypht", inplace=True)
df_y.reset_index(inplace=True)
df_y.set_index("krypht", inplace=True)
# convert to str
df_x["diagnosis"].fillna("<UNK>", inplace=True)
df_y["diagnosis"].fillna("<UNK>", inplace=True)
df_x["palvelumuoto"].fillna("<UNK>", inplace=True)
df_x["yhteystapa"].fillna("<UNK>", inplace=True)
df_x["diagnosis"] = df_x["diagnosis"].apply(lambda row: str(row))
df_x["days_from_prev"] = df_x["days_from_prev"].apply(lambda row: str(row))
df_x["yhteystapa"] = df_x["yhteystapa"].apply(lambda row: str(row))
df_x["palvelumuoto"] = df_x["palvelumuoto"].apply(lambda row: str(row))
df_x["tupva"] = df_x["tupva"].apply(lambda row: str(row))
df_x["isoid"] = df_x["isoid"].apply(lambda row: str(row))
df_y["diagnosis"] = df_y["diagnosis"].apply(lambda row: str(row))
df_y["days_from_prev"] = df_y["days_from_prev"].apply(lambda row: str(row))
df_x["diag_length_per_visit"] = df_x["diagnosis"].apply(
lambda row: len(row.split())
)
tqdm.pandas()
logging.info("grouping on krypht... ")
def _diag_grouper(partition):
diag_seq = ";".join(partition["diagnosis"].values.tolist())
return diag_seq
def _time_delta_grouper(partition):
time_delta_seq = partition["days_from_prev"].values.tolist()
time_delta_seq[0] = "0.0" |
def _yh_grouper(partition):
yh_seq = ";".join(partition["yhteystapa"].values.tolist())
return yh_seq
def _pal_grouper(partition):
pal_seq = ";".join(partition["palvelumuoto"].values.tolist())
return pal_seq
def _admittime_grouper(partition):
pal_seq = ";".join(partition["tupva"].values.tolist())
return pal_seq
def _isoid_grouper(partition):
pal_seq = ";".join(partition["isoid"].values.tolist())
return pal_seq
print("grouping df_x...")
grouped_max_visit_length = df_x.groupby("krypht").progress_apply(
lambda partition: max(partition["diag_length_per_visit"].values)
)
grouped_diag = df_x.groupby("krypht").progress_apply(_diag_grouper)
grouped_time_delta = df_x.groupby("krypht").progress_apply(_time_delta_grouper)
grouped_yh = df_x.groupby("krypht").progress_apply(_yh_grouper)
grouped_pal = df_x.groupby("krypht").progress_apply(_pal_grouper)
grouped_admittime = df_x.groupby("krypht").progress_apply(_admittime_grouper)
grouped_isoid = df_x.groupby("krypht").progress_apply(_isoid_grouper)
grouped_df_x = pd.DataFrame(
list(
zip(
grouped_diag.index,
grouped_diag.values,
grouped_time_delta,
grouped_max_visit_length,
grouped_yh,
grouped_pal,
grouped_admittime,
grouped_isoid,
)
),
columns=[
"krypht",
"X_seq",
"days_from_prev",
"max_num_diagnoses_per_visit",
"yh",
"pal",
"admit_time",
"isoid",
],
)
print("writing df_x_temp...")
grouped_df_x.to_csv(VOLUME_PATH / "df_x_temp.csv", index=False)
print("grouping df_y...")
grouped_diag = df_y.groupby("krypht").progress_apply(_diag_grouper)
grouped_time_delta = df_y.groupby("krypht").progress_apply(_time_delta_grouper)
grouped_df_y = pd.DataFrame(
list(zip(grouped_diag.index, grouped_diag.values, grouped_time_delta)),
columns=["krypht", "Y_seq", "Y_days_from_prev"],
)
print("writing df_y_temp...")
grouped_df_y.to_csv(VOLUME_PATH / "df_y_temp.csv", index=False)
logging.info("grouping on krypht done... ")
else:
grouped_df_x = pd.read_csv(VOLUME_PATH / "df_x_temp.csv")
grouped_df_y = pd.read_csv(VOLUME_PATH / "df_y_temp.csv")
grouped_df_x.set_index("krypht", inplace=True)
grouped_df_y.set_index("krypht", inplace=True)
df_basics.set_index("krypht", inplace=True)
grouped_df_y["target_y"] = grouped_df_y["Y_seq"].apply(lambda row: len(row.split(";")))
merged_x_y = grouped_df_x.loc[
:,
[
"X_seq",
"days_from_prev",
"max_num_diagnoses_per_visit",
"yh",
"pal",
"admit_time",
"isoid",
],
].merge(
grouped_df_y.loc[:, ["Y_seq", "Y_days_from_prev", "target_y"]],
how="left",
on="krypht",
)
merged_x_y = merged_x_y.merge(df_basics, how="left", on="krypht")
merged_x_y.fillna(
{
"target_y": 0.0, # should be 0 because it means no physical visits this year
"sukup": merged_x_y.sukup.mode(),
"ika": merged_x_y.ika.median(),
},
inplace=True,
)
def _time_delta_mean(delta_seq):
delta_seq = np.array(delta_seq.split(";")).astype(float)
return np.sum(delta_seq) / delta_seq.shape[0]
merged_x_y["prev_num_visits"] = merged_x_y["X_seq"].apply(
lambda row: len(row.split(";"))
)
# merged_x_y = merged_x_y.query("prev_num_visits > 2 and prev_num_visits <= 100")
merged_x_y["time_delta_mean"] = merged_x_y["days_from_prev"].apply(
lambda row: _time_delta_mean(row)
)
merged_x_y["yh"] = merged_x_y["yh"].astype(str)
merged_x_y["yh"] = merged_x_y["yh"].apply(lambda row: row.strip())
merged_x_y["pal"] = merged_x_y["pal"].astype(str)
merged_x_y["pal"] = merged_x_y["pal"].apply(lambda row: row.strip())
print(merged_x_y.head(5))
logging.info("writing to file")
#### Validation split logic
if do_split:
print("doing split...")
# Add a column to the dataset for split
train_size = 0.8
test_size = 0.1
heldout_size = 0.1
train, test = train_test_split(
merged_x_y, test_size=(test_size + heldout_size), random_state=RANDOM_STATE
)
test, heldout = train_test_split(test, test_size=0.5, random_state=RANDOM_STATE)
train, val = train_test_split(train, test_size=0.1, random_state=RANDOM_STATE)
train["split"] = "train"
test["split"] = "test"
val["split"] = "val"
heldout["split"] = "heldout"
pd.concat([train, test, val, heldout]).to_csv(
VOLUME_PATH
/ Path(f"NLPized_data_{input_years[0]}_to_{target_years[-1]}_w_split_seq.csv"),
index=True,
)
else:
merged_x_y.to_csv(
VOLUME_PATH
/ Path(f"NLPized_data_{input_years[0]}_to_{target_years[-1]}_seq.csv"),
index=True,
) # index is krypht
print(f"done in {(time.time() - starttime)/60:.4f} minutes.") | return ";".join(time_delta_seq) | random_line_split |
prepare_data_for_nlp.py | """
- Loads the (processed) visits tables for the given input and target years.
- Sorts the visits by krypht and visit date (tupva).
- Merges the visits table with the combined diagnosis table.
- Builds per-krypht sequences and sets the target (the number of physical
visits in the target years).
"""
import os
import sys
import time
import gc
import pandas as pd
import numpy as np
import dask
from dask import dataframe as dd
from tqdm import tqdm
from pathlib import Path
from sklearn.model_selection import train_test_split
import warnings
warnings.simplefilter("ignore")
import logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
from multiprocessing import cpu_count
n_cores = cpu_count()
RANDOM_STATE = 2
do_split = True
load_temp = False
target_years = [2018]
input_years = [2012, 2013, 2014, 2015, 2016, 2017]
all_years = input_years + target_years
starttime = time.time()
logging.info(f"between years {input_years[0]} to {target_years[-1]}")
assert len(target_years) > 0, "at least 1 target year needed"
assert len(input_years) > 0, "at least 1 input year needed"
VOLUME_PATH = Path("/home/ykuv/pummel_data/volume/")
df_basics = pd.concat(
[pd.read_csv(VOLUME_PATH / Path(f"basics_norm_{year}.csv")) for year in all_years]
)
df_basics.sort_values(["krypht", "ika"], ascending=False, inplace=True)
logging.info(f"basics total rows before removing duplicates is {df_basics.shape[0]}")
df_basics.drop_duplicates(["krypht", "sukup"], keep="first", inplace=True)
logging.info(f"basics total rows after removing duplicates is {df_basics.shape[0]}")
if load_temp == False:
combined_diag = pd.read_csv(VOLUME_PATH / "merged_diagnosis_grouped.csv")
logging.info("diagnosis data loaded...")
# Processing input tables
df_x = pd.read_csv(
VOLUME_PATH / Path(f"visits_norm_{input_years[0]}.csv"), parse_dates=["tupva"]
)
df_y = pd.read_csv(
VOLUME_PATH / Path(f"visits_norm_{target_years[0]}.csv"), parse_dates=["tupva"]
)
for year in target_years[1:]:
df_y = pd.concat(
[
df_y,
pd.read_csv(
VOLUME_PATH / Path(f"visits_norm_{year}.csv"), parse_dates=["tupva"]
),
]
)
for year in input_years[1:]:
df_x = pd.concat(
[
df_x,
pd.read_csv(
VOLUME_PATH / Path(f"visits_norm_{year}.csv"), parse_dates=["tupva"]
),
]
)
logging.info("visits data loaded...")
logging.info("dropping rows with missing krypht id...")
df_x = df_x.query("krypht != -1")
df_y = df_y.query("krypht != -1")
logging.info("merging visits with diag..."),
df_x.set_index("isoid", inplace=True)
df_y.set_index("isoid", inplace=True)
combined_diag.set_index("isoid", inplace=True)
df_x = df_x.join(combined_diag, on="isoid", how="left")
df_y = df_y.join(combined_diag, on="isoid", how="left")
logging.info("done.")
del combined_diag
gc.collect()
# extract only physical visits for target
logging.info(f"number of rows in df_y considering all visits: {df_y.shape[0]}")
df_y = df_y.query(
"palvelumuoto == 'T11' and (yhteystapa == 'R10' or yhteystapa == 'R40')"
)
logging.info(
f"number of rows in df_y considering only physical visits: {df_y.shape[0]}"
)
# sorting needed for time shift calc
logging.info("sorting df_x by krypht and tupva to get time deltas...")
df_x.sort_values(["krypht", "tupva"], inplace=True)
df_x["days_from_prev"] = df_x["tupva"] - df_x["tupva"].shift(1)
df_x["days_from_prev"] = df_x["days_from_prev"].dt.total_seconds() / 86400.0
df_y.sort_values(["krypht", "tupva"], inplace=True)
df_y["days_from_prev"] = df_y["tupva"] - df_y["tupva"].shift(1)
df_y["days_from_prev"] = df_y["days_from_prev"].dt.total_seconds() / 86400.0
logging.info("done.")
df_x.reset_index(inplace=True)
df_x.set_index("krypht", inplace=True)
df_y.reset_index(inplace=True)
df_y.set_index("krypht", inplace=True)
# convert to str
df_x["diagnosis"].fillna("<UNK>", inplace=True)
df_y["diagnosis"].fillna("<UNK>", inplace=True)
df_x["palvelumuoto"].fillna("<UNK>", inplace=True)
df_x["yhteystapa"].fillna("<UNK>", inplace=True)
df_x["diagnosis"] = df_x["diagnosis"].apply(lambda row: str(row))
df_x["days_from_prev"] = df_x["days_from_prev"].apply(lambda row: str(row))
df_x["yhteystapa"] = df_x["yhteystapa"].apply(lambda row: str(row))
df_x["palvelumuoto"] = df_x["palvelumuoto"].apply(lambda row: str(row))
df_x["tupva"] = df_x["tupva"].apply(lambda row: str(row))
df_x["isoid"] = df_x["isoid"].apply(lambda row: str(row))
df_y["diagnosis"] = df_y["diagnosis"].apply(lambda row: str(row))
df_y["days_from_prev"] = df_y["days_from_prev"].apply(lambda row: str(row))
df_x["diag_length_per_visit"] = df_x["diagnosis"].apply(
lambda row: len(row.split())
)
tqdm.pandas()
logging.info("grouping on krypht... ")
def | (paritition):
diag_seq = ";".join(paritition["diagnosis"].values.tolist())
return diag_seq
def _time_delta_grouper(partition):
time_delta_seq = partition["days_from_prev"].values.tolist()
time_delta_seq[0] = "0.0"
return ";".join(time_delta_seq)
def _yh_grouper(partition):
yh_seq = ";".join(partition["yhteystapa"].values.tolist())
return yh_seq
def _pal_grouper(partition):
pal_seq = ";".join(partition["palvelumuoto"].values.tolist())
return pal_seq
def _admittime_grouper(partition):
pal_seq = ";".join(partition["tupva"].values.tolist())
return pal_seq
def _isoid_grouper(partition):
pal_seq = ";".join(partition["isoid"].values.tolist())
return pal_seq
print("grouping df_x...")
grouped_max_visit_length = df_x.groupby("krypht").progress_apply(
lambda partition: max(partition["diag_length_per_visit"].values)
)
grouped_diag = df_x.groupby("krypht").progress_apply(_diag_grouper)
grouped_time_delta = df_x.groupby("krypht").progress_apply(_time_delta_grouper)
grouped_yh = df_x.groupby("krypht").progress_apply(_yh_grouper)
grouped_pal = df_x.groupby("krypht").progress_apply(_pal_grouper)
grouped_admittime = df_x.groupby("krypht").progress_apply(_admittime_grouper)
grouped_isoid = df_x.groupby("krypht").progress_apply(_isoid_grouper)
grouped_df_x = pd.DataFrame(
list(
zip(
grouped_diag.index,
grouped_diag.values,
grouped_time_delta,
grouped_max_visit_length,
grouped_yh,
grouped_pal,
grouped_admittime,
grouped_isoid,
)
),
columns=[
"krypht",
"X_seq",
"days_from_prev",
"max_num_diagnoses_per_visit",
"yh",
"pal",
"admit_time",
"isoid",
],
)
print("writing df_x_temp...")
grouped_df_x.to_csv(VOLUME_PATH / "df_x_temp.csv", index=False)
print("grouping df_y...")
grouped_diag = df_y.groupby("krypht").progress_apply(_diag_grouper)
grouped_time_delta = df_y.groupby("krypht").progress_apply(_time_delta_grouper)
grouped_df_y = pd.DataFrame(
list(zip(grouped_diag.index, grouped_diag.values, grouped_time_delta)),
columns=["krypht", "Y_seq", "Y_days_from_prev"],
)
print("writing df_y_temp...")
grouped_df_y.to_csv(VOLUME_PATH / "df_y_temp.csv", index=False)
logging.info("grouping on krypht done... ")
else:
grouped_df_x = pd.read_csv(VOLUME_PATH / "df_x_temp.csv")
grouped_df_y = pd.read_csv(VOLUME_PATH / "df_y_temp.csv")
grouped_df_x.set_index("krypht", inplace=True)
grouped_df_y.set_index("krypht", inplace=True)
df_basics.set_index("krypht", inplace=True)
grouped_df_y["target_y"] = grouped_df_y["Y_seq"].apply(lambda row: len(row.split(";")))
merged_x_y = grouped_df_x.loc[
:,
[
"X_seq",
"days_from_prev",
"max_num_diagnoses_per_visit",
"yh",
"pal",
"admit_time",
"isoid",
],
].merge(
grouped_df_y.loc[:, ["Y_seq", "Y_days_from_prev", "target_y"]],
how="left",
on="krypht",
)
merged_x_y = merged_x_y.merge(df_basics, how="left", on="krypht")
merged_x_y.fillna(
{
"target_y": 0.0, # should be 0 because it means no physical visits this year
"sukup": merged_x_y.sukup.mode()[0],
"ika": merged_x_y.ika.median(),
},
inplace=True,
)
def _time_delta_mean(delta_seq):
delta_seq = np.array(delta_seq.split(";")).astype(float)
return np.sum(delta_seq) / delta_seq.shape[0]
merged_x_y["prev_num_visits"] = merged_x_y["X_seq"].apply(
lambda row: len(row.split(";"))
)
# merged_x_y = merged_x_y.query("prev_num_visits > 2 and prev_num_visits <= 100")
merged_x_y["time_delta_mean"] = merged_x_y["days_from_prev"].apply(
lambda row: _time_delta_mean(row)
)
merged_x_y["yh"] = merged_x_y["yh"].astype(str)
merged_x_y["yh"] = merged_x_y["yh"].apply(lambda row: row.strip())
merged_x_y["pal"] = merged_x_y["pal"].astype(str)
merged_x_y["pal"] = merged_x_y["pal"].apply(lambda row: row.strip())
print(merged_x_y.head(5))
logging.info("writing to file")
#### Validation split logic
if do_split:
print("doing split...")
# Add a column to the dataset for split
train_size = 0.8
test_size = 0.1
heldout_size = 0.1
train, test = train_test_split(
merged_x_y, test_size=(test_size + heldout_size), random_state=RANDOM_STATE
)
test, heldout = train_test_split(test, test_size=0.5, random_state=RANDOM_STATE)
train, val = train_test_split(train, test_size=0.1, random_state=RANDOM_STATE)
train["split"] = "train"
test["split"] = "test"
val["split"] = "val"
heldout["split"] = "heldout"
pd.concat([train, test, val, heldout]).to_csv(
VOLUME_PATH
/ Path(f"NLPized_data_{input_years[0]}_to_{target_years[-1]}_w_split_seq.csv"),
index=True,
)
else:
merged_x_y.to_csv(
VOLUME_PATH
/ Path(f"NLPized_data_{input_years[0]}_to_{target_years[-1]}_seq.csv"),
index=True,
) # index is krypht
print(f"done in {(time.time() - starttime)/60:.4f} minutes.")
| _diag_grouper | identifier_name |
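All of the grouper helpers above share one pattern: collapse a single column of one patient's visits (one krypht partition) into a ";"-joined sequence string. A minimal, self-contained sketch of that pattern on made-up toy data (the diagnosis codes and ids are invented; the column and key names mirror the script):
import pandas as pd
# Two patients (krypht 1 and 2), one space-separated diagnosis string per visit.
toy = pd.DataFrame({"krypht": [1, 1, 2], "diagnosis": ["J06 K21", "J06", "I10"]})
def _diag_grouper(partition):
    # One partition holds all visits of one krypht; join them into a sequence string.
    return ";".join(partition["diagnosis"].values.tolist())
print(toy.groupby("krypht").apply(_diag_grouper).to_dict())
# -> {1: 'J06 K21;J06', 2: 'I10'}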
prepare_data_for_nlp.py | """
- The script takes input years and target years and loads the corresponding years'
(processed) visits tables.
- It then sorts the visits by krypht and tupva.
- It merges the visits tables with the combined diagnosis table.
- It sets the target (the number of physical visits in the target years).
"""
import os
import sys
import time
import gc
import pandas as pd
import numpy as np
import dask
from dask import dataframe as dd
from tqdm import tqdm
from pathlib import Path
from sklearn.model_selection import train_test_split
import warnings
warnings.simplefilter("ignore")
import logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
from multiprocessing import cpu_count
n_cores = cpu_count()
RANDOM_STATE = 2
do_split = True
load_temp = False
target_years = [2018]
input_years = [2012, 2013, 2014, 2015, 2016, 2017]
all_years = input_years + target_years
starttime = time.time()
logging.info(f"between years {input_years[0]} to {target_years[-1]}")
assert len(target_years) > 0, "at least 1 target year needed"
assert len(input_years) > 0, "at least 1 input year needed"
VOLUME_PATH = Path("/home/ykuv/pummel_data/volume/")
df_basics = pd.concat(
[pd.read_csv(VOLUME_PATH / Path(f"basics_norm_{year}.csv")) for year in all_years]
)
df_basics.sort_values(["krypht", "ika"], ascending=False, inplace=True)
logging.info(f"basics total rows before removing duplicates is {df_basics.shape[0]}")
df_basics.drop_duplicates(["krypht", "sukup"], keep="first", inplace=True)
logging.info(f"basics total rows after removing duplicates is {df_basics.shape[0]}")
if load_temp == False:
combined_diag = pd.read_csv(VOLUME_PATH / "merged_diagnosis_grouped.csv")
logging.info("diagnosis data loaded...")
# Processing input tables
df_x = pd.read_csv(
VOLUME_PATH / Path(f"visits_norm_{input_years[0]}.csv"), parse_dates=["tupva"]
)
df_y = pd.read_csv(
VOLUME_PATH / Path(f"visits_norm_{target_years[0]}.csv"), parse_dates=["tupva"]
)
for year in target_years[1:]:
df_y = pd.concat(
[
df_y,
pd.read_csv(
VOLUME_PATH / Path(f"visits_norm_{year}.csv"), parse_dates=["tupva"]
),
]
)
for year in input_years[1:]:
df_x = pd.concat(
[
df_x,
pd.read_csv(
VOLUME_PATH / Path(f"visits_norm_{year}.csv"), parse_dates=["tupva"]
),
]
)
logging.info("visits data loaded...")
logging.info("dropping rows with missing krypht id...")
df_x = df_x.query("krypht != -1")
df_y = df_y.query("krypht != -1")
logging.info("merging visits with diag...")
df_x.set_index("isoid", inplace=True)
df_y.set_index("isoid", inplace=True)
combined_diag.set_index("isoid", inplace=True)
df_x = df_x.join(combined_diag, on="isoid", how="left")
df_y = df_y.join(combined_diag, on="isoid", how="left")
logging.info("done.")
del combined_diag
gc.collect()
# extract only physical visits for target
logging.info(f"number of rows in df_y considering all visits: {df_y.shape[0]}")
df_y = df_y.query(
"palvelumuoto == 'T11' and (yhteystapa == 'R10' or yhteystapa == 'R40')"
)
logging.info(
f"number of rows in df_y considering only physical visits: {df_y.shape[0]}"
)
# sorting needed for time shift calc
logging.info("sorting df_x by krypht and tupva to get time deltas...")
df_x.sort_values(["krypht", "tupva"], inplace=True)
df_x["days_from_prev"] = df_x["tupva"] - df_x["tupva"].shift(1)
df_x["days_from_prev"] = df_x["days_from_prev"].dt.total_seconds() / 86400.0
df_y.sort_values(["krypht", "tupva"], inplace=True)
df_y["days_from_prev"] = df_y["tupva"] - df_y["tupva"].shift(1)
df_y["days_from_prev"] = df_y["days_from_prev"].dt.total_seconds() / 86400.0
logging.info("done.")
df_x.reset_index(inplace=True)
df_x.set_index("krypht", inplace=True)
df_y.reset_index(inplace=True)
df_y.set_index("krypht", inplace=True)
# convert to str
df_x["diagnosis"].fillna("<UNK>", inplace=True)
df_y["diagnosis"].fillna("<UNK>", inplace=True)
df_x["palvelumuoto"].fillna("<UNK>", inplace=True)
df_x["yhteystapa"].fillna("<UNK>", inplace=True)
df_x["diagnosis"] = df_x["diagnosis"].apply(lambda row: str(row))
df_x["days_from_prev"] = df_x["days_from_prev"].apply(lambda row: str(row))
df_x["yhteystapa"] = df_x["yhteystapa"].apply(lambda row: str(row))
df_x["palvelumuoto"] = df_x["palvelumuoto"].apply(lambda row: str(row))
df_x["tupva"] = df_x["tupva"].apply(lambda row: str(row))
df_x["isoid"] = df_x["isoid"].apply(lambda row: str(row))
df_y["diagnosis"] = df_y["diagnosis"].apply(lambda row: str(row))
df_y["days_from_prev"] = df_y["days_from_prev"].apply(lambda row: str(row))
df_x["diag_length_per_visit"] = df_x["diagnosis"].apply(
lambda row: len(row.split())
)
tqdm.pandas()
logging.info("grouping on krypht... ")
def _diag_grouper(partition):
diag_seq = ";".join(partition["diagnosis"].values.tolist())
return diag_seq
def _time_delta_grouper(partition):
|
def _yh_grouper(partition):
yh_seq = ";".join(partition["yhteystapa"].values.tolist())
return yh_seq
def _pal_grouper(partition):
pal_seq = ";".join(partition["palvelumuoto"].values.tolist())
return pal_seq
def _admittime_grouper(partition):
pal_seq = ";".join(partition["tupva"].values.tolist())
return pal_seq
def _isoid_grouper(partition):
pal_seq = ";".join(partition["isoid"].values.tolist())
return pal_seq
print("grouping df_x...")
grouped_max_visit_length = df_x.groupby("krypht").progress_apply(
lambda partition: max(partition["diag_length_per_visit"].values)
)
grouped_diag = df_x.groupby("krypht").progress_apply(_diag_grouper)
grouped_time_delta = df_x.groupby("krypht").progress_apply(_time_delta_grouper)
grouped_yh = df_x.groupby("krypht").progress_apply(_yh_grouper)
grouped_pal = df_x.groupby("krypht").progress_apply(_pal_grouper)
grouped_admittime = df_x.groupby("krypht").progress_apply(_admittime_grouper)
grouped_isoid = df_x.groupby("krypht").progress_apply(_isoid_grouper)
grouped_df_x = pd.DataFrame(
list(
zip(
grouped_diag.index,
grouped_diag.values,
grouped_time_delta,
grouped_max_visit_length,
grouped_yh,
grouped_pal,
grouped_admittime,
grouped_isoid,
)
),
columns=[
"krypht",
"X_seq",
"days_from_prev",
"max_num_diagnoses_per_visit",
"yh",
"pal",
"admit_time",
"isoid",
],
)
print("writing df_x_temp...")
grouped_df_x.to_csv(VOLUME_PATH / "df_x_temp.csv", index=False)
print("grouping df_y...")
grouped_diag = df_y.groupby("krypht").progress_apply(_diag_grouper)
grouped_time_delta = df_y.groupby("krypht").progress_apply(_time_delta_grouper)
grouped_df_y = pd.DataFrame(
list(zip(grouped_diag.index, grouped_diag.values, grouped_time_delta)),
columns=["krypht", "Y_seq", "Y_days_from_prev"],
)
print("writing df_y_temp...")
grouped_df_y.to_csv(VOLUME_PATH / "df_y_temp.csv", index=False)
logging.info("grouping on krypht done... ")
else:
grouped_df_x = pd.read_csv(VOLUME_PATH / "df_x_temp.csv")
grouped_df_y = pd.read_csv(VOLUME_PATH / "df_y_temp.csv")
grouped_df_x.set_index("krypht", inplace=True)
grouped_df_y.set_index("krypht", inplace=True)
df_basics.set_index("krypht", inplace=True)
grouped_df_y["target_y"] = grouped_df_y["Y_seq"].apply(lambda row: len(row.split(";")))
merged_x_y = grouped_df_x.loc[
:,
[
"X_seq",
"days_from_prev",
"max_num_diagnoses_per_visit",
"yh",
"pal",
"admit_time",
"isoid",
],
].merge(
grouped_df_y.loc[:, ["Y_seq", "Y_days_from_prev", "target_y"]],
how="left",
on="krypht",
)
merged_x_y = merged_x_y.merge(df_basics, how="left", on="krypht")
merged_x_y.fillna(
{
"target_y": 0.0, # should be 0 because it means no physical visits this year
"sukup": merged_x_y.sukup.mode()[0],
"ika": merged_x_y.ika.median(),
},
inplace=True,
)
def _time_delta_mean(delta_seq):
delta_seq = np.array(delta_seq.split(";")).astype(float)
return np.sum(delta_seq) / delta_seq.shape[0]
merged_x_y["prev_num_visits"] = merged_x_y["X_seq"].apply(
lambda row: len(row.split(";"))
)
# merged_x_y = merged_x_y.query("prev_num_visits > 2 and prev_num_visits <= 100")
merged_x_y["time_delta_mean"] = merged_x_y["days_from_prev"].apply(
lambda row: _time_delta_mean(row)
)
merged_x_y["yh"] = merged_x_y["yh"].astype(str)
merged_x_y["yh"] = merged_x_y["yh"].apply(lambda row: row.strip())
merged_x_y["pal"] = merged_x_y["pal"].astype(str)
merged_x_y["pal"] = merged_x_y["pal"].apply(lambda row: row.strip())
print(merged_x_y.head(5))
logging.info("writing to file")
#### Validation split logic
if do_split:
print("doing split...")
# Add a column to the dataset for split
train_size = 0.8
test_size = 0.1
heldout_size = 0.1
train, test = train_test_split(
merged_x_y, test_size=(test_size + heldout_size), random_state=RANDOM_STATE
)
test, heldout = train_test_split(test, test_size=0.5, random_state=RANDOM_STATE)
train, val = train_test_split(train, test_size=0.1, random_state=RANDOM_STATE)
train["split"] = "train"
test["split"] = "test"
val["split"] = "val"
heldout["split"] = "heldout"
pd.concat([train, test, val, heldout]).to_csv(
VOLUME_PATH
/ Path(f"NLPized_data_{input_years[0]}_to_{target_years[-1]}_w_split_seq.csv"),
index=True,
)
else:
merged_x_y.to_csv(
VOLUME_PATH
/ Path(f"NLPized_data_{input_years[0]}_to_{target_years[-1]}_seq.csv"),
index=True,
) # index is krypht
print(f"done in {(time.time() - starttime)/60:.4f} minutes.")
| time_delta_seq = partition["days_from_prev"].values.tolist()
time_delta_seq[0] = "0.0"
return ";".join(time_delta_seq) | identifier_body |
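The split block near the end of the script first carves off 20% of patients (test + heldout), halves that into test and heldout, and then takes 10% of the remaining 80% as validation, so the effective proportions come out at roughly 72/8/10/10. A quick arithmetic sketch with an illustrative patient count:
n = 1000                                 # illustrative number of patients
test_plus_heldout = int(n * 0.2)         # 200 leave the training pool
test = heldout = test_plus_heldout // 2  # 100 each
train_pool = n - test_plus_heldout       # 800
val = int(train_pool * 0.1)              # 80
train = train_pool - val                 # 720
print(train, val, test, heldout)         # 720 80 100 100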
bootstrap.go | /*
*
* Copyright 2019 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
// Package bootstrap provides the functionality to initialize certain aspects
// of an xDS client by reading a bootstrap file.
package bootstrap
import (
"bytes"
"encoding/json"
"fmt"
"io/ioutil"
v2corepb "github.com/envoyproxy/go-control-plane/envoy/api/v2/core"
v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3"
"github.com/golang/protobuf/jsonpb"
"github.com/golang/protobuf/proto"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials/google"
"google.golang.org/grpc/credentials/tls/certprovider"
"google.golang.org/grpc/internal"
"google.golang.org/grpc/xds/internal/env"
"google.golang.org/grpc/xds/internal/version"
)
const (
// The "server_features" field in the bootstrap file contains a list of
// features supported by the server. A value of "xds_v3" indicates that the
// server supports the v3 version of the xDS transport protocol.
serverFeaturesV3 = "xds_v3"
// Type name for Google default credentials.
credsGoogleDefault = "google_default"
credsInsecure = "insecure"
gRPCUserAgentName = "gRPC Go"
clientFeatureNoOverprovisioning = "envoy.lb.does_not_support_overprovisioning"
)
var gRPCVersion = fmt.Sprintf("%s %s", gRPCUserAgentName, grpc.Version)
// For overriding in unit tests.
var bootstrapFileReadFunc = ioutil.ReadFile
// Config provides the xDS client with several key bits of information that it
// requires in its interaction with an xDS server. The Config is initialized
// from the bootstrap file.
type Config struct {
// BalancerName is the name of the xDS server to connect to.
//
// The bootstrap file contains a list of servers (with name+creds), but we
// pick the first one.
BalancerName string
// Creds contains the credentials to be used while talking to the xDS
// server, as a grpc.DialOption.
Creds grpc.DialOption
// TransportAPI indicates the API version of xDS transport protocol to use.
// This describes the xDS gRPC endpoint and version of
// DiscoveryRequest/Response used on the wire.
TransportAPI version.TransportAPI
// NodeProto contains the Node proto to be used in xDS requests. The actual
// type depends on the transport protocol version used.
NodeProto proto.Message
// CertProviderConfigs contain parsed configs for supported certificate
// provider plugins found in the bootstrap file.
CertProviderConfigs map[string]CertProviderConfig
}
// CertProviderConfig wraps the certificate provider plugin name and config
// (corresponding to one plugin instance) found in the bootstrap file.
type CertProviderConfig struct {
// Name is the registered name of the certificate provider.
Name string
// Config is the parsed config to be passed to the certificate provider.
Config certprovider.StableConfig
}
type channelCreds struct {
Type string `json:"type"`
Config json.RawMessage `json:"config"`
}
type xdsServer struct {
ServerURI string `json:"server_uri"`
ChannelCreds []channelCreds `json:"channel_creds"`
}
// NewConfig returns a new instance of Config initialized by reading the
// bootstrap file found at ${GRPC_XDS_BOOTSTRAP}.
//
// The format of the bootstrap file will be as follows:
// {
// "xds_server": {
// "server_uri": <string containing URI of xds server>,
// "channel_creds": [
// {
// "type": <string containing channel cred type>,
// "config": <JSON object containing config for the type>
// }
// ],
// "server_features": [ ... ]
// "certificate_providers" : {
// "default": {
// "plugin_name": "default-plugin-name",
// "config": { default plugin config in JSON }
// },
// "foo": {
// "plugin_name": "foo",
// "config": { foo plugin config in JSON }
// }
// }
// },
// "node": <JSON form of Node proto>
// }
//
// Currently, we support exactly one type of credential, which is
// "google_default", where we use the host's default certs for transport
// credentials and a Google oauth token for call credentials.
//
// This function tries to process as much of the bootstrap file as possible (in
// the presence of the errors) and may return a Config object with certain
// fields left unspecified, in which case the caller should use some sane
// defaults.
func NewConfig() (*Config, error) {
config := &Config{}
fName := env.BootstrapFileName
if fName == "" {
return nil, fmt.Errorf("xds: Environment variable %q not defined", "GRPC_XDS_BOOTSTRAP")
}
logger.Infof("Got bootstrap file location %q", fName)
data, err := bootstrapFileReadFunc(fName)
if err != nil {
return nil, fmt.Errorf("xds: Failed to read bootstrap file %s with error %v", fName, err)
}
logger.Debugf("Bootstrap content: %s", data)
var jsonData map[string]json.RawMessage
if err := json.Unmarshal(data, &jsonData); err != nil {
return nil, fmt.Errorf("xds: Failed to parse file %s (content %v) with error: %v", fName, string(data), err)
}
serverSupportsV3 := false
m := jsonpb.Unmarshaler{AllowUnknownFields: true}
for k, v := range jsonData {
switch k {
case "node":
// We unconditionally convert the JSON into a v3.Node proto. The v3
// proto does not contain the deprecated field "build_version" from
// the v2 proto. We do not expect the bootstrap file to contain the
// "build_version" field. In any case, the unmarshal will succeed
// because we have set the `AllowUnknownFields` option on the
// unmarshaler.
n := &v3corepb.Node{}
if err := m.Unmarshal(bytes.NewReader(v), n); err != nil {
return nil, fmt.Errorf("xds: jsonpb.Unmarshal(%v) for field %q failed during bootstrap: %v", string(v), k, err)
}
config.NodeProto = n
case "xds_servers":
var servers []*xdsServer
if err := json.Unmarshal(v, &servers); err != nil {
return nil, fmt.Errorf("xds: json.Unmarshal(%v) for field %q failed during bootstrap: %v", string(v), k, err)
}
if len(servers) < 1 {
return nil, fmt.Errorf("xds: bootstrap file parsing failed during bootstrap: file doesn't contain any xds server to connect to")
}
xs := servers[0]
config.BalancerName = xs.ServerURI
for _, cc := range xs.ChannelCreds {
// We stop at the first credential type that we support.
if cc.Type == credsGoogleDefault {
config.Creds = grpc.WithCredentialsBundle(google.NewDefaultCredentials())
break
} else if cc.Type == credsInsecure {
config.Creds = grpc.WithInsecure()
break
}
}
case "server_features":
var features []string
if err := json.Unmarshal(v, &features); err != nil {
return nil, fmt.Errorf("xds: json.Unmarshal(%v) for field %q failed during bootstrap: %v", string(v), k, err)
}
for _, f := range features {
switch f {
case serverFeaturesV3:
serverSupportsV3 = true
}
}
case "certificate_providers":
var providerInstances map[string]json.RawMessage
if err := json.Unmarshal(v, &providerInstances); err != nil {
return nil, fmt.Errorf("xds: json.Unmarshal(%v) for field %q failed during bootstrap: %v", string(v), k, err)
}
configs := make(map[string]CertProviderConfig)
getBuilder := internal.GetCertificateProviderBuilder.(func(string) certprovider.Builder)
for instance, data := range providerInstances {
var nameAndConfig struct {
PluginName string `json:"plugin_name"`
Config json.RawMessage `json:"config"`
}
if err := json.Unmarshal(data, &nameAndConfig); err != nil {
return nil, fmt.Errorf("xds: json.Unmarshal(%v) for field %q failed during bootstrap: %v", string(v), instance, err)
}
name := nameAndConfig.PluginName
parser := getBuilder(nameAndConfig.PluginName)
if parser == nil {
// We ignore plugins that we do not know about.
continue
}
cfg := nameAndConfig.Config
c, err := parser.ParseConfig(cfg)
if err != nil {
return nil, fmt.Errorf("xds: Config parsing for plugin %q failed: %v", name, err)
}
configs[instance] = CertProviderConfig{
Name: name,
Config: c,
}
}
config.CertProviderConfigs = configs
}
// Do not fail the xDS bootstrap when an unknown field is seen. This can
// happen when an older version client reads a newer version bootstrap
// file with new fields.
}
if config.BalancerName == "" {
return nil, fmt.Errorf("xds: Required field %q not found in bootstrap %s", "xds_servers.server_uri", jsonData["xds_servers"])
}
if config.Creds == nil {
return nil, fmt.Errorf("xds: Required field %q doesn't contain valid value in bootstrap %s", "xds_servers.channel_creds", jsonData["xds_servers"])
}
// We end up using v3 transport protocol version only if the following
// conditions are met:
// 1. Server supports v3, indicated by the presence of "xds_v3" in
// server_features.
// 2. Environment variable "GRPC_XDS_EXPERIMENTAL_V3_SUPPORT" is set to
// true.
// The default value of the enum type "version.TransportAPI" is v2.
if env.V3Support && serverSupportsV3 {
config.TransportAPI = version.TransportV3
}
if err := config.updateNodeProto(); err != nil {
return nil, err
}
logger.Infof("Bootstrap config for creating xds-client: %+v", config)
return config, nil
}
// updateNodeProto updates the node proto read from the bootstrap file.
//
// Node proto in Config contains a v3.Node protobuf message corresponding to the
// JSON contents found in the bootstrap file. This method performs some post
// processing on it:
// 1. If we don't find a nodeProto in the bootstrap file, we create an empty one
// here. That way, callers of this function can always expect that the NodeProto
// field is non-nil.
// 2. If the transport protocol version to be used is not v3, we convert the
// current v3.Node proto in a v2.Node proto.
// 3. Some additional fields which are not expected to be set in the bootstrap
// file are populated here.
func (c *Config) updateNodeProto() error | {
if c.TransportAPI == version.TransportV3 {
v3, _ := c.NodeProto.(*v3corepb.Node)
if v3 == nil {
v3 = &v3corepb.Node{}
}
v3.UserAgentName = gRPCUserAgentName
v3.UserAgentVersionType = &v3corepb.Node_UserAgentVersion{UserAgentVersion: grpc.Version}
v3.ClientFeatures = append(v3.ClientFeatures, clientFeatureNoOverprovisioning)
c.NodeProto = v3
return nil
}
v2 := &v2corepb.Node{}
if c.NodeProto != nil {
v3, err := proto.Marshal(c.NodeProto)
if err != nil {
return fmt.Errorf("xds: proto.Marshal(%v): %v", c.NodeProto, err)
}
if err := proto.Unmarshal(v3, v2); err != nil {
return fmt.Errorf("xds: proto.Unmarshal(%v): %v", v3, err)
}
}
c.NodeProto = v2
// BuildVersion is deprecated, and is replaced by user_agent_name and
// user_agent_version. But the management servers are still using the old
// field, so we will keep both set.
v2.BuildVersion = gRPCVersion
v2.UserAgentName = gRPCUserAgentName
v2.UserAgentVersionType = &v2corepb.Node_UserAgentVersion{UserAgentVersion: grpc.Version}
v2.ClientFeatures = append(v2.ClientFeatures, clientFeatureNoOverprovisioning)
return nil
} | identifier_body | |
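The NewConfig comment above documents the bootstrap layout, though note that the parsing loop actually reads "xds_servers", "server_features" and "certificate_providers" as top-level keys, while the comment nests them under a singular "xds_server". A minimal file that this parser would accept could look roughly like the sketch below; the server URI, node id and zone are placeholders rather than values taken from the source:
{
  "xds_servers": [
    {
      "server_uri": "xds-server.example.com:443",
      "channel_creds": [{ "type": "google_default" }]
    }
  ],
  "server_features": ["xds_v3"],
  "node": {
    "id": "example-node-id",
    "locality": { "zone": "example-zone" }
  }
}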
bootstrap.go | /*
*
* Copyright 2019 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
// Package bootstrap provides the functionality to initialize certain aspects
// of an xDS client by reading a bootstrap file.
package bootstrap
import (
"bytes"
"encoding/json"
"fmt"
"io/ioutil"
v2corepb "github.com/envoyproxy/go-control-plane/envoy/api/v2/core"
v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3"
"github.com/golang/protobuf/jsonpb"
"github.com/golang/protobuf/proto"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials/google"
"google.golang.org/grpc/credentials/tls/certprovider"
"google.golang.org/grpc/internal"
"google.golang.org/grpc/xds/internal/env"
"google.golang.org/grpc/xds/internal/version"
)
const (
// The "server_features" field in the bootstrap file contains a list of
// features supported by the server. A value of "xds_v3" indicates that the
// server supports the v3 version of the xDS transport protocol.
serverFeaturesV3 = "xds_v3"
// Type name for Google default credentials.
credsGoogleDefault = "google_default"
credsInsecure = "insecure"
gRPCUserAgentName = "gRPC Go"
clientFeatureNoOverprovisioning = "envoy.lb.does_not_support_overprovisioning"
)
var gRPCVersion = fmt.Sprintf("%s %s", gRPCUserAgentName, grpc.Version)
// For overriding in unit tests.
var bootstrapFileReadFunc = ioutil.ReadFile
// Config provides the xDS client with several key bits of information that it
// requires in its interaction with an xDS server. The Config is initialized
// from the bootstrap file.
type Config struct {
// BalancerName is the name of the xDS server to connect to.
//
// The bootstrap file contains a list of servers (with name+creds), but we
// pick the first one.
BalancerName string
// Creds contains the credentials to be used while talking to the xDS
// server, as a grpc.DialOption.
Creds grpc.DialOption
// TransportAPI indicates the API version of xDS transport protocol to use.
// This describes the xDS gRPC endpoint and version of
// DiscoveryRequest/Response used on the wire.
TransportAPI version.TransportAPI
// NodeProto contains the Node proto to be used in xDS requests. The actual
// type depends on the transport protocol version used.
NodeProto proto.Message
// CertProviderConfigs contain parsed configs for supported certificate
// provider plugins found in the bootstrap file.
CertProviderConfigs map[string]CertProviderConfig
}
// CertProviderConfig wraps the certificate provider plugin name and config
// (corresponding to one plugin instance) found in the bootstrap file.
type CertProviderConfig struct {
// Name is the registered name of the certificate provider.
Name string
// Config is the parsed config to be passed to the certificate provider.
Config certprovider.StableConfig
}
type channelCreds struct {
Type string `json:"type"`
Config json.RawMessage `json:"config"`
}
type xdsServer struct {
ServerURI string `json:"server_uri"`
ChannelCreds []channelCreds `json:"channel_creds"`
}
// NewConfig returns a new instance of Config initialized by reading the
// bootstrap file found at ${GRPC_XDS_BOOTSTRAP}.
//
// The format of the bootstrap file will be as follows:
// {
// "xds_server": {
// "server_uri": <string containing URI of xds server>,
// "channel_creds": [
// {
// "type": <string containing channel cred type>,
// "config": <JSON object containing config for the type>
// }
// ],
// "server_features": [ ... ]
// "certificate_providers" : {
// "default": {
// "plugin_name": "default-plugin-name",
// "config": { default plugin config in JSON }
// },
// "foo": {
// "plugin_name": "foo",
// "config": { foo plugin config in JSON }
// }
// }
// },
// "node": <JSON form of Node proto>
// }
//
// Currently, we support exactly one type of credential, which is
// "google_default", where we use the host's default certs for transport
// credentials and a Google oauth token for call credentials.
//
// This function tries to process as much of the bootstrap file as possible (in
// the presence of the errors) and may return a Config object with certain
// fields left unspecified, in which case the caller should use some sane
// defaults.
func NewConfig() (*Config, error) {
config := &Config{}
fName := env.BootstrapFileName
if fName == "" {
return nil, fmt.Errorf("xds: Environment variable %q not defined", "GRPC_XDS_BOOTSTRAP")
}
logger.Infof("Got bootstrap file location %q", fName)
data, err := bootstrapFileReadFunc(fName)
if err != nil {
return nil, fmt.Errorf("xds: Failed to read bootstrap file %s with error %v", fName, err)
}
logger.Debugf("Bootstrap content: %s", data)
var jsonData map[string]json.RawMessage
if err := json.Unmarshal(data, &jsonData); err != nil {
return nil, fmt.Errorf("xds: Failed to parse file %s (content %v) with error: %v", fName, string(data), err)
}
serverSupportsV3 := false
m := jsonpb.Unmarshaler{AllowUnknownFields: true}
for k, v := range jsonData {
switch k {
case "node":
// We unconditionally convert the JSON into a v3.Node proto. The v3
// proto does not contain the deprecated field "build_version" from
// the v2 proto. We do not expect the bootstrap file to contain the
// "build_version" field. In any case, the unmarshal will succeed
// because we have set the `AllowUnknownFields` option on the
// unmarshaler.
n := &v3corepb.Node{}
if err := m.Unmarshal(bytes.NewReader(v), n); err != nil |
config.NodeProto = n
case "xds_servers":
var servers []*xdsServer
if err := json.Unmarshal(v, &servers); err != nil {
return nil, fmt.Errorf("xds: json.Unmarshal(%v) for field %q failed during bootstrap: %v", string(v), k, err)
}
if len(servers) < 1 {
return nil, fmt.Errorf("xds: bootstrap file parsing failed during bootstrap: file doesn't contain any xds server to connect to")
}
xs := servers[0]
config.BalancerName = xs.ServerURI
for _, cc := range xs.ChannelCreds {
// We stop at the first credential type that we support.
if cc.Type == credsGoogleDefault {
config.Creds = grpc.WithCredentialsBundle(google.NewDefaultCredentials())
break
} else if cc.Type == credsInsecure {
config.Creds = grpc.WithInsecure()
break
}
}
case "server_features":
var features []string
if err := json.Unmarshal(v, &features); err != nil {
return nil, fmt.Errorf("xds: json.Unmarshal(%v) for field %q failed during bootstrap: %v", string(v), k, err)
}
for _, f := range features {
switch f {
case serverFeaturesV3:
serverSupportsV3 = true
}
}
case "certificate_providers":
var providerInstances map[string]json.RawMessage
if err := json.Unmarshal(v, &providerInstances); err != nil {
return nil, fmt.Errorf("xds: json.Unmarshal(%v) for field %q failed during bootstrap: %v", string(v), k, err)
}
configs := make(map[string]CertProviderConfig)
getBuilder := internal.GetCertificateProviderBuilder.(func(string) certprovider.Builder)
for instance, data := range providerInstances {
var nameAndConfig struct {
PluginName string `json:"plugin_name"`
Config json.RawMessage `json:"config"`
}
if err := json.Unmarshal(data, &nameAndConfig); err != nil {
return nil, fmt.Errorf("xds: json.Unmarshal(%v) for field %q failed during bootstrap: %v", string(v), instance, err)
}
name := nameAndConfig.PluginName
parser := getBuilder(nameAndConfig.PluginName)
if parser == nil {
// We ignore plugins that we do not know about.
continue
}
cfg := nameAndConfig.Config
c, err := parser.ParseConfig(cfg)
if err != nil {
return nil, fmt.Errorf("xds: Config parsing for plugin %q failed: %v", name, err)
}
configs[instance] = CertProviderConfig{
Name: name,
Config: c,
}
}
config.CertProviderConfigs = configs
}
// Do not fail the xDS bootstrap when an unknown field is seen. This can
// happen when an older version client reads a newer version bootstrap
// file with new fields.
}
if config.BalancerName == "" {
return nil, fmt.Errorf("xds: Required field %q not found in bootstrap %s", "xds_servers.server_uri", jsonData["xds_servers"])
}
if config.Creds == nil {
return nil, fmt.Errorf("xds: Required field %q doesn't contain valid value in bootstrap %s", "xds_servers.channel_creds", jsonData["xds_servers"])
}
// We end up using v3 transport protocol version only if the following
// conditions are met:
// 1. Server supports v3, indicated by the presence of "xds_v3" in
// server_features.
// 2. Environment variable "GRPC_XDS_EXPERIMENTAL_V3_SUPPORT" is set to
// true.
// The default value of the enum type "version.TransportAPI" is v2.
if env.V3Support && serverSupportsV3 {
config.TransportAPI = version.TransportV3
}
if err := config.updateNodeProto(); err != nil {
return nil, err
}
logger.Infof("Bootstrap config for creating xds-client: %+v", config)
return config, nil
}
// updateNodeProto updates the node proto read from the bootstrap file.
//
// Node proto in Config contains a v3.Node protobuf message corresponding to the
// JSON contents found in the bootstrap file. This method performs some post
// processing on it:
// 1. If we don't find a nodeProto in the bootstrap file, we create an empty one
// here. That way, callers of this function can always expect that the NodeProto
// field is non-nil.
// 2. If the transport protocol version to be used is not v3, we convert the
// current v3.Node proto in a v2.Node proto.
// 3. Some additional fields which are not expected to be set in the bootstrap
// file are populated here.
func (c *Config) updateNodeProto() error {
if c.TransportAPI == version.TransportV3 {
v3, _ := c.NodeProto.(*v3corepb.Node)
if v3 == nil {
v3 = &v3corepb.Node{}
}
v3.UserAgentName = gRPCUserAgentName
v3.UserAgentVersionType = &v3corepb.Node_UserAgentVersion{UserAgentVersion: grpc.Version}
v3.ClientFeatures = append(v3.ClientFeatures, clientFeatureNoOverprovisioning)
c.NodeProto = v3
return nil
}
v2 := &v2corepb.Node{}
if c.NodeProto != nil {
v3, err := proto.Marshal(c.NodeProto)
if err != nil {
return fmt.Errorf("xds: proto.Marshal(%v): %v", c.NodeProto, err)
}
if err := proto.Unmarshal(v3, v2); err != nil {
return fmt.Errorf("xds: proto.Unmarshal(%v): %v", v3, err)
}
}
c.NodeProto = v2
// BuildVersion is deprecated, and is replaced by user_agent_name and
// user_agent_version. But the management servers are still using the old
// field, so we will keep both set.
v2.BuildVersion = gRPCVersion
v2.UserAgentName = gRPCUserAgentName
v2.UserAgentVersionType = &v2corepb.Node_UserAgentVersion{UserAgentVersion: grpc.Version}
v2.ClientFeatures = append(v2.ClientFeatures, clientFeatureNoOverprovisioning)
return nil
}
| {
return nil, fmt.Errorf("xds: jsonpb.Unmarshal(%v) for field %q failed during bootstrap: %v", string(v), k, err)
} | conditional_block |
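For the certificate_providers branch above: the block is keyed by instance name, each instance names a registered plugin plus an opaque config blob that is handed to that plugin's parser, and instances whose plugin_name is not registered are skipped rather than treated as errors. A hypothetical instance map might look like the fragment below (the plugin name and config fields are illustrative, not taken from this file):
"certificate_providers": {
  "default": {
    "plugin_name": "file_watcher",
    "config": {
      "certificate_file": "/path/to/cert.pem",
      "private_key_file": "/path/to/key.pem"
    }
  }
}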
bootstrap.go | /*
*
* Copyright 2019 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
// Package bootstrap provides the functionality to initialize certain aspects
// of an xDS client by reading a bootstrap file.
package bootstrap
import (
"bytes"
"encoding/json"
"fmt"
"io/ioutil"
v2corepb "github.com/envoyproxy/go-control-plane/envoy/api/v2/core"
v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3"
"github.com/golang/protobuf/jsonpb"
"github.com/golang/protobuf/proto"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials/google"
"google.golang.org/grpc/credentials/tls/certprovider"
"google.golang.org/grpc/internal"
"google.golang.org/grpc/xds/internal/env"
"google.golang.org/grpc/xds/internal/version"
)
const (
// The "server_features" field in the bootstrap file contains a list of
// features supported by the server. A value of "xds_v3" indicates that the
// server supports the v3 version of the xDS transport protocol.
serverFeaturesV3 = "xds_v3"
// Type name for Google default credentials.
credsGoogleDefault = "google_default"
credsInsecure = "insecure"
gRPCUserAgentName = "gRPC Go"
clientFeatureNoOverprovisioning = "envoy.lb.does_not_support_overprovisioning"
)
var gRPCVersion = fmt.Sprintf("%s %s", gRPCUserAgentName, grpc.Version)
// For overriding in unit tests.
var bootstrapFileReadFunc = ioutil.ReadFile
// Config provides the xDS client with several key bits of information that it
// requires in its interaction with an xDS server. The Config is initialized
// from the bootstrap file.
type Config struct {
// BalancerName is the name of the xDS server to connect to.
//
// The bootstrap file contains a list of servers (with name+creds), but we
// pick the first one.
BalancerName string
// Creds contains the credentials to be used while talking to the xDS
// server, as a grpc.DialOption.
Creds grpc.DialOption
// TransportAPI indicates the API version of xDS transport protocol to use.
// This describes the xDS gRPC endpoint and version of
// DiscoveryRequest/Response used on the wire.
TransportAPI version.TransportAPI
// NodeProto contains the Node proto to be used in xDS requests. The actual
// type depends on the transport protocol version used.
NodeProto proto.Message
// CertProviderConfigs contain parsed configs for supported certificate
// provider plugins found in the bootstrap file.
CertProviderConfigs map[string]CertProviderConfig
}
// CertProviderConfig wraps the certificate provider plugin name and config
// (corresponding to one plugin instance) found in the bootstrap file.
type CertProviderConfig struct {
// Name is the registered name of the certificate provider.
Name string
// Config is the parsed config to be passed to the certificate provider.
Config certprovider.StableConfig
}
type channelCreds struct {
Type string `json:"type"`
Config json.RawMessage `json:"config"`
}
type xdsServer struct {
ServerURI string `json:"server_uri"`
ChannelCreds []channelCreds `json:"channel_creds"`
}
// NewConfig returns a new instance of Config initialized by reading the
// bootstrap file found at ${GRPC_XDS_BOOTSTRAP}.
//
// The format of the bootstrap file will be as follows:
// {
// "xds_server": {
// "server_uri": <string containing URI of xds server>,
// "channel_creds": [
// {
// "type": <string containing channel cred type>,
// "config": <JSON object containing config for the type>
// }
// ],
// "server_features": [ ... ]
// "certificate_providers" : {
// "default": {
// "plugin_name": "default-plugin-name",
// "config": { default plugin config in JSON }
// },
// "foo": {
// "plugin_name": "foo",
// "config": { foo plugin config in JSON }
// }
// }
// },
// "node": <JSON form of Node proto>
// }
//
// Currently, we support exactly one type of credential, which is
// "google_default", where we use the host's default certs for transport
// credentials and a Google oauth token for call credentials.
//
// This function tries to process as much of the bootstrap file as possible (in
// the presence of the errors) and may return a Config object with certain
// fields left unspecified, in which case the caller should use some sane
// defaults.
func NewConfig() (*Config, error) {
config := &Config{}
fName := env.BootstrapFileName
if fName == "" {
return nil, fmt.Errorf("xds: Environment variable %q not defined", "GRPC_XDS_BOOTSTRAP")
}
logger.Infof("Got bootstrap file location %q", fName)
data, err := bootstrapFileReadFunc(fName)
if err != nil {
return nil, fmt.Errorf("xds: Failed to read bootstrap file %s with error %v", fName, err)
}
logger.Debugf("Bootstrap content: %s", data)
var jsonData map[string]json.RawMessage
if err := json.Unmarshal(data, &jsonData); err != nil {
return nil, fmt.Errorf("xds: Failed to parse file %s (content %v) with error: %v", fName, string(data), err)
}
serverSupportsV3 := false
m := jsonpb.Unmarshaler{AllowUnknownFields: true}
for k, v := range jsonData {
switch k {
case "node":
// We unconditionally convert the JSON into a v3.Node proto. The v3
// proto does not contain the deprecated field "build_version" from
// the v2 proto. We do not expect the bootstrap file to contain the
// "build_version" field. In any case, the unmarshal will succeed
// because we have set the `AllowUnknownFields` option on the
// unmarshaler.
n := &v3corepb.Node{}
if err := m.Unmarshal(bytes.NewReader(v), n); err != nil {
return nil, fmt.Errorf("xds: jsonpb.Unmarshal(%v) for field %q failed during bootstrap: %v", string(v), k, err)
}
config.NodeProto = n
case "xds_servers":
var servers []*xdsServer
if err := json.Unmarshal(v, &servers); err != nil {
return nil, fmt.Errorf("xds: json.Unmarshal(%v) for field %q failed during bootstrap: %v", string(v), k, err)
}
if len(servers) < 1 {
return nil, fmt.Errorf("xds: bootstrap file parsing failed during bootstrap: file doesn't contain any xds server to connect to")
}
xs := servers[0]
config.BalancerName = xs.ServerURI
for _, cc := range xs.ChannelCreds {
// We stop at the first credential type that we support.
if cc.Type == credsGoogleDefault {
config.Creds = grpc.WithCredentialsBundle(google.NewDefaultCredentials())
break
} else if cc.Type == credsInsecure {
config.Creds = grpc.WithInsecure()
break
}
}
case "server_features":
var features []string
if err := json.Unmarshal(v, &features); err != nil {
return nil, fmt.Errorf("xds: json.Unmarshal(%v) for field %q failed during bootstrap: %v", string(v), k, err)
}
for _, f := range features {
switch f {
case serverFeaturesV3:
serverSupportsV3 = true
}
}
case "certificate_providers":
var providerInstances map[string]json.RawMessage
if err := json.Unmarshal(v, &providerInstances); err != nil {
return nil, fmt.Errorf("xds: json.Unmarshal(%v) for field %q failed during bootstrap: %v", string(v), k, err)
}
configs := make(map[string]CertProviderConfig)
getBuilder := internal.GetCertificateProviderBuilder.(func(string) certprovider.Builder)
for instance, data := range providerInstances {
var nameAndConfig struct {
PluginName string `json:"plugin_name"`
Config json.RawMessage `json:"config"`
}
if err := json.Unmarshal(data, &nameAndConfig); err != nil {
return nil, fmt.Errorf("xds: json.Unmarshal(%v) for field %q failed during bootstrap: %v", string(v), instance, err)
}
name := nameAndConfig.PluginName
parser := getBuilder(nameAndConfig.PluginName)
if parser == nil {
// We ignore plugins that we do not know about.
continue
}
cfg := nameAndConfig.Config
c, err := parser.ParseConfig(cfg)
if err != nil {
return nil, fmt.Errorf("xds: Config parsing for plugin %q failed: %v", name, err)
}
configs[instance] = CertProviderConfig{
Name: name,
Config: c,
}
}
config.CertProviderConfigs = configs
}
// Do not fail the xDS bootstrap when an unknown field is seen. This can
// happen when an older version client reads a newer version bootstrap
// file with new fields.
}
if config.BalancerName == "" {
return nil, fmt.Errorf("xds: Required field %q not found in bootstrap %s", "xds_servers.server_uri", jsonData["xds_servers"])
}
if config.Creds == nil {
return nil, fmt.Errorf("xds: Required field %q doesn't contain valid value in bootstrap %s", "xds_servers.channel_creds", jsonData["xds_servers"])
}
// We end up using v3 transport protocol version only if the following
// conditions are met:
// 1. Server supports v3, indicated by the presence of "xds_v3" in
// server_features.
// 2. Environment variable "GRPC_XDS_EXPERIMENTAL_V3_SUPPORT" is set to
// true.
// The default value of the enum type "version.TransportAPI" is v2.
if env.V3Support && serverSupportsV3 {
config.TransportAPI = version.TransportV3
}
if err := config.updateNodeProto(); err != nil {
return nil, err
}
logger.Infof("Bootstrap config for creating xds-client: %+v", config)
return config, nil
}
// updateNodeProto updates the node proto read from the bootstrap file.
//
// Node proto in Config contains a v3.Node protobuf message corresponding to the
// JSON contents found in the bootstrap file. This method performs some post
// processing on it:
// 1. If we don't find a nodeProto in the bootstrap file, we create an empty one
// here. That way, callers of this function can always expect that the NodeProto
// field is non-nil.
// 2. If the transport protocol version to be used is not v3, we convert the
// current v3.Node proto in a v2.Node proto.
// 3. Some additional fields which are not expected to be set in the bootstrap
// file are populated here.
func (c *Config) | () error {
if c.TransportAPI == version.TransportV3 {
v3, _ := c.NodeProto.(*v3corepb.Node)
if v3 == nil {
v3 = &v3corepb.Node{}
}
v3.UserAgentName = gRPCUserAgentName
v3.UserAgentVersionType = &v3corepb.Node_UserAgentVersion{UserAgentVersion: grpc.Version}
v3.ClientFeatures = append(v3.ClientFeatures, clientFeatureNoOverprovisioning)
c.NodeProto = v3
return nil
}
v2 := &v2corepb.Node{}
if c.NodeProto != nil {
v3, err := proto.Marshal(c.NodeProto)
if err != nil {
return fmt.Errorf("xds: proto.Marshal(%v): %v", c.NodeProto, err)
}
if err := proto.Unmarshal(v3, v2); err != nil {
return fmt.Errorf("xds: proto.Unmarshal(%v): %v", v3, err)
}
}
c.NodeProto = v2
// BuildVersion is deprecated, and is replaced by user_agent_name and
// user_agent_version. But the management servers are still using the old
// field, so we will keep both set.
v2.BuildVersion = gRPCVersion
v2.UserAgentName = gRPCUserAgentName
v2.UserAgentVersionType = &v2corepb.Node_UserAgentVersion{UserAgentVersion: grpc.Version}
v2.ClientFeatures = append(v2.ClientFeatures, clientFeatureNoOverprovisioning)
return nil
}
| updateNodeProto | identifier_name |
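The channel_creds loop above walks the list in order and stops at the first type it recognises (google_default or insecure); unknown types are skipped rather than rejected. Assuming a hypothetical list like the fragment below, the client would end up with Google default credentials, because the first entry's type is not one of the two supported in this file:
"channel_creds": [
  { "type": "tls" },
  { "type": "google_default" }
]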
bootstrap.go | /*
*
* Copyright 2019 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and | * limitations under the License.
*
*/
// Package bootstrap provides the functionality to initialize certain aspects
// of an xDS client by reading a bootstrap file.
package bootstrap
import (
"bytes"
"encoding/json"
"fmt"
"io/ioutil"
v2corepb "github.com/envoyproxy/go-control-plane/envoy/api/v2/core"
v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3"
"github.com/golang/protobuf/jsonpb"
"github.com/golang/protobuf/proto"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials/google"
"google.golang.org/grpc/credentials/tls/certprovider"
"google.golang.org/grpc/internal"
"google.golang.org/grpc/xds/internal/env"
"google.golang.org/grpc/xds/internal/version"
)
const (
// The "server_features" field in the bootstrap file contains a list of
// features supported by the server. A value of "xds_v3" indicates that the
// server supports the v3 version of the xDS transport protocol.
serverFeaturesV3 = "xds_v3"
// Type name for Google default credentials.
credsGoogleDefault = "google_default"
credsInsecure = "insecure"
gRPCUserAgentName = "gRPC Go"
clientFeatureNoOverprovisioning = "envoy.lb.does_not_support_overprovisioning"
)
var gRPCVersion = fmt.Sprintf("%s %s", gRPCUserAgentName, grpc.Version)
// For overriding in unit tests.
var bootstrapFileReadFunc = ioutil.ReadFile
// Config provides the xDS client with several key bits of information that it
// requires in its interaction with an xDS server. The Config is initialized
// from the bootstrap file.
type Config struct {
// BalancerName is the name of the xDS server to connect to.
//
// The bootstrap file contains a list of servers (with name+creds), but we
// pick the first one.
BalancerName string
// Creds contains the credentials to be used while talking to the xDS
// server, as a grpc.DialOption.
Creds grpc.DialOption
// TransportAPI indicates the API version of xDS transport protocol to use.
// This describes the xDS gRPC endpoint and version of
// DiscoveryRequest/Response used on the wire.
TransportAPI version.TransportAPI
// NodeProto contains the Node proto to be used in xDS requests. The actual
// type depends on the transport protocol version used.
NodeProto proto.Message
// CertProviderConfigs contain parsed configs for supported certificate
// provider plugins found in the bootstrap file.
CertProviderConfigs map[string]CertProviderConfig
}
// CertProviderConfig wraps the certificate provider plugin name and config
// (corresponding to one plugin instance) found in the bootstrap file.
type CertProviderConfig struct {
// Name is the registered name of the certificate provider.
Name string
// Config is the parsed config to be passed to the certificate provider.
Config certprovider.StableConfig
}
type channelCreds struct {
Type string `json:"type"`
Config json.RawMessage `json:"config"`
}
type xdsServer struct {
ServerURI string `json:"server_uri"`
ChannelCreds []channelCreds `json:"channel_creds"`
}
// NewConfig returns a new instance of Config initialized by reading the
// bootstrap file found at ${GRPC_XDS_BOOTSTRAP}.
//
// The format of the bootstrap file will be as follows:
// {
// "xds_server": {
// "server_uri": <string containing URI of xds server>,
// "channel_creds": [
// {
// "type": <string containing channel cred type>,
// "config": <JSON object containing config for the type>
// }
// ],
// "server_features": [ ... ]
// "certificate_providers" : {
// "default": {
// "plugin_name": "default-plugin-name",
// "config": { default plugin config in JSON }
// },
// "foo": {
// "plugin_name": "foo",
// "config": { foo plugin config in JSON }
// }
// }
// },
// "node": <JSON form of Node proto>
// }
//
// Currently, we support exactly one type of credential, which is
// "google_default", where we use the host's default certs for transport
// credentials and a Google oauth token for call credentials.
//
// This function tries to process as much of the bootstrap file as possible (in
// the presence of the errors) and may return a Config object with certain
// fields left unspecified, in which case the caller should use some sane
// defaults.
func NewConfig() (*Config, error) {
config := &Config{}
fName := env.BootstrapFileName
if fName == "" {
return nil, fmt.Errorf("xds: Environment variable %q not defined", "GRPC_XDS_BOOTSTRAP")
}
logger.Infof("Got bootstrap file location %q", fName)
data, err := bootstrapFileReadFunc(fName)
if err != nil {
return nil, fmt.Errorf("xds: Failed to read bootstrap file %s with error %v", fName, err)
}
logger.Debugf("Bootstrap content: %s", data)
var jsonData map[string]json.RawMessage
if err := json.Unmarshal(data, &jsonData); err != nil {
return nil, fmt.Errorf("xds: Failed to parse file %s (content %v) with error: %v", fName, string(data), err)
}
serverSupportsV3 := false
m := jsonpb.Unmarshaler{AllowUnknownFields: true}
for k, v := range jsonData {
switch k {
case "node":
// We unconditionally convert the JSON into a v3.Node proto. The v3
// proto does not contain the deprecated field "build_version" from
// the v2 proto. We do not expect the bootstrap file to contain the
// "build_version" field. In any case, the unmarshal will succeed
// because we have set the `AllowUnknownFields` option on the
// unmarshaler.
n := &v3corepb.Node{}
if err := m.Unmarshal(bytes.NewReader(v), n); err != nil {
return nil, fmt.Errorf("xds: jsonpb.Unmarshal(%v) for field %q failed during bootstrap: %v", string(v), k, err)
}
config.NodeProto = n
case "xds_servers":
var servers []*xdsServer
if err := json.Unmarshal(v, &servers); err != nil {
return nil, fmt.Errorf("xds: json.Unmarshal(%v) for field %q failed during bootstrap: %v", string(v), k, err)
}
if len(servers) < 1 {
return nil, fmt.Errorf("xds: bootstrap file parsing failed during bootstrap: file doesn't contain any xds server to connect to")
}
xs := servers[0]
config.BalancerName = xs.ServerURI
for _, cc := range xs.ChannelCreds {
// We stop at the first credential type that we support.
if cc.Type == credsGoogleDefault {
config.Creds = grpc.WithCredentialsBundle(google.NewDefaultCredentials())
break
} else if cc.Type == credsInsecure {
config.Creds = grpc.WithInsecure()
break
}
}
case "server_features":
var features []string
if err := json.Unmarshal(v, &features); err != nil {
return nil, fmt.Errorf("xds: json.Unmarshal(%v) for field %q failed during bootstrap: %v", string(v), k, err)
}
for _, f := range features {
switch f {
case serverFeaturesV3:
serverSupportsV3 = true
}
}
case "certificate_providers":
var providerInstances map[string]json.RawMessage
if err := json.Unmarshal(v, &providerInstances); err != nil {
return nil, fmt.Errorf("xds: json.Unmarshal(%v) for field %q failed during bootstrap: %v", string(v), k, err)
}
configs := make(map[string]CertProviderConfig)
getBuilder := internal.GetCertificateProviderBuilder.(func(string) certprovider.Builder)
for instance, data := range providerInstances {
var nameAndConfig struct {
PluginName string `json:"plugin_name"`
Config json.RawMessage `json:"config"`
}
if err := json.Unmarshal(data, &nameAndConfig); err != nil {
return nil, fmt.Errorf("xds: json.Unmarshal(%v) for field %q failed during bootstrap: %v", string(v), instance, err)
}
name := nameAndConfig.PluginName
parser := getBuilder(nameAndConfig.PluginName)
if parser == nil {
// We ignore plugins that we do not know about.
continue
}
cfg := nameAndConfig.Config
c, err := parser.ParseConfig(cfg)
if err != nil {
return nil, fmt.Errorf("xds: Config parsing for plugin %q failed: %v", name, err)
}
configs[instance] = CertProviderConfig{
Name: name,
Config: c,
}
}
config.CertProviderConfigs = configs
}
// Do not fail the xDS bootstrap when an unknown field is seen. This can
// happen when an older version client reads a newer version bootstrap
// file with new fields.
}
if config.BalancerName == "" {
return nil, fmt.Errorf("xds: Required field %q not found in bootstrap %s", "xds_servers.server_uri", jsonData["xds_servers"])
}
if config.Creds == nil {
return nil, fmt.Errorf("xds: Required field %q doesn't contain valid value in bootstrap %s", "xds_servers.channel_creds", jsonData["xds_servers"])
}
// We end up using v3 transport protocol version only if the following
// conditions are met:
// 1. Server supports v3, indicated by the presence of "xds_v3" in
// server_features.
// 2. Environment variable "GRPC_XDS_EXPERIMENTAL_V3_SUPPORT" is set to
// true.
// The default value of the enum type "version.TransportAPI" is v2.
if env.V3Support && serverSupportsV3 {
config.TransportAPI = version.TransportV3
}
if err := config.updateNodeProto(); err != nil {
return nil, err
}
logger.Infof("Bootstrap config for creating xds-client: %+v", config)
return config, nil
}
// updateNodeProto updates the node proto read from the bootstrap file.
//
// Node proto in Config contains a v3.Node protobuf message corresponding to the
// JSON contents found in the bootstrap file. This method performs some post
// processing on it:
// 1. If we don't find a nodeProto in the bootstrap file, we create an empty one
// here. That way, callers of this function can always expect that the NodeProto
// field is non-nil.
// 2. If the transport protocol version to be used is not v3, we convert the
// current v3.Node proto in a v2.Node proto.
// 3. Some additional fields which are not expected to be set in the bootstrap
// file are populated here.
func (c *Config) updateNodeProto() error {
if c.TransportAPI == version.TransportV3 {
v3, _ := c.NodeProto.(*v3corepb.Node)
if v3 == nil {
v3 = &v3corepb.Node{}
}
v3.UserAgentName = gRPCUserAgentName
v3.UserAgentVersionType = &v3corepb.Node_UserAgentVersion{UserAgentVersion: grpc.Version}
v3.ClientFeatures = append(v3.ClientFeatures, clientFeatureNoOverprovisioning)
c.NodeProto = v3
return nil
}
v2 := &v2corepb.Node{}
if c.NodeProto != nil {
v3, err := proto.Marshal(c.NodeProto)
if err != nil {
return fmt.Errorf("xds: proto.Marshal(%v): %v", c.NodeProto, err)
}
if err := proto.Unmarshal(v3, v2); err != nil {
return fmt.Errorf("xds: proto.Unmarshal(%v): %v", v3, err)
}
}
c.NodeProto = v2
// BuildVersion is deprecated, and is replaced by user_agent_name and
// user_agent_version. But the management servers are still using the old
// field, so we will keep both set.
v2.BuildVersion = gRPCVersion
v2.UserAgentName = gRPCUserAgentName
v2.UserAgentVersionType = &v2corepb.Node_UserAgentVersion{UserAgentVersion: grpc.Version}
v2.ClientFeatures = append(v2.ClientFeatures, clientFeatureNoOverprovisioning)
return nil
} | random_line_split | |
convert2coco.py | import argparse
import sys
import os
import os.path as osp
import sqlite3
import json
import re |
def get_img_size(image_filename):
im = Image.open(image_filename)
return im.size[0], im.size[1]
def exec_sqlite_query(cursor, select_str, from_str=None, where_str=None):
query_str = 'SELECT {}'.format(select_str)
query_str += ' FROM {}'.format(from_str)
if where_str:
query_str += ' WHERE {}'.format(where_str)
return [row for row in cursor.execute(query_str)]
def progress_updt(msg, total, progress):
bar_length, status = 20, ""
progress = float(progress) / float(total)
if progress >= 1.:
progress, status = 1, "\r\n"
block = int(round(bar_length * progress))
text = "\r{}[{}] {:.0f}% {}".format(msg, "#" * block + "-" * (bar_length - block), round(progress * 100, 0), status)
sys.stdout.write(text)
sys.stdout.flush()
def main():
# Set up a parser for command line arguments
parser = argparse.ArgumentParser("Convert AFLW dataset's annotation into COCO json format")
parser.add_argument('-v', '--verbose', action="store_true", help="increase output verbosity")
parser.add_argument('--dataset_root', type=str, required=True, help='AFLW root directory')
parser.add_argument('--json', type=str, default='aflw_annotations.json', help="output COCO json annotation file")
args = parser.parse_args()
# Get absolute path of dataset root dir
args.dataset_root = osp.abspath(args.dataset_root)
if args.verbose:
print("#. Transform AFLW annotations into COCO json format...")
# Open the original AFLW annotation (sqlite database)
if args.verbose:
print(" \\__Open the AFLW SQLight database...", end="")
sys.stdout.flush()
conn = sqlite3.connect(osp.join(args.dataset_root, 'aflw.sqlite'))
cursor = conn.cursor()
if args.verbose:
print("Done!")
# Build sqlite queries
select_str = "faces.face_id, " \
"imgs.filepath, " \
"rect.x, rect.y, " \
"rect.w, " \
"rect.h, " \
"pose.roll, " \
"pose.pitch, " \
"pose.yaw, " \
"metadata.sex"
from_str = "faces, " \
"faceimages " \
"imgs, " \
"facerect rect, " \
"facepose pose, " \
"facemetadata metadata"
where_str = "faces.file_id = imgs.file_id and " \
"faces.face_id = rect.face_id and " \
"faces.face_id = pose.face_id and " \
"faces.face_id = metadata.face_id"
query_res = exec_sqlite_query(cursor, select_str, from_str, where_str)
# Count total number of images in AFLW dataset
if args.verbose:
print(" \\__Count total number of images in AFLW database: ", end="")
sys.stdout.flush()
total_num_images = 0
for _ in query_res:
total_num_images += 1
if args.verbose:
print(total_num_images)
# Output file for appending the file paths of not found images
not_found_images_file = 'not_found_images_aflw.txt'
try:
os.remove(not_found_images_file)
except OSError:
pass
# Temporary dataset variables
aflw_dataset_dict = dict()
# Register to dataset_dict
img_cnt = 0
for face_id, path, rectx, recty, rectw, recth, roll, pitch, yaw, gender in query_res:
img_cnt += 1
# Get current image path
img_path = osp.join(args.dataset_root, 'flickr', path)
# Process current image
if osp.isfile(img_path):
img_w, img_h = get_img_size(img_path)
keypoints = N_LANDMARK * 3 * [0]
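# COCO-style flat keypoint list [x1, y1, v1, ..., x21, y21, v21] (63 zeros here);
# the visibility flag v is switched to 1 further below, once a coordinate row
# exists for that landmark in the database.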
pose = [roll, pitch, yaw]
gender = 0 if gender == 'm' else 1
# Register
aflw_dataset_dict[face_id] = {
'face_id': face_id,
'img_path': osp.join('flickr', path),
'width': img_w,
'height': img_h,
'bbox': (rectx, recty, rectw, recth),
'keypoints': keypoints,
'pose': pose,
'gender': gender}
# If current image file does not exist append the not found images filepaths to `not_found_images_file` and
# continue with the next image file.
else:
with open(not_found_images_file, "a") as out:
out.write("%s\n" % img_path)
continue
# Show progress bar
if args.verbose:
progress_updt(" \\__Populate AFLW dataset dictionary...", total_num_images, img_cnt)
if args.verbose:
print(" \\__Update AFLW dataset dictionary with keypoints...", end="")
sys.stdout.flush()
# Landmark (keypoint) coordinates
# (A landmark that is not visible simply has no coordinate row in the database.)
select_str = "faces.face_id, coords.feature_id, coords.x, coords.y"
from_str = "faces, featurecoords coords"
where_str = "faces.face_id = coords.face_id"
query_res = exec_sqlite_query(cursor, select_str, from_str, where_str)
# Register to dataset_dict
invalid_face_ids = list()
for face_id, feature_id, x, y in query_res:
assert (1 <= feature_id <= N_LANDMARK)
if face_id in aflw_dataset_dict:
idx = feature_id - 1
aflw_dataset_dict[face_id]['keypoints'][3 * idx] = x
aflw_dataset_dict[face_id]['keypoints'][3 * idx + 1] = y
aflw_dataset_dict[face_id]['keypoints'][3 * idx + 2] = 1
elif face_id not in invalid_face_ids:
invalid_face_ids.append(face_id)
if args.verbose:
print("Done!")
# Close database
if args.verbose:
print(" \\__Close the AFLW SQLight database...", end="")
sys.stdout.flush()
cursor.close()
if args.verbose:
print("Done!")
# Convert to COCO format
if args.verbose:
print(" \\__Convert to COCO format...", end="")
sys.stdout.flush()
images_list = []
annotations_list = []
for face_id, face_ann in aflw_dataset_dict.items():
img_dir_num = int(face_ann['img_path'].split("/")[1])
img_file_num = int(re.findall(r'\d+', face_ann['img_path'].split("/")[-1].split(".")[0])[0])
image_id = int("%d%05d" % (img_dir_num, img_file_num))
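# Worked example (illustrative path): 'flickr/3/image00012.jpg' yields
# img_dir_num=3 and img_file_num=12, so image_id = int("3" + "00012") = 300012.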
images_list.append({'id': image_id,
'file_name': face_ann['img_path'],
'height': face_ann['height'],
'width': face_ann['width'],
'date_captured': '',
'flickr_url': '',
'license': 1,
'dataset': 'aflw'})
annotations_list.append({'id': face_id,
'image_id': image_id,
'segmentation': [],
'num_keypoints': len(face_ann['keypoints']),
'area': 0,
'iscrowd': 0,
'keypoints': face_ann['keypoints'],
'bbox': face_ann['bbox'],
'category_id': 0})
# Build COCO-like dictionary
dataset_dict = dict()
# =============================== Dataset Info =============================== #
dataset_info = {
'description': 'Annotated Facial Landmarks in the Wild (AFLW)',
'url': 'https://www.tugraz.at/institute/icg/research/team-bischof/lrs/downloads/aflw/',
'version': '1.0',
'year': 2011,
'contributor': '',
'date_created': '2011'
}
dataset_dict.update(dataset_info)
# ============================= Dataset Licenses ============================= #
dataset_licenses = {
'licenses': [
{'id': 0,
'url': 'https://www.tugraz.at/institute/icg/research/team-bischof/lrs/downloads/aflw/',
'name': 'aflw_license'}
]
}
dataset_dict.update(dataset_licenses)
# ============================== Dataset Images ============================== #
dataset_images = {'images': images_list}
dataset_dict.update(dataset_images)
# =========================== Dataset Annotations ============================ #
dataset_annotations = {'annotations': annotations_list}
dataset_dict.update(dataset_annotations)
# ============================ Dataset Categories ============================ #
dataset_categories = {
'categories':
[
{'supercategory': 'face',
'name': 'face',
'skeleton': [],
'keypoints': ['LeftBrowLeftCorner',
'LeftBrowCenter',
'LeftBrowRightCorner',
'RightBrowLeftCorner',
'RightBrowCenter',
'RightBrowRightCorner',
'LeftEyeLeftCorner',
'LeftEyeCenter',
'LeftEyeRightCorner',
'RightEyeLeftCorner',
'RightEyeCenter',
'RightEyeRightCorner',
'LeftEar',
'NoseLeft',
'NoseCenter',
'NoseRight',
'RightEar',
'MouthLeftCorner',
'MouthCenter',
'MouthRightCorner',
'ChinCenter'],
'id': 0}
]
}
dataset_dict.update(dataset_categories)
if args.verbose:
print("Done!")
# Save dataset dictionary as json file
if args.verbose:
print(" \\__Save dataset dictionary as json file...", end="")
sys.stdout.flush()
with open(args.json, 'w') as fp:
json.dump(dataset_dict, fp)
if args.verbose:
print("Done!")
if __name__ == "__main__":
main() | from PIL import Image
# Number of facial landmarks provided by AFLW dataset
N_LANDMARK = 21
| random_line_split |
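# --- Illustrative follow-up (not part of the original script) ---
# Assuming pycocotools is installed, the file written by --json can be loaded like
# any other COCO-style annotation file, e.g.:
#   from pycocotools.coco import COCO
#   coco = COCO('aflw_annotations.json')
#   img_ids = coco.getImgIds()
#   anns = coco.loadAnns(coco.getAnnIds(imgIds=img_ids[:1]))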
convert2coco.py | import argparse
import sys
import os
import os.path as osp
import sqlite3
import json
import re
from PIL import Image
# Number of facial landmarks provided by AFLW dataset
N_LANDMARK = 21
def get_img_size(image_filename):
im = Image.open(image_filename)
return im.size[0], im.size[1]
def exec_sqlite_query(cursor, select_str, from_str=None, where_str=None):
query_str = 'SELECT {}'.format(select_str)
query_str += ' FROM {}'.format(from_str)
if where_str:
query_str += ' WHERE {}'.format(where_str)
return [row for row in cursor.execute(query_str)]
def progress_updt(msg, total, progress):
bar_length, status = 20, ""
progress = float(progress) / float(total)
if progress >= 1.:
progress, status = 1, "\r\n"
block = int(round(bar_length * progress))
text = "\r{}[{}] {:.0f}% {}".format(msg, "#" * block + "-" * (bar_length - block), round(progress * 100, 0), status)
sys.stdout.write(text)
sys.stdout.flush()
def main():
# Set up a parser for command line arguments
parser = argparse.ArgumentParser(description="Convert AFLW dataset's annotation into COCO json format")
parser.add_argument('-v', '--verbose', action="store_true", help="increase output verbosity")
parser.add_argument('--dataset_root', type=str, required=True, help='AFLW root directory')
parser.add_argument('--json', type=str, default='aflw_annotations.json', help="output COCO json annotation file")
args = parser.parse_args()
# Get absolute path of dataset root dir
args.dataset_root = osp.abspath(args.dataset_root)
if args.verbose:
print("#. Transform AFLW annotations into COCO json format...")
# Open the original AFLW annotation (sqlite database)
if args.verbose:
print(" \\__Open the AFLW SQLight database...", end="")
sys.stdout.flush()
conn = sqlite3.connect(osp.join(args.dataset_root, 'aflw.sqlite'))
cursor = conn.cursor()
if args.verbose:
print("Done!")
# Build sqlite queries
select_str = "faces.face_id, " \
"imgs.filepath, " \
"rect.x, rect.y, " \
"rect.w, " \
"rect.h, " \
"pose.roll, " \
"pose.pitch, " \
"pose.yaw, " \
"metadata.sex"
from_str = "faces, " \
"faceimages " \
"imgs, " \
"facerect rect, " \
"facepose pose, " \
"facemetadata metadata"
where_str = "faces.file_id = imgs.file_id and " \
"faces.face_id = rect.face_id and " \
"faces.face_id = pose.face_id and " \
"faces.face_id = metadata.face_id"
query_res = exec_sqlite_query(cursor, select_str, from_str, where_str)
# Count total number of images in AFLW dataset
if args.verbose:
print(" \\__Count total number of images in AFLW database: ", end="")
sys.stdout.flush()
total_num_images = 0
for _ in query_res:
total_num_images += 1
if args.verbose:
print(total_num_images)
# Output file for appending the file paths of not found images
not_found_images_file = 'not_found_images_aflw.txt'
try:
os.remove(not_found_images_file)
except OSError:
pass
# Temporary dataset variables
aflw_dataset_dict = dict()
# Register to dataset_dict
img_cnt = 0
for face_id, path, rectx, recty, rectw, recth, roll, pitch, yaw, gender in query_res:
img_cnt += 1
# Get current image path
img_path = osp.join(args.dataset_root, 'flickr', path)
# Process current image
if osp.isfile(img_path):
img_w, img_h = get_img_size(img_path)
keypoints = N_LANDMARK * 3 * [0]
pose = [roll, pitch, yaw]
gender = 0 if gender == 'm' else 1
# Register
aflw_dataset_dict[face_id] = {
'face_id': face_id,
'img_path': osp.join('flickr', path),
'width': img_w,
'height': img_h,
'bbox': (rectx, recty, rectw, recth),
'keypoints': keypoints,
'pose': pose,
'gender': gender}
# If current image file does not exist append the not found images filepaths to `not_found_images_file` and
# continue with the next image file.
else:
with open(not_found_images_file, "a") as out:
out.write("%s\n" % img_path)
continue
# Show progress bar
if args.verbose:
progress_updt(" \\__Populate AFLW dataset dictionary...", total_num_images, img_cnt)
if args.verbose:
print(" \\__Update AFLW dataset dictionary with keypoints...", end="")
sys.stdout.flush()
# Landmark (keypoint) coordinates
# (A landmark that is not visible simply has no coordinate row in the database.)
select_str = "faces.face_id, coords.feature_id, coords.x, coords.y"
from_str = "faces, featurecoords coords"
where_str = "faces.face_id = coords.face_id"
query_res = exec_sqlite_query(cursor, select_str, from_str, where_str)
# Register to dataset_dict
invalid_face_ids = list()
for face_id, feature_id, x, y in query_res:
assert (1 <= feature_id <= N_LANDMARK)
if face_id in aflw_dataset_dict:
idx = feature_id - 1
aflw_dataset_dict[face_id]['keypoints'][3 * idx] = x
aflw_dataset_dict[face_id]['keypoints'][3 * idx + 1] = y
aflw_dataset_dict[face_id]['keypoints'][3 * idx + 2] = 1
elif face_id not in invalid_face_ids:
|
if args.verbose:
print("Done!")
# Close database
if args.verbose:
print(" \\__Close the AFLW SQLight database...", end="")
sys.stdout.flush()
cursor.close()
if args.verbose:
print("Done!")
# Convert to COCO format
if args.verbose:
print(" \\__Convert to COCO format...", end="")
sys.stdout.flush()
images_list = []
annotations_list = []
for face_id, face_ann in aflw_dataset_dict.items():
img_dir_num = int(face_ann['img_path'].split("/")[1])
img_file_num = int(re.findall(r'\d+', face_ann['img_path'].split("/")[-1].split(".")[0])[0])
image_id = int("%d%05d" % (img_dir_num, img_file_num))
images_list.append({'id': image_id,
'file_name': face_ann['img_path'],
'height': face_ann['height'],
'width': face_ann['width'],
'date_captured': '',
'flickr_url': '',
'license': 1,
'dataset': 'aflw'})
annotations_list.append({'id': face_id,
'image_id': image_id,
'segmentation': [],
'num_keypoints': len(face_ann['keypoints']),
'area': 0,
'iscrowd': 0,
'keypoints': face_ann['keypoints'],
'bbox': face_ann['bbox'],
'category_id': 0})
# Build COCO-like dictionary
dataset_dict = dict()
# =============================== Dataset Info =============================== #
dataset_info = {
'description': 'Annotated Facial Landmarks in the Wild (AFLW)',
'url': 'https://www.tugraz.at/institute/icg/research/team-bischof/lrs/downloads/aflw/',
'version': '1.0',
'year': 2011,
'contributor': '',
'date_created': '2011'
}
dataset_dict.update(dataset_info)
# ============================= Dataset Licenses ============================= #
dataset_licenses = {
'licenses': [
{'id': 0,
'url': 'https://www.tugraz.at/institute/icg/research/team-bischof/lrs/downloads/aflw/',
'name': 'aflw_license'}
]
}
dataset_dict.update(dataset_licenses)
# ============================== Dataset Images ============================== #
dataset_images = {'images': images_list}
dataset_dict.update(dataset_images)
# =========================== Dataset Annotations ============================ #
dataset_annotations = {'annotations': annotations_list}
dataset_dict.update(dataset_annotations)
# ============================ Dataset Categories ============================ #
dataset_categories = {
'categories':
[
{'supercategory': 'face',
'name': 'face',
'skeleton': [],
'keypoints': ['LeftBrowLeftCorner',
'LeftBrowCenter',
'LeftBrowRightCorner',
'RightBrowLeftCorner',
'RightBrowCenter',
'RightBrowRightCorner',
'LeftEyeLeftCorner',
'LeftEyeCenter',
'LeftEyeRightCorner',
'RightEyeLeftCorner',
'RightEyeCenter',
'RightEyeRightCorner',
'LeftEar',
'NoseLeft',
'NoseCenter',
'NoseRight',
'RightEar',
'MouthLeftCorner',
'MouthCenter',
'MouthRightCorner',
'ChinCenter'],
'id': 0}
]
}
dataset_dict.update(dataset_categories)
if args.verbose:
print("Done!")
# Save dataset dictionary as json file
if args.verbose:
print(" \\__Save dataset dictionary as json file...", end="")
sys.stdout.flush()
with open(args.json, 'w') as fp:
json.dump(dataset_dict, fp)
if args.verbose:
print("Done!")
if __name__ == "__main__":
main()
| invalid_face_ids.append(face_id) | conditional_block |
convert2coco.py | import argparse
import sys
import os
import os.path as osp
import sqlite3
import json
import re
from PIL import Image
# Number of facial landmarks provided by AFLW dataset
N_LANDMARK = 21
def get_img_size(image_filename):
im = Image.open(image_filename)
return im.size[0], im.size[1]
def exec_sqlite_query(cursor, select_str, from_str=None, where_str=None):
query_str = 'SELECT {}'.format(select_str)
query_str += ' FROM {}'.format(from_str)
if where_str:
query_str += ' WHERE {}'.format(where_str)
return [row for row in cursor.execute(query_str)]
def progress_updt(msg, total, progress):
|
def main():
# Set up a parser for command line arguments
parser = argparse.ArgumentParser(description="Convert AFLW dataset's annotation into COCO json format")
parser.add_argument('-v', '--verbose', action="store_true", help="increase output verbosity")
parser.add_argument('--dataset_root', type=str, required=True, help='AFLW root directory')
parser.add_argument('--json', type=str, default='aflw_annotations.json', help="output COCO json annotation file")
args = parser.parse_args()
# Get absolute path of dataset root dir
args.dataset_root = osp.abspath(args.dataset_root)
if args.verbose:
print("#. Transform AFLW annotations into COCO json format...")
# Open the original AFLW annotation (sqlite database)
if args.verbose:
print(" \\__Open the AFLW SQLight database...", end="")
sys.stdout.flush()
conn = sqlite3.connect(osp.join(args.dataset_root, 'aflw.sqlite'))
cursor = conn.cursor()
if args.verbose:
print("Done!")
# Build sqlite queries
select_str = "faces.face_id, " \
"imgs.filepath, " \
"rect.x, rect.y, " \
"rect.w, " \
"rect.h, " \
"pose.roll, " \
"pose.pitch, " \
"pose.yaw, " \
"metadata.sex"
from_str = "faces, " \
"faceimages " \
"imgs, " \
"facerect rect, " \
"facepose pose, " \
"facemetadata metadata"
where_str = "faces.file_id = imgs.file_id and " \
"faces.face_id = rect.face_id and " \
"faces.face_id = pose.face_id and " \
"faces.face_id = metadata.face_id"
query_res = exec_sqlite_query(cursor, select_str, from_str, where_str)
# Count total number of images in AFLW dataset
if args.verbose:
print(" \\__Count total number of images in AFLW database: ", end="")
sys.stdout.flush()
total_num_images = 0
for _ in query_res:
total_num_images += 1
if args.verbose:
print(total_num_images)
# Output file for appending the file paths of not found images
not_found_images_file = 'not_found_images_aflw.txt'
try:
os.remove(not_found_images_file)
except OSError:
pass
# Temporary dataset variables
aflw_dataset_dict = dict()
# Register to dataset_dict
img_cnt = 0
for face_id, path, rectx, recty, rectw, recth, roll, pitch, yaw, gender in query_res:
img_cnt += 1
# Get current image path
img_path = osp.join(args.dataset_root, 'flickr', path)
# Process current image
if osp.isfile(img_path):
img_w, img_h = get_img_size(img_path)
keypoints = N_LANDMARK * 3 * [0]
pose = [roll, pitch, yaw]
gender = 0 if gender == 'm' else 1
# Register
aflw_dataset_dict[face_id] = {
'face_id': face_id,
'img_path': osp.join('flickr', path),
'width': img_w,
'height': img_h,
'bbox': (rectx, recty, rectw, recth),
'keypoints': keypoints,
'pose': pose,
'gender': gender}
# If current image file does not exist append the not found images filepaths to `not_found_images_file` and
# continue with the next image file.
else:
with open(not_found_images_file, "a") as out:
out.write("%s\n" % img_path)
continue
# Show progress bar
if args.verbose:
progress_updt(" \\__Populate AFLW dataset dictionary...", total_num_images, img_cnt)
if args.verbose:
print(" \\__Update AFLW dataset dictionary with keypoints...", end="")
sys.stdout.flush()
# Landmark (keypoint) coordinates
# (A landmark that is not visible simply has no coordinate row in the database.)
select_str = "faces.face_id, coords.feature_id, coords.x, coords.y"
from_str = "faces, featurecoords coords"
where_str = "faces.face_id = coords.face_id"
query_res = exec_sqlite_query(cursor, select_str, from_str, where_str)
# Register to dataset_dict
invalid_face_ids = list()
for face_id, feature_id, x, y in query_res:
assert (1 <= feature_id <= N_LANDMARK)
if face_id in aflw_dataset_dict:
idx = feature_id - 1
aflw_dataset_dict[face_id]['keypoints'][3 * idx] = x
aflw_dataset_dict[face_id]['keypoints'][3 * idx + 1] = y
aflw_dataset_dict[face_id]['keypoints'][3 * idx + 2] = 1
elif face_id not in invalid_face_ids:
invalid_face_ids.append(face_id)
if args.verbose:
print("Done!")
# Close database
if args.verbose:
print(" \\__Close the AFLW SQLight database...", end="")
sys.stdout.flush()
cursor.close()
if args.verbose:
print("Done!")
# Convert to COCO format
if args.verbose:
print(" \\__Convert to COCO format...", end="")
sys.stdout.flush()
images_list = []
annotations_list = []
for face_id, face_ann in aflw_dataset_dict.items():
img_dir_num = int(face_ann['img_path'].split("/")[1])
img_file_num = int(re.findall(r'\d+', face_ann['img_path'].split("/")[-1].split(".")[0])[0])
image_id = int("%d%05d" % (img_dir_num, img_file_num))
images_list.append({'id': image_id,
'file_name': face_ann['img_path'],
'height': face_ann['height'],
'width': face_ann['width'],
'date_captured': '',
'flickr_url': '',
'license': 1,
'dataset': 'aflw'})
annotations_list.append({'id': face_id,
'image_id': image_id,
'segmentation': [],
'num_keypoints': len(face_ann['keypoints']),
'area': 0,
'iscrowd': 0,
'keypoints': face_ann['keypoints'],
'bbox': face_ann['bbox'],
'category_id': 0})
# Build COCO-like dictionary
dataset_dict = dict()
# =============================== Dataset Info =============================== #
dataset_info = {
'description': 'Annotated Facial Landmarks in the Wild (AFLW)',
'url': 'https://www.tugraz.at/institute/icg/research/team-bischof/lrs/downloads/aflw/',
'version': '1.0',
'year': 2011,
'contributor': '',
'date_created': '2011'
}
dataset_dict.update(dataset_info)
# ============================= Dataset Licenses ============================= #
dataset_licenses = {
'licenses': [
{'id': 0,
'url': 'https://www.tugraz.at/institute/icg/research/team-bischof/lrs/downloads/aflw/',
'name': 'aflw_license'}
]
}
dataset_dict.update(dataset_licenses)
# ============================== Dataset Images ============================== #
dataset_images = {'images': images_list}
dataset_dict.update(dataset_images)
# =========================== Dataset Annotations ============================ #
dataset_annotations = {'annotations': annotations_list}
dataset_dict.update(dataset_annotations)
# ============================ Dataset Categories ============================ #
dataset_categories = {
'categories':
[
{'supercategory': 'face',
'name': 'face',
'skeleton': [],
'keypoints': ['LeftBrowLeftCorner',
'LeftBrowCenter',
'LeftBrowRightCorner',
'RightBrowLeftCorner',
'RightBrowCenter',
'RightBrowRightCorner',
'LeftEyeLeftCorner',
'LeftEyeCenter',
'LeftEyeRightCorner',
'RightEyeLeftCorner',
'RightEyeCenter',
'RightEyeRightCorner',
'LeftEar',
'NoseLeft',
'NoseCenter',
'NoseRight',
'RightEar',
'MouthLeftCorner',
'MouthCenter',
'MouthRightCorner',
'ChinCenter'],
'id': 0}
]
}
dataset_dict.update(dataset_categories)
if args.verbose:
print("Done!")
# Save dataset dictionary as json file
if args.verbose:
print(" \\__Save dataset dictionary as json file...", end="")
sys.stdout.flush()
with open(args.json, 'w') as fp:
json.dump(dataset_dict, fp)
if args.verbose:
print("Done!")
if __name__ == "__main__":
main()
| bar_length, status = 20, ""
progress = float(progress) / float(total)
if progress >= 1.:
progress, status = 1, "\r\n"
block = int(round(bar_length * progress))
text = "\r{}[{}] {:.0f}% {}".format(msg, "#" * block + "-" * (bar_length - block), round(progress * 100, 0), status)
sys.stdout.write(text)
sys.stdout.flush() | identifier_body |
convert2coco.py | import argparse
import sys
import os
import os.path as osp
import sqlite3
import json
import re
from PIL import Image
# Number of facial landmarks provided by AFLW dataset
N_LANDMARK = 21
def get_img_size(image_filename):
im = Image.open(image_filename)
return im.size[0], im.size[1]
def exec_sqlite_query(cursor, select_str, from_str=None, where_str=None):
query_str = 'SELECT {}'.format(select_str)
query_str += ' FROM {}'.format(from_str)
if where_str:
query_str += ' WHERE {}'.format(where_str)
return [row for row in cursor.execute(query_str)]
def | (msg, total, progress):
bar_length, status = 20, ""
progress = float(progress) / float(total)
if progress >= 1.:
progress, status = 1, "\r\n"
block = int(round(bar_length * progress))
text = "\r{}[{}] {:.0f}% {}".format(msg, "#" * block + "-" * (bar_length - block), round(progress * 100, 0), status)
sys.stdout.write(text)
sys.stdout.flush()
def main():
# Set up a parser for command line arguments
parser = argparse.ArgumentParser(description="Convert AFLW dataset's annotation into COCO json format")
parser.add_argument('-v', '--verbose', action="store_true", help="increase output verbosity")
parser.add_argument('--dataset_root', type=str, required=True, help='AFLW root directory')
parser.add_argument('--json', type=str, default='aflw_annotations.json', help="output COCO json annotation file")
args = parser.parse_args()
# Get absolute path of dataset root dir
args.dataset_root = osp.abspath(args.dataset_root)
if args.verbose:
print("#. Transform AFLW annotations into COCO json format...")
# Open the original AFLW annotation (sqlite database)
if args.verbose:
print(" \\__Open the AFLW SQLight database...", end="")
sys.stdout.flush()
conn = sqlite3.connect(osp.join(args.dataset_root, 'aflw.sqlite'))
cursor = conn.cursor()
if args.verbose:
print("Done!")
# Build sqlite queries
select_str = "faces.face_id, " \
"imgs.filepath, " \
"rect.x, rect.y, " \
"rect.w, " \
"rect.h, " \
"pose.roll, " \
"pose.pitch, " \
"pose.yaw, " \
"metadata.sex"
from_str = "faces, " \
"faceimages " \
"imgs, " \
"facerect rect, " \
"facepose pose, " \
"facemetadata metadata"
where_str = "faces.file_id = imgs.file_id and " \
"faces.face_id = rect.face_id and " \
"faces.face_id = pose.face_id and " \
"faces.face_id = metadata.face_id"
query_res = exec_sqlite_query(cursor, select_str, from_str, where_str)
# Count total number of images in AFLW dataset
if args.verbose:
print(" \\__Count total number of images in AFLW database: ", end="")
sys.stdout.flush()
total_num_images = 0
for _ in query_res:
total_num_images += 1
if args.verbose:
print(total_num_images)
# Output file for appending the file paths of not found images
not_found_images_file = 'not_found_images_aflw.txt'
try:
os.remove(not_found_images_file)
except OSError:
pass
# Temporary dataset variables
aflw_dataset_dict = dict()
# Register to dataset_dict
img_cnt = 0
for face_id, path, rectx, recty, rectw, recth, roll, pitch, yaw, gender in query_res:
img_cnt += 1
# Get current image path
img_path = osp.join(args.dataset_root, 'flickr', path)
# Process current image
if osp.isfile(img_path):
img_w, img_h = get_img_size(img_path)
keypoints = N_LANDMARK * 3 * [0]
pose = [roll, pitch, yaw]
gender = 0 if gender == 'm' else 1
# Register
aflw_dataset_dict[face_id] = {
'face_id': face_id,
'img_path': osp.join('flickr', path),
'width': img_w,
'height': img_h,
'bbox': (rectx, recty, rectw, recth),
'keypoints': keypoints,
'pose': pose,
'gender': gender}
# If current image file does not exist append the not found images filepaths to `not_found_images_file` and
# continue with the next image file.
else:
with open(not_found_images_file, "a") as out:
out.write("%s\n" % img_path)
continue
# Show progress bar
if args.verbose:
progress_updt(" \\__Populate AFLW dataset dictionary...", total_num_images, img_cnt)
if args.verbose:
print(" \\__Update AFLW dataset dictionary with keypoints...", end="")
sys.stdout.flush()
# Landmark (keypoint) coordinates
# (A landmark that is not visible simply has no coordinate row in the database.)
select_str = "faces.face_id, coords.feature_id, coords.x, coords.y"
from_str = "faces, featurecoords coords"
where_str = "faces.face_id = coords.face_id"
query_res = exec_sqlite_query(cursor, select_str, from_str, where_str)
# Register to dataset_dict
invalid_face_ids = list()
for face_id, feature_id, x, y in query_res:
assert (1 <= feature_id <= N_LANDMARK)
if face_id in aflw_dataset_dict:
idx = feature_id - 1
aflw_dataset_dict[face_id]['keypoints'][3 * idx] = x
aflw_dataset_dict[face_id]['keypoints'][3 * idx + 1] = y
aflw_dataset_dict[face_id]['keypoints'][3 * idx + 2] = 1
elif face_id not in invalid_face_ids:
invalid_face_ids.append(face_id)
if args.verbose:
print("Done!")
# Close database
if args.verbose:
print(" \\__Close the AFLW SQLight database...", end="")
sys.stdout.flush()
cursor.close()
if args.verbose:
print("Done!")
# Convert to COCO format
if args.verbose:
print(" \\__Convert to COCO format...", end="")
sys.stdout.flush()
images_list = []
annotations_list = []
for face_id, face_ann in aflw_dataset_dict.items():
img_dir_num = int(face_ann['img_path'].split("/")[1])
img_file_num = int(re.findall(r'\d+', face_ann['img_path'].split("/")[-1].split(".")[0])[0])
image_id = int("%d%05d" % (img_dir_num, img_file_num))
images_list.append({'id': image_id,
'file_name': face_ann['img_path'],
'height': face_ann['height'],
'width': face_ann['width'],
'date_captured': '',
'flickr_url': '',
'license': 1,
'dataset': 'aflw'})
annotations_list.append({'id': face_id,
'image_id': image_id,
'segmentation': [],
'num_keypoints': len(face_ann['keypoints']),
'area': 0,
'iscrowd': 0,
'keypoints': face_ann['keypoints'],
'bbox': face_ann['bbox'],
'category_id': 0})
# Build COCO-like dictionary
dataset_dict = dict()
# =============================== Dataset Info =============================== #
dataset_info = {
'description': 'Annotated Facial Landmarks in the Wild (AFLW)',
'url': 'https://www.tugraz.at/institute/icg/research/team-bischof/lrs/downloads/aflw/',
'version': '1.0',
'year': 2011,
'contributor': '',
'date_created': '2011'
}
dataset_dict.update(dataset_info)
# ============================= Dataset Licenses ============================= #
dataset_licenses = {
'licenses': [
{'id': 0,
'url': 'https://www.tugraz.at/institute/icg/research/team-bischof/lrs/downloads/aflw/',
'name': 'aflw_license'}
]
}
dataset_dict.update(dataset_licenses)
# ============================== Dataset Images ============================== #
dataset_images = {'images': images_list}
dataset_dict.update(dataset_images)
# =========================== Dataset Annotations ============================ #
dataset_annotations = {'annotations': annotations_list}
dataset_dict.update(dataset_annotations)
# ============================ Dataset Categories ============================ #
dataset_categories = {
'categories':
[
{'supercategory': 'face',
'name': 'face',
'skeleton': [],
'keypoints': ['LeftBrowLeftCorner',
'LeftBrowCenter',
'LeftBrowRightCorner',
'RightBrowLeftCorner',
'RightBrowCenter',
'RightBrowRightCorner',
'LeftEyeLeftCorner',
'LeftEyeCenter',
'LeftEyeRightCorner',
'RightEyeLeftCorner',
'RightEyeCenter',
'RightEyeRightCorner',
'LeftEar',
'NoseLeft',
'NoseCenter',
'NoseRight',
'RightEar',
'MouthLeftCorner',
'MouthCenter',
'MouthRightCorner',
'ChinCenter'],
'id': 0}
]
}
dataset_dict.update(dataset_categories)
if args.verbose:
print("Done!")
# Save dataset dictionary as json file
if args.verbose:
print(" \\__Save dataset dictionary as json file...", end="")
sys.stdout.flush()
with open(args.json, 'w') as fp:
json.dump(dataset_dict, fp)
if args.verbose:
print("Done!")
if __name__ == "__main__":
main()
| progress_updt | identifier_name |
main.go | // Copyright (c) 2020 Michael Madgett
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
package main
import (
"fmt"
"github.com/magic53/go-chainloader/block"
"github.com/magic53/go-chainloader/btc"
"github.com/magic53/go-chainloader/data"
"github.com/magic53/go-chainloader/ltc"
"log"
"os"
"os/signal"
"path/filepath"
"sync"
"time"
)
func init() {
//log.SetFlags(0) // logging
//log.SetOutput(ioutil.Discard)
}
func main() {
var err error
// Trace
//f, err := os.Create("trace.out")
//if err != nil {
// panic(err)
//}
//defer f.Close()
//err = trace.Start(f)
//if err != nil {
// panic(err)
//}
//defer trace.Stop()
// Memory profiler
//defer func() {
// f, err := os.Create("mem.prof")
// if err != nil {
// log.Fatal("could not create memory profile: ", err)
// }
// defer f.Close() // error handling omitted for example
// runtime.GC() // get up-to-date statistics
// if err := pprof.WriteHeapProfile(f); err != nil {
// log.Fatal("could not write memory profile: ", err)
// }
//}()
shutdown := make(chan os.Signal, 1)
go func() {
c := make(chan os.Signal, 1)
signal.Notify(c, os.Interrupt)
out:
for {
select {
case sig := <-c:
if sig == os.Interrupt {
log.Printf("Shutting down: received signal %v\n", sig)
data.ShutdownNow()
shutdown <- sig
break out
}
}
}
}()
// Default tx cache date (last 2 months)
toMonth := time.Now()
fromMonth := toMonth.AddDate(0, -2, 0)
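// e.g. (illustrative): if toMonth is 2020-10-15, AddDate(0, -2, 0) makes fromMonth 2020-08-15.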
dir, err := filepath.Abs(filepath.Dir(os.Args[0]))
if err != nil {
log.Fatal(err)
}
if !data.LoadTokenConfigs(dir) {
log.Println("failed to load token configuration file: real time updates will be disabled")
}
var plugins []interface{}
tokens := data.GetTokenConfigs()
for _, config := range tokens {
switch config.Ticker {
case "BLOCK":
// load block config
plugin := block.NewPlugin(&block.MainNetParams, config)
if err = plugin.LoadBlocks(plugin.BlocksDir()); err != nil {
log.Println("BLOCK failed!", err.Error())
return
}
plugins = append(plugins, plugin)
go func() {
if err = plugin.WriteListTransactions(fromMonth, toMonth, plugin.TokenConf().OutputDir); err != nil {
fmt.Println("error", err.Error())
}
}()
case "LTC":
// load ltc config
plugin := ltc.NewPlugin(<c.MainNetParams, config)
if err = plugin.LoadBlocks(plugin.BlocksDir()); err != nil {
log.Println("LTC failed!", err.Error())
return
}
plugins = append(plugins, plugin)
go func() {
if err = plugin.WriteListTransactions(fromMonth, toMonth, plugin.TokenConf().OutputDir); err != nil {
fmt.Println("error", err.Error())
}
}()
case "BTC":
// load btc config
plugin := btc.NewPlugin(&btc.MainNetParams, config)
if err = plugin.LoadBlocks(plugin.BlocksDir()); err != nil {
log.Println("BTC failed!", err.Error())
return
}
plugins = append(plugins, plugin)
go func() {
if err = plugin.WriteListTransactions(fromMonth, toMonth, plugin.TokenConf().OutputDir); err != nil {
fmt.Println("error", err.Error())
}
}()
}
}
go watchMempools(plugins, false)
out:
for {
select {
case sig := <-shutdown:
if sig == os.Interrupt {
log.Println("Exiting...")
break out
}
}
}
}
// watchMempools watches the mempool on a timer and saves new transaction data
// to disk.
func watchMempools(plugins []interface{}, writeFiles bool) {
var counter uint64
for {
if data.IsShuttingDown() {
break
}
if counter%30000 == 0 { // every ~30 seconds
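// (illustrative arithmetic: counter grows by 250 per ~250ms sleep below, so 30000 ≈ 30s of wall time)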
var wg sync.WaitGroup
wg.Add(len(plugins))
for _, plugin := range plugins {
go func(plugin interface{}) {
defer wg.Done()
dataPlugin, ok := plugin.(data.Plugin)
if !ok {
return
}
mempoolPlugin, ok := plugin.(data.RPCMempoolPlugin)
if !ok {
return
}
rawTxPlugin, ok := plugin.(data.RPCRawTransactionsPlugin)
if !ok {
return
}
listTxPlugin, ok := plugin.(data.ListTransactionsPlugin)
if !ok {
return
}
mempool, err := mempoolPlugin.GetRawMempool()
if err != nil {
log.Printf("failed to getrawmempool on %s", dataPlugin.Ticker())
return
}
if len(mempool) < 1 {
return
}
wireTxs, err := rawTxPlugin.GetRawTransactions(mempool)
if err != nil || len(wireTxs) < 1 {
return
}
var txs []*data.Tx
txs, err = dataPlugin.ImportTransactions(wireTxs)
if err != nil {
log.Printf("failed to import transactions for %s", dataPlugin.Ticker())
return
}
if !writeFiles {
return
}
now := time.Now()
fromMonth := time.Date(2020, 8, 1, 0, 0, 0, 0, time.UTC)
toMonth := time.Date(now.Year(), now.Month(), 1, 0, 0, 0, 0, time.UTC)
for _, tx := range txs {
_ = listTxPlugin.WriteListTransactionsForAddress(tx.Address, fromMonth, toMonth, dataPlugin.TokenConf().OutputDir)
}
}(plugin)
}
wg.Wait()
}
time.Sleep(250 * time.Millisecond)
counter += 250 | // TODO Handle lookup requests
//func startServer() *http.Server {
// handler := func(w http.ResponseWriter, req *http.Request) {
// _, _ = io.WriteString(w, "Hello, world!\n")
// }
// server := &http.Server{Addr: ":8080", Handler: handler}
// return server
//}
func debugBLOCK(blockPlugin *block.Plugin, config *data.TokenConfig) {
var err error
//var txids []string
//txids, err = data.RPCRawMempool(config)
//if err != nil {
// fmt.Println(err.Error())
//}
//txids = []string{
// "6fa76deb0382c1cee0c56f7f5e5ea7266c22bd6134d4738be2c1ec35ff8cf550",
// "6aaa8db67a20abfa86998c40a67bed9eb2ac01d57048488cc4db3a4e1f544bc9",
// "ea1d76239e42745a4ffcfe16b10060f01ac0441545da8bd052b73c1ce74b9135",
// "3d910d1265c33b023d520c99944e6c1987cc149a543096862a9299f142932b1b",
// "2f58de4b43538e794c862e9d62f8c9e559e3d4c62b5d0f4014ede11bb415eea7",
// "e402a318b8b3a95a499812220eb56ed07c0a35c2e61a9cdc9ecf0ec940abc34c",
// "1af5413887c3124b87f0e801c9d96bf86fa347b0f6febd7303360cf70fc52bde",
// "672ad61d9da652ad688cec1f10ab11f018361fc79ed63781291ee3551c8766b6",
// "6ddc1222d9aa9b0768e333f0aa2e44dad00a41a78b31f0268aa9cd85507e1857",
// "47534111299075b129305a74a914f56195ead27d3bff5e75d90f374603f2222b",
// "17e1f44ccda93cb8a198d5ab5179fb6f3fe8e20d521016b37146317d821ea829",
// "e09b625ec7b9e31f12987ff9541354f16e7ebfad4021114eb932b724a6478d8e",
// "8a8c227298ba90f4dc07a1d0efe73c1737292eaa83522b24ec41ce522f3a5290",
//}
//if rawtxs, err := data.RPCGetRawTransactions(blockPlugin, txids, config); err != nil {
// fmt.Println(err.Error())
//} else {
// _, _ = blockPlugin.ImportTransactions(rawtxs)
//}
//txs, err := blockPlugin.ListTransactions(0, math.MaxInt32, []string{"BakbDabCMM1PVuFx8ruVM9AcWCWYfc66eV"})
//txs, err := blockPlugin.ListTransactions(0, math.MaxInt32, []string{"BoWcezbZ9vFTwArtVTHJHp51zQZSGdcLXt"})
//txs, err := blockPlugin.ListTransactions(0, math.MaxInt32, []string{"BreP7JHmYfp9YaGXBwN1F2X9BRq9sRdkiS"})
//if err != nil {
// log.Println("BLOCK listtransactions failed!", err.Error())
// return
//}
//sort.Slice(txs, func(i, j int) bool {
// return txs[i].Time < txs[j].Time
//})
//if js, err2 := json.Marshal(txs); err2 == nil {
// fmt.Println(string(js))
//}
fromMonth := time.Date(2020, 6, 1, 0, 0, 0, 0, time.UTC)
toMonth := time.Date(2020, 10, 1, 0, 0, 0, 0, time.UTC)
if err = blockPlugin.WriteListTransactions(fromMonth, toMonth, blockPlugin.TokenConf().OutputDir); err != nil {
fmt.Println("error", err.Error())
}
}
func debugLTC(ltcPlugin *ltc.Plugin, config *data.TokenConfig) {
var err error
//var txids []string
//txids, err = data.RPCRawMempool(config)
//if err != nil {
// fmt.Println(err.Error())
//}
//// block 1922810
//txids = []string{
// "645569e5ba9fd384dee3e19602cf5d4f81fc963c766def0f3fafd6afb5d6402b",
// "4e7178a1b6345230c6d47fa2b69a8300afd68668a94896606f066f697062d37b",
// "0afd7828844e12de18d4160281b27573bf54a676bf65bb8cfd2e79ccb474711b",
// "d19ed1aa14280693933a9c9e552ed7a294e537413e24780ddf9f865dde90546f",
// "eee223b6f6624ac079259fd379499513e6ac1440c68ddd1e20b91850fba66d22",
// "d2fad87261dcb381883e51953bbf7800ad02186180423a3138547296c165f0cd",
// "760f9f3385a0ada632c750f4603867504bdcda7be245b0b99de7bd065c27d5a3",
// "754421493b42ae1c2940b307d489b59c7bbdcb88d3535464b65519a997ca21a2",
// "e1ae5692f3b1189f4e38b9e0c8544d300fe83a4f1d40f524efec50e369b37901",
// "e0df0fd5f9b2c046ed754585ce45947753e79389272acff70c8cc9a094424db8",
// "8c7e73110c6a1d010d71221ffc35e32d14bd28d02ea8cec689f67e3a669045e6",
// "37294c514341cf787750d75d7ae4d4e7876cb355f8795f3ddd134df0f4910e07",
// "825de0fde9d159889dc3d2ac807cf39e6a4a3dfd7fbe9a3ebbc3d32271446eff",
// "49e64ab1747bffb08d6c61686588aa4bfe77a9b7015b4a45e03ed7385ad9d626",
// "fd1af7fed0f3aced908ec80355e5526152b05cad0973fff141ef7ad812b6a21c",
// "c549e50815c45e85092b2fa4c18706a54d2bcdfb515e1b921972e2e7d245542b",
// "0094b803cfac6f159d0cfe0cec80c7b658b28b8f41e71eb424b5380cb09475fc",
// "b0d59afccfd659b24bf72388263a71ea97e3c627171d32e31de35e95d14a0b27",
// "d62543ebe94d4de95bf037bd345782dd040f93fe95d7aa034661e5ca75c7e934",
// "4ee5dfd35dcf4920b5336f52cdd0402105e2099c5151f4ac932c62d91cf88037",
// "3061ab123835eb2fc68cd160c6c99e0f698512eec09e4c0b813a7af64d8f1b71",
// "1de4cf2323a6adda6a2616c5823bfffdea19731f29d252f691fe040a6f1cb877",
// "b540af13a49cad742614fa88cdfdf4ccfcdd4af9998a10106ee4b0ad471316e3",
// "0011e3e91dff7fca68a45386caf47e47bdc85fbc481ec694a5ab5c7b4e5cc19c",
// "eb9cda055b183fb063f5f9091d6c134268f029f72d022fe0fbf9997ef7426aa4",
// "9b46c4c4e65875780aebc31310621d96199ff3883a38eec08f1c9db5d2735aae",
// "e7d3d7636a28b7b2874e7edb8e2f4af859b9d58be6206f13b088cce4ac66e0b4",
// "e9bcafa9778ae022881fc42502b3e82a48664361472fad667b4e2686c0a8c6b5",
// "e7119ff43b250e3452f1329976c6aa8266ca6fc06c7df16959875e36fa44bfc6",
// "8d296fab683661b8b01552283d872bda80a9a43042172597f5618349eb6ecbc6",
// "50f8f6d82fac6c7f3b5a18a3a8e49d8211424b15e773556928b7872c76d5cdd6",
// "4b664a73e42cb6b72937abb6871c894a6a08957901f868534d56800dca8eefee",
// "907499cfd9752cd2e5caa2ce022f8942e7498be9d4abec429d4a6492f8a0a8f5",
//}
//if rawtxs, err := data.RPCGetRawTransactions(ltcPlugin, txids, config); err != nil {
// fmt.Println(err.Error())
//} else {
// _, _ = ltcPlugin.ImportTransactions(rawtxs)
//}
//
//txs, err := ltcPlugin.ListTransactions(0, math.MaxInt32, []string{"LV5nrreyVZJVvptA9PZSD4ViegKh7Qa8MA"})
//if err != nil {
// log.Println("LTC listtransactions failed!", err.Error())
// return
//}
//sort.Slice(txs, func(i, j int) bool {
// return txs[i].Time < txs[j].Time
//})
//if js, err2 := json.Marshal(txs); err2 == nil {
// fmt.Println(string(js))
//}
fromMonth := time.Date(2020, 8, 1, 0, 0, 0, 0, time.UTC)
toMonth := time.Date(2020, 10, 1, 0, 0, 0, 0, time.UTC)
if err = ltcPlugin.WriteListTransactions(fromMonth, toMonth, ltcPlugin.TokenConf().OutputDir); err != nil {
fmt.Println("error", err.Error())
}
}
func debugBTC(btcPlugin *btc.Plugin, config *data.TokenConfig) {
var err error
//var txids []string
//txids, err = data.RPCRawMempool(config)
//if err != nil {
// fmt.Println(err.Error())
//}
//// block 1922810
//txids = []string{
//}
//if rawtxs, err := data.RPCGetRawTransactions(ltcPlugin, txids, config); err != nil {
// fmt.Println(err.Error())
//} else {
// _, _ = ltcPlugin.ImportTransactions(rawtxs)
//}
//
//txs, err := btcPlugin.ListTransactions(0, math.MaxInt32, []string{"1F184JoctgpLnTQmABig3sJNG6QqkG9JuL"})
//if err != nil {
// log.Println("BTC listtransactions failed!", err.Error())
// return
//}
//sort.Slice(txs, func(i, j int) bool {
// return txs[i].Time < txs[j].Time
//})
//if js, err2 := json.Marshal(txs); err2 == nil {
// fmt.Println(string(js))
//}
fromMonth := time.Date(2020, 8, 1, 0, 0, 0, 0, time.UTC)
toMonth := time.Date(2020, 10, 1, 0, 0, 0, 0, time.UTC)
if err = btcPlugin.WriteListTransactions(fromMonth, toMonth, btcPlugin.TokenConf().OutputDir); err != nil {
fmt.Println("error", err.Error())
}
} | }
}
| random_line_split |
main.go | // Copyright (c) 2020 Michael Madgett
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
package main
import (
"fmt"
"github.com/magic53/go-chainloader/block"
"github.com/magic53/go-chainloader/btc"
"github.com/magic53/go-chainloader/data"
"github.com/magic53/go-chainloader/ltc"
"log"
"os"
"os/signal"
"path/filepath"
"sync"
"time"
)
func init() {
//log.SetFlags(0) // logging
//log.SetOutput(ioutil.Discard)
}
func main() {
var err error
// Trace
//f, err := os.Create("trace.out")
//if err != nil {
// panic(err)
//}
//defer f.Close()
//err = trace.Start(f)
//if err != nil {
// panic(err)
//}
//defer trace.Stop()
// Memory profiler
//defer func() {
// f, err := os.Create("mem.prof")
// if err != nil {
// log.Fatal("could not create memory profile: ", err)
// }
// defer f.Close() // error handling omitted for example
// runtime.GC() // get up-to-date statistics
// if err := pprof.WriteHeapProfile(f); err != nil {
// log.Fatal("could not write memory profile: ", err)
// }
//}()
shutdown := make(chan os.Signal, 1)
go func() {
c := make(chan os.Signal, 1)
signal.Notify(c, os.Interrupt)
out:
for {
select {
case sig := <-c:
if sig == os.Interrupt {
log.Printf("Shutting down: received signal %v\n", sig)
data.ShutdownNow()
shutdown <- sig
break out
}
}
}
}()
// Default tx cache date (last 2 months)
toMonth := time.Now()
fromMonth := toMonth.AddDate(0, -2, 0)
dir, err := filepath.Abs(filepath.Dir(os.Args[0]))
if err != nil {
log.Fatal(err)
}
if !data.LoadTokenConfigs(dir) {
log.Println("failed to load token configuration file: real time updates will be disabled")
}
var plugins []interface{}
tokens := data.GetTokenConfigs()
for _, config := range tokens {
switch config.Ticker {
case "BLOCK":
// load block config
plugin := block.NewPlugin(&block.MainNetParams, config)
if err = plugin.LoadBlocks(plugin.BlocksDir()); err != nil {
log.Println("BLOCK failed!", err.Error())
return
}
plugins = append(plugins, plugin)
go func() {
if err = plugin.WriteListTransactions(fromMonth, toMonth, plugin.TokenConf().OutputDir); err != nil {
fmt.Println("error", err.Error())
}
}()
case "LTC":
// load ltc config
plugin := ltc.NewPlugin(<c.MainNetParams, config)
if err = plugin.LoadBlocks(plugin.BlocksDir()); err != nil {
log.Println("LTC failed!", err.Error())
return
}
plugins = append(plugins, plugin)
go func() {
if err = plugin.WriteListTransactions(fromMonth, toMonth, plugin.TokenConf().OutputDir); err != nil {
fmt.Println("error", err.Error())
}
}()
case "BTC":
// load btc config
plugin := btc.NewPlugin(&btc.MainNetParams, config)
if err = plugin.LoadBlocks(plugin.BlocksDir()); err != nil {
log.Println("BTC failed!", err.Error())
return
}
plugins = append(plugins, plugin)
go func() {
if err = plugin.WriteListTransactions(fromMonth, toMonth, plugin.TokenConf().OutputDir); err != nil {
fmt.Println("error", err.Error())
}
}()
}
}
go watchMempools(plugins, false)
out:
for {
select {
case sig := <-shutdown:
if sig == os.Interrupt {
log.Println("Exiting...")
break out
}
}
}
}
// watchMempools watches the mempool on a timer and saves new transaction data
// to disk.
func watchMempools(plugins []interface{}, writeFiles bool) {
var counter uint64
for {
if data.IsShuttingDown() {
break
}
if counter%30000 == 0 { // every ~30 seconds
var wg sync.WaitGroup
wg.Add(len(plugins))
for _, plugin := range plugins {
go func(plugin interface{}) {
defer wg.Done()
dataPlugin, ok := plugin.(data.Plugin)
if !ok {
return
}
mempoolPlugin, ok := plugin.(data.RPCMempoolPlugin)
if !ok {
return
}
rawTxPlugin, ok := plugin.(data.RPCRawTransactionsPlugin)
if !ok {
return
}
listTxPlugin, ok := plugin.(data.ListTransactionsPlugin)
if !ok {
return
}
mempool, err := mempoolPlugin.GetRawMempool()
if err != nil {
log.Printf("failed to getrawmempool on %s", dataPlugin.Ticker())
return
}
if len(mempool) < 1 {
return
}
wireTxs, err := rawTxPlugin.GetRawTransactions(mempool)
if err != nil || len(wireTxs) < 1 {
return
}
var txs []*data.Tx
txs, err = dataPlugin.ImportTransactions(wireTxs)
if err != nil {
log.Printf("failed to import transactions for %s", dataPlugin.Ticker())
return
}
if !writeFiles {
return
}
now := time.Now()
fromMonth := time.Date(2020, 8, 1, 0, 0, 0, 0, time.UTC)
toMonth := time.Date(now.Year(), now.Month(), 1, 0, 0, 0, 0, time.UTC)
for _, tx := range txs {
_ = listTxPlugin.WriteListTransactionsForAddress(tx.Address, fromMonth, toMonth, dataPlugin.TokenConf().OutputDir)
}
}(plugin)
}
wg.Wait()
}
time.Sleep(250 * time.Millisecond)
counter += 250
}
}
// TODO Handle lookup requests
//func startServer() *http.Server {
// handler := func(w http.ResponseWriter, req *http.Request) {
// _, _ = io.WriteString(w, "Hello, world!\n")
// }
// server := &http.Server{Addr: ":8080", Handler: handler}
// return server
//}
func debugBLOCK(blockPlugin *block.Plugin, config *data.TokenConfig) {
var err error
//var txids []string
//txids, err = data.RPCRawMempool(config)
//if err != nil {
// fmt.Println(err.Error())
//}
//txids = []string{
// "6fa76deb0382c1cee0c56f7f5e5ea7266c22bd6134d4738be2c1ec35ff8cf550",
// "6aaa8db67a20abfa86998c40a67bed9eb2ac01d57048488cc4db3a4e1f544bc9",
// "ea1d76239e42745a4ffcfe16b10060f01ac0441545da8bd052b73c1ce74b9135",
// "3d910d1265c33b023d520c99944e6c1987cc149a543096862a9299f142932b1b",
// "2f58de4b43538e794c862e9d62f8c9e559e3d4c62b5d0f4014ede11bb415eea7",
// "e402a318b8b3a95a499812220eb56ed07c0a35c2e61a9cdc9ecf0ec940abc34c",
// "1af5413887c3124b87f0e801c9d96bf86fa347b0f6febd7303360cf70fc52bde",
// "672ad61d9da652ad688cec1f10ab11f018361fc79ed63781291ee3551c8766b6",
// "6ddc1222d9aa9b0768e333f0aa2e44dad00a41a78b31f0268aa9cd85507e1857",
// "47534111299075b129305a74a914f56195ead27d3bff5e75d90f374603f2222b",
// "17e1f44ccda93cb8a198d5ab5179fb6f3fe8e20d521016b37146317d821ea829",
// "e09b625ec7b9e31f12987ff9541354f16e7ebfad4021114eb932b724a6478d8e",
// "8a8c227298ba90f4dc07a1d0efe73c1737292eaa83522b24ec41ce522f3a5290",
//}
//if rawtxs, err := data.RPCGetRawTransactions(blockPlugin, txids, config); err != nil {
// fmt.Println(err.Error())
//} else {
// _, _ = blockPlugin.ImportTransactions(rawtxs)
//}
//txs, err := blockPlugin.ListTransactions(0, math.MaxInt32, []string{"BakbDabCMM1PVuFx8ruVM9AcWCWYfc66eV"})
//txs, err := blockPlugin.ListTransactions(0, math.MaxInt32, []string{"BoWcezbZ9vFTwArtVTHJHp51zQZSGdcLXt"})
//txs, err := blockPlugin.ListTransactions(0, math.MaxInt32, []string{"BreP7JHmYfp9YaGXBwN1F2X9BRq9sRdkiS"})
//if err != nil {
// log.Println("BLOCK listtransactions failed!", err.Error())
// return
//}
//sort.Slice(txs, func(i, j int) bool {
// return txs[i].Time < txs[j].Time
//})
//if js, err2 := json.Marshal(txs); err2 == nil {
// fmt.Println(string(js))
//}
fromMonth := time.Date(2020, 6, 1, 0, 0, 0, 0, time.UTC)
toMonth := time.Date(2020, 10, 1, 0, 0, 0, 0, time.UTC)
if err = blockPlugin.WriteListTransactions(fromMonth, toMonth, blockPlugin.TokenConf().OutputDir); err != nil {
fmt.Println("error", err.Error())
}
}
func debugLTC(ltcPlugin *ltc.Plugin, config *data.TokenConfig) {
var err error
//var txids []string
//txids, err = data.RPCRawMempool(config)
//if err != nil {
// fmt.Println(err.Error())
//}
//// block 1922810
//txids = []string{
// "645569e5ba9fd384dee3e19602cf5d4f81fc963c766def0f3fafd6afb5d6402b",
// "4e7178a1b6345230c6d47fa2b69a8300afd68668a94896606f066f697062d37b",
// "0afd7828844e12de18d4160281b27573bf54a676bf65bb8cfd2e79ccb474711b",
// "d19ed1aa14280693933a9c9e552ed7a294e537413e24780ddf9f865dde90546f",
// "eee223b6f6624ac079259fd379499513e6ac1440c68ddd1e20b91850fba66d22",
// "d2fad87261dcb381883e51953bbf7800ad02186180423a3138547296c165f0cd",
// "760f9f3385a0ada632c750f4603867504bdcda7be245b0b99de7bd065c27d5a3",
// "754421493b42ae1c2940b307d489b59c7bbdcb88d3535464b65519a997ca21a2",
// "e1ae5692f3b1189f4e38b9e0c8544d300fe83a4f1d40f524efec50e369b37901",
// "e0df0fd5f9b2c046ed754585ce45947753e79389272acff70c8cc9a094424db8",
// "8c7e73110c6a1d010d71221ffc35e32d14bd28d02ea8cec689f67e3a669045e6",
// "37294c514341cf787750d75d7ae4d4e7876cb355f8795f3ddd134df0f4910e07",
// "825de0fde9d159889dc3d2ac807cf39e6a4a3dfd7fbe9a3ebbc3d32271446eff",
// "49e64ab1747bffb08d6c61686588aa4bfe77a9b7015b4a45e03ed7385ad9d626",
// "fd1af7fed0f3aced908ec80355e5526152b05cad0973fff141ef7ad812b6a21c",
// "c549e50815c45e85092b2fa4c18706a54d2bcdfb515e1b921972e2e7d245542b",
// "0094b803cfac6f159d0cfe0cec80c7b658b28b8f41e71eb424b5380cb09475fc",
// "b0d59afccfd659b24bf72388263a71ea97e3c627171d32e31de35e95d14a0b27",
// "d62543ebe94d4de95bf037bd345782dd040f93fe95d7aa034661e5ca75c7e934",
// "4ee5dfd35dcf4920b5336f52cdd0402105e2099c5151f4ac932c62d91cf88037",
// "3061ab123835eb2fc68cd160c6c99e0f698512eec09e4c0b813a7af64d8f1b71",
// "1de4cf2323a6adda6a2616c5823bfffdea19731f29d252f691fe040a6f1cb877",
// "b540af13a49cad742614fa88cdfdf4ccfcdd4af9998a10106ee4b0ad471316e3",
// "0011e3e91dff7fca68a45386caf47e47bdc85fbc481ec694a5ab5c7b4e5cc19c",
// "eb9cda055b183fb063f5f9091d6c134268f029f72d022fe0fbf9997ef7426aa4",
// "9b46c4c4e65875780aebc31310621d96199ff3883a38eec08f1c9db5d2735aae",
// "e7d3d7636a28b7b2874e7edb8e2f4af859b9d58be6206f13b088cce4ac66e0b4",
// "e9bcafa9778ae022881fc42502b3e82a48664361472fad667b4e2686c0a8c6b5",
// "e7119ff43b250e3452f1329976c6aa8266ca6fc06c7df16959875e36fa44bfc6",
// "8d296fab683661b8b01552283d872bda80a9a43042172597f5618349eb6ecbc6",
// "50f8f6d82fac6c7f3b5a18a3a8e49d8211424b15e773556928b7872c76d5cdd6",
// "4b664a73e42cb6b72937abb6871c894a6a08957901f868534d56800dca8eefee",
// "907499cfd9752cd2e5caa2ce022f8942e7498be9d4abec429d4a6492f8a0a8f5",
//}
//if rawtxs, err := data.RPCGetRawTransactions(ltcPlugin, txids, config); err != nil {
// fmt.Println(err.Error())
//} else {
// _, _ = ltcPlugin.ImportTransactions(rawtxs)
//}
//
//txs, err := ltcPlugin.ListTransactions(0, math.MaxInt32, []string{"LV5nrreyVZJVvptA9PZSD4ViegKh7Qa8MA"})
//if err != nil {
// log.Println("LTC listtransactions failed!", err.Error())
// return
//}
//sort.Slice(txs, func(i, j int) bool {
// return txs[i].Time < txs[j].Time
//})
//if js, err2 := json.Marshal(txs); err2 == nil {
// fmt.Println(string(js))
//}
fromMonth := time.Date(2020, 8, 1, 0, 0, 0, 0, time.UTC)
toMonth := time.Date(2020, 10, 1, 0, 0, 0, 0, time.UTC)
if err = ltcPlugin.WriteListTransactions(fromMonth, toMonth, ltcPlugin.TokenConf().OutputDir); err != nil {
fmt.Println("error", err.Error())
}
}
func | (btcPlugin *btc.Plugin, config *data.TokenConfig) {
var err error
//var txids []string
//txids, err = data.RPCRawMempool(config)
//if err != nil {
// fmt.Println(err.Error())
//}
//// block 1922810
//txids = []string{
//}
//if rawtxs, err := data.RPCGetRawTransactions(ltcPlugin, txids, config); err != nil {
// fmt.Println(err.Error())
//} else {
// _, _ = ltcPlugin.ImportTransactions(rawtxs)
//}
//
//txs, err := btcPlugin.ListTransactions(0, math.MaxInt32, []string{"1F184JoctgpLnTQmABig3sJNG6QqkG9JuL"})
//if err != nil {
// log.Println("BTC listtransactions failed!", err.Error())
// return
//}
//sort.Slice(txs, func(i, j int) bool {
// return txs[i].Time < txs[j].Time
//})
//if js, err2 := json.Marshal(txs); err2 == nil {
// fmt.Println(string(js))
//}
fromMonth := time.Date(2020, 8, 1, 0, 0, 0, 0, time.UTC)
toMonth := time.Date(2020, 10, 1, 0, 0, 0, 0, time.UTC)
if err = btcPlugin.WriteListTransactions(fromMonth, toMonth, btcPlugin.TokenConf().OutputDir); err != nil {
fmt.Println("error", err.Error())
}
}
| debugBTC | identifier_name |
main.go | // Copyright (c) 2020 Michael Madgett
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
package main
import (
"fmt"
"github.com/magic53/go-chainloader/block"
"github.com/magic53/go-chainloader/btc"
"github.com/magic53/go-chainloader/data"
"github.com/magic53/go-chainloader/ltc"
"log"
"os"
"os/signal"
"path/filepath"
"sync"
"time"
)
func init() {
//log.SetFlags(0) // logging
//log.SetOutput(ioutil.Discard)
}
func main() {
var err error
// Trace
//f, err := os.Create("trace.out")
//if err != nil {
// panic(err)
//}
//defer f.Close()
//err = trace.Start(f)
//if err != nil {
// panic(err)
//}
//defer trace.Stop()
// Memory profiler
//defer func() {
// f, err := os.Create("mem.prof")
// if err != nil {
// log.Fatal("could not create memory profile: ", err)
// }
// defer f.Close() // error handling omitted for example
// runtime.GC() // get up-to-date statistics
// if err := pprof.WriteHeapProfile(f); err != nil {
// log.Fatal("could not write memory profile: ", err)
// }
//}()
shutdown := make(chan os.Signal, 1)
go func() {
c := make(chan os.Signal, 1)
signal.Notify(c, os.Interrupt)
out:
for {
select {
case sig := <-c:
if sig == os.Interrupt {
log.Printf("Shutting down: received signal %v\n", sig)
data.ShutdownNow()
shutdown <- sig
break out
}
}
}
}()
// Default tx cache date (last 2 months)
toMonth := time.Now()
fromMonth := toMonth.AddDate(0, -2, 0)
dir, err := filepath.Abs(filepath.Dir(os.Args[0]))
if err != nil {
log.Fatal(err)
}
if !data.LoadTokenConfigs(dir) {
log.Println("failed to load token configuration file: real time updates will be disabled")
}
var plugins []interface{}
tokens := data.GetTokenConfigs()
for _, config := range tokens {
switch config.Ticker {
case "BLOCK":
// load block config
plugin := block.NewPlugin(&block.MainNetParams, config)
if err = plugin.LoadBlocks(plugin.BlocksDir()); err != nil {
log.Println("BLOCK failed!", err.Error())
return
}
plugins = append(plugins, plugin)
go func() {
if err = plugin.WriteListTransactions(fromMonth, toMonth, plugin.TokenConf().OutputDir); err != nil {
fmt.Println("error", err.Error())
}
}()
case "LTC":
// load ltc config
plugin := ltc.NewPlugin(<c.MainNetParams, config)
if err = plugin.LoadBlocks(plugin.BlocksDir()); err != nil {
log.Println("LTC failed!", err.Error())
return
}
plugins = append(plugins, plugin)
go func() {
if err = plugin.WriteListTransactions(fromMonth, toMonth, plugin.TokenConf().OutputDir); err != nil {
fmt.Println("error", err.Error())
}
}()
case "BTC":
// load btc config
plugin := btc.NewPlugin(&btc.MainNetParams, config)
if err = plugin.LoadBlocks(plugin.BlocksDir()); err != nil {
log.Println("BTC failed!", err.Error())
return
}
plugins = append(plugins, plugin)
go func() {
if err = plugin.WriteListTransactions(fromMonth, toMonth, plugin.TokenConf().OutputDir); err != nil {
fmt.Println("error", err.Error())
}
}()
}
}
go watchMempools(plugins, false)
out:
for {
select {
case sig := <-shutdown:
if sig == os.Interrupt {
log.Println("Exiting...")
break out
}
}
}
}
// watchMempools watches the mempool on a timer and saves new transaction data
// to disk.
func watchMempools(plugins []interface{}, writeFiles bool) {
var counter uint64
for {
if data.IsShuttingDown() {
break
}
if counter%30000 == 0 { // every ~30 seconds
var wg sync.WaitGroup
wg.Add(len(plugins))
for _, plugin := range plugins {
go func(plugin interface{}) {
defer wg.Done()
dataPlugin, ok := plugin.(data.Plugin)
if !ok {
return
}
mempoolPlugin, ok := plugin.(data.RPCMempoolPlugin)
if !ok {
return
}
rawTxPlugin, ok := plugin.(data.RPCRawTransactionsPlugin)
if !ok {
return
}
listTxPlugin, ok := plugin.(data.ListTransactionsPlugin)
if !ok {
return
}
mempool, err := mempoolPlugin.GetRawMempool()
if err != nil {
log.Printf("failed to getrawmempool on %s", dataPlugin.Ticker())
return
}
if len(mempool) < 1 {
return
}
wireTxs, err := rawTxPlugin.GetRawTransactions(mempool)
if err != nil || len(wireTxs) < 1 {
return
}
var txs []*data.Tx
txs, err = dataPlugin.ImportTransactions(wireTxs)
if err != nil {
log.Printf("failed to import transactions for %s", dataPlugin.Ticker())
return
}
if !writeFiles {
return
}
now := time.Now()
fromMonth := time.Date(2020, 8, 1, 0, 0, 0, 0, time.UTC)
toMonth := time.Date(now.Year(), now.Month(), 1, 0, 0, 0, 0, time.UTC)
for _, tx := range txs {
_ = listTxPlugin.WriteListTransactionsForAddress(tx.Address, fromMonth, toMonth, dataPlugin.TokenConf().OutputDir)
}
}(plugin)
}
wg.Wait()
}
time.Sleep(250 * time.Millisecond)
counter += 250
}
}
// TODO Handle lookup requests
//func startServer() *http.Server {
// handler := func(w http.ResponseWriter, req *http.Request) {
// _, _ = io.WriteString(w, "Hello, world!\n")
// }
// server := &http.Server{Addr: ":8080", Handler: handler}
// return server
//}
func debugBLOCK(blockPlugin *block.Plugin, config *data.TokenConfig) {
var err error
//var txids []string
//txids, err = data.RPCRawMempool(config)
//if err != nil {
// fmt.Println(err.Error())
//}
//txids = []string{
// "6fa76deb0382c1cee0c56f7f5e5ea7266c22bd6134d4738be2c1ec35ff8cf550",
// "6aaa8db67a20abfa86998c40a67bed9eb2ac01d57048488cc4db3a4e1f544bc9",
// "ea1d76239e42745a4ffcfe16b10060f01ac0441545da8bd052b73c1ce74b9135",
// "3d910d1265c33b023d520c99944e6c1987cc149a543096862a9299f142932b1b",
// "2f58de4b43538e794c862e9d62f8c9e559e3d4c62b5d0f4014ede11bb415eea7",
// "e402a318b8b3a95a499812220eb56ed07c0a35c2e61a9cdc9ecf0ec940abc34c",
// "1af5413887c3124b87f0e801c9d96bf86fa347b0f6febd7303360cf70fc52bde",
// "672ad61d9da652ad688cec1f10ab11f018361fc79ed63781291ee3551c8766b6",
// "6ddc1222d9aa9b0768e333f0aa2e44dad00a41a78b31f0268aa9cd85507e1857",
// "47534111299075b129305a74a914f56195ead27d3bff5e75d90f374603f2222b",
// "17e1f44ccda93cb8a198d5ab5179fb6f3fe8e20d521016b37146317d821ea829",
// "e09b625ec7b9e31f12987ff9541354f16e7ebfad4021114eb932b724a6478d8e",
// "8a8c227298ba90f4dc07a1d0efe73c1737292eaa83522b24ec41ce522f3a5290",
//}
//if rawtxs, err := data.RPCGetRawTransactions(blockPlugin, txids, config); err != nil {
// fmt.Println(err.Error())
//} else {
// _, _ = blockPlugin.ImportTransactions(rawtxs)
//}
//txs, err := blockPlugin.ListTransactions(0, math.MaxInt32, []string{"BakbDabCMM1PVuFx8ruVM9AcWCWYfc66eV"})
//txs, err := blockPlugin.ListTransactions(0, math.MaxInt32, []string{"BoWcezbZ9vFTwArtVTHJHp51zQZSGdcLXt"})
//txs, err := blockPlugin.ListTransactions(0, math.MaxInt32, []string{"BreP7JHmYfp9YaGXBwN1F2X9BRq9sRdkiS"})
//if err != nil {
// log.Println("BLOCK listtransactions failed!", err.Error())
// return
//}
//sort.Slice(txs, func(i, j int) bool {
// return txs[i].Time < txs[j].Time
//})
//if js, err2 := json.Marshal(txs); err2 == nil {
// fmt.Println(string(js))
//}
fromMonth := time.Date(2020, 6, 1, 0, 0, 0, 0, time.UTC)
toMonth := time.Date(2020, 10, 1, 0, 0, 0, 0, time.UTC)
if err = blockPlugin.WriteListTransactions(fromMonth, toMonth, blockPlugin.TokenConf().OutputDir); err != nil {
fmt.Println("error", err.Error())
}
}
func debugLTC(ltcPlugin *ltc.Plugin, config *data.TokenConfig) {
var err error
//var txids []string
//txids, err = data.RPCRawMempool(config)
//if err != nil {
// fmt.Println(err.Error())
//}
//// block 1922810
//txids = []string{
// "645569e5ba9fd384dee3e19602cf5d4f81fc963c766def0f3fafd6afb5d6402b",
// "4e7178a1b6345230c6d47fa2b69a8300afd68668a94896606f066f697062d37b",
// "0afd7828844e12de18d4160281b27573bf54a676bf65bb8cfd2e79ccb474711b",
// "d19ed1aa14280693933a9c9e552ed7a294e537413e24780ddf9f865dde90546f",
// "eee223b6f6624ac079259fd379499513e6ac1440c68ddd1e20b91850fba66d22",
// "d2fad87261dcb381883e51953bbf7800ad02186180423a3138547296c165f0cd",
// "760f9f3385a0ada632c750f4603867504bdcda7be245b0b99de7bd065c27d5a3",
// "754421493b42ae1c2940b307d489b59c7bbdcb88d3535464b65519a997ca21a2",
// "e1ae5692f3b1189f4e38b9e0c8544d300fe83a4f1d40f524efec50e369b37901",
// "e0df0fd5f9b2c046ed754585ce45947753e79389272acff70c8cc9a094424db8",
// "8c7e73110c6a1d010d71221ffc35e32d14bd28d02ea8cec689f67e3a669045e6",
// "37294c514341cf787750d75d7ae4d4e7876cb355f8795f3ddd134df0f4910e07",
// "825de0fde9d159889dc3d2ac807cf39e6a4a3dfd7fbe9a3ebbc3d32271446eff",
// "49e64ab1747bffb08d6c61686588aa4bfe77a9b7015b4a45e03ed7385ad9d626",
// "fd1af7fed0f3aced908ec80355e5526152b05cad0973fff141ef7ad812b6a21c",
// "c549e50815c45e85092b2fa4c18706a54d2bcdfb515e1b921972e2e7d245542b",
// "0094b803cfac6f159d0cfe0cec80c7b658b28b8f41e71eb424b5380cb09475fc",
// "b0d59afccfd659b24bf72388263a71ea97e3c627171d32e31de35e95d14a0b27",
// "d62543ebe94d4de95bf037bd345782dd040f93fe95d7aa034661e5ca75c7e934",
// "4ee5dfd35dcf4920b5336f52cdd0402105e2099c5151f4ac932c62d91cf88037",
// "3061ab123835eb2fc68cd160c6c99e0f698512eec09e4c0b813a7af64d8f1b71",
// "1de4cf2323a6adda6a2616c5823bfffdea19731f29d252f691fe040a6f1cb877",
// "b540af13a49cad742614fa88cdfdf4ccfcdd4af9998a10106ee4b0ad471316e3",
// "0011e3e91dff7fca68a45386caf47e47bdc85fbc481ec694a5ab5c7b4e5cc19c",
// "eb9cda055b183fb063f5f9091d6c134268f029f72d022fe0fbf9997ef7426aa4",
// "9b46c4c4e65875780aebc31310621d96199ff3883a38eec08f1c9db5d2735aae",
// "e7d3d7636a28b7b2874e7edb8e2f4af859b9d58be6206f13b088cce4ac66e0b4",
// "e9bcafa9778ae022881fc42502b3e82a48664361472fad667b4e2686c0a8c6b5",
// "e7119ff43b250e3452f1329976c6aa8266ca6fc06c7df16959875e36fa44bfc6",
// "8d296fab683661b8b01552283d872bda80a9a43042172597f5618349eb6ecbc6",
// "50f8f6d82fac6c7f3b5a18a3a8e49d8211424b15e773556928b7872c76d5cdd6",
// "4b664a73e42cb6b72937abb6871c894a6a08957901f868534d56800dca8eefee",
// "907499cfd9752cd2e5caa2ce022f8942e7498be9d4abec429d4a6492f8a0a8f5",
//}
//if rawtxs, err := data.RPCGetRawTransactions(ltcPlugin, txids, config); err != nil {
// fmt.Println(err.Error())
//} else {
// _, _ = ltcPlugin.ImportTransactions(rawtxs)
//}
//
//txs, err := ltcPlugin.ListTransactions(0, math.MaxInt32, []string{"LV5nrreyVZJVvptA9PZSD4ViegKh7Qa8MA"})
//if err != nil {
// log.Println("LTC listtransactions failed!", err.Error())
// return
//}
//sort.Slice(txs, func(i, j int) bool {
// return txs[i].Time < txs[j].Time
//})
//if js, err2 := json.Marshal(txs); err2 == nil {
// fmt.Println(string(js))
//}
fromMonth := time.Date(2020, 8, 1, 0, 0, 0, 0, time.UTC)
toMonth := time.Date(2020, 10, 1, 0, 0, 0, 0, time.UTC)
if err = ltcPlugin.WriteListTransactions(fromMonth, toMonth, ltcPlugin.TokenConf().OutputDir); err != nil |
}
func debugBTC(btcPlugin *btc.Plugin, config *data.TokenConfig) {
var err error
//var txids []string
//txids, err = data.RPCRawMempool(config)
//if err != nil {
// fmt.Println(err.Error())
//}
//// block 1922810
//txids = []string{
//}
//if rawtxs, err := data.RPCGetRawTransactions(ltcPlugin, txids, config); err != nil {
// fmt.Println(err.Error())
//} else {
// _, _ = ltcPlugin.ImportTransactions(rawtxs)
//}
//
//txs, err := btcPlugin.ListTransactions(0, math.MaxInt32, []string{"1F184JoctgpLnTQmABig3sJNG6QqkG9JuL"})
//if err != nil {
// log.Println("BTC listtransactions failed!", err.Error())
// return
//}
//sort.Slice(txs, func(i, j int) bool {
// return txs[i].Time < txs[j].Time
//})
//if js, err2 := json.Marshal(txs); err2 == nil {
// fmt.Println(string(js))
//}
fromMonth := time.Date(2020, 8, 1, 0, 0, 0, 0, time.UTC)
toMonth := time.Date(2020, 10, 1, 0, 0, 0, 0, time.UTC)
if err = btcPlugin.WriteListTransactions(fromMonth, toMonth, btcPlugin.TokenConf().OutputDir); err != nil {
fmt.Println("error", err.Error())
}
}
| {
fmt.Println("error", err.Error())
} | conditional_block |
main.go | // Copyright (c) 2020 Michael Madgett
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
package main
import (
"fmt"
"github.com/magic53/go-chainloader/block"
"github.com/magic53/go-chainloader/btc"
"github.com/magic53/go-chainloader/data"
"github.com/magic53/go-chainloader/ltc"
"log"
"os"
"os/signal"
"path/filepath"
"sync"
"time"
)
func init() {
//log.SetFlags(0) // logging
//log.SetOutput(ioutil.Discard)
}
func main() |
// watchMempools watches the mempool on a timer and saves new transaction data
// to disk.
func watchMempools(plugins []interface{}, writeFiles bool) {
var counter uint64
for {
if data.IsShuttingDown() {
break
}
if counter%30000 == 0 { // every ~30 seconds
var wg sync.WaitGroup
wg.Add(len(plugins))
for _, plugin := range plugins {
go func(plugin interface{}) {
defer wg.Done()
dataPlugin, ok := plugin.(data.Plugin)
if !ok {
return
}
mempoolPlugin, ok := plugin.(data.RPCMempoolPlugin)
if !ok {
return
}
rawTxPlugin, ok := plugin.(data.RPCRawTransactionsPlugin)
if !ok {
return
}
listTxPlugin, ok := plugin.(data.ListTransactionsPlugin)
if !ok {
return
}
mempool, err := mempoolPlugin.GetRawMempool()
if err != nil {
log.Printf("failed to getrawmempool on %s", dataPlugin.Ticker())
return
}
if len(mempool) < 1 {
return
}
wireTxs, err := rawTxPlugin.GetRawTransactions(mempool)
if err != nil || len(wireTxs) < 1 {
return
}
var txs []*data.Tx
txs, err = dataPlugin.ImportTransactions(wireTxs)
if err != nil {
log.Printf("failed to import transactions for %s", dataPlugin.Ticker())
return
}
if !writeFiles {
return
}
now := time.Now()
fromMonth := time.Date(2020, 8, 1, 0, 0, 0, 0, time.UTC)
toMonth := time.Date(now.Year(), now.Month(), 1, 0, 0, 0, 0, time.UTC)
for _, tx := range txs {
_ = listTxPlugin.WriteListTransactionsForAddress(tx.Address, fromMonth, toMonth, dataPlugin.TokenConf().OutputDir)
}
}(plugin)
}
wg.Wait()
}
time.Sleep(250 * time.Millisecond)
counter += 250
}
}
// TODO Handle lookup requests
//func startServer() *http.Server {
// handler := func(w http.ResponseWriter, req *http.Request) {
// _, _ = io.WriteString(w, "Hello, world!\n")
// }
// server := &http.Server{Addr: ":8080", Handler: handler}
// return server
//}
func debugBLOCK(blockPlugin *block.Plugin, config *data.TokenConfig) {
var err error
//var txids []string
//txids, err = data.RPCRawMempool(config)
//if err != nil {
// fmt.Println(err.Error())
//}
//txids = []string{
// "6fa76deb0382c1cee0c56f7f5e5ea7266c22bd6134d4738be2c1ec35ff8cf550",
// "6aaa8db67a20abfa86998c40a67bed9eb2ac01d57048488cc4db3a4e1f544bc9",
// "ea1d76239e42745a4ffcfe16b10060f01ac0441545da8bd052b73c1ce74b9135",
// "3d910d1265c33b023d520c99944e6c1987cc149a543096862a9299f142932b1b",
// "2f58de4b43538e794c862e9d62f8c9e559e3d4c62b5d0f4014ede11bb415eea7",
// "e402a318b8b3a95a499812220eb56ed07c0a35c2e61a9cdc9ecf0ec940abc34c",
// "1af5413887c3124b87f0e801c9d96bf86fa347b0f6febd7303360cf70fc52bde",
// "672ad61d9da652ad688cec1f10ab11f018361fc79ed63781291ee3551c8766b6",
// "6ddc1222d9aa9b0768e333f0aa2e44dad00a41a78b31f0268aa9cd85507e1857",
// "47534111299075b129305a74a914f56195ead27d3bff5e75d90f374603f2222b",
// "17e1f44ccda93cb8a198d5ab5179fb6f3fe8e20d521016b37146317d821ea829",
// "e09b625ec7b9e31f12987ff9541354f16e7ebfad4021114eb932b724a6478d8e",
// "8a8c227298ba90f4dc07a1d0efe73c1737292eaa83522b24ec41ce522f3a5290",
//}
//if rawtxs, err := data.RPCGetRawTransactions(blockPlugin, txids, config); err != nil {
// fmt.Println(err.Error())
//} else {
// _, _ = blockPlugin.ImportTransactions(rawtxs)
//}
//txs, err := blockPlugin.ListTransactions(0, math.MaxInt32, []string{"BakbDabCMM1PVuFx8ruVM9AcWCWYfc66eV"})
//txs, err := blockPlugin.ListTransactions(0, math.MaxInt32, []string{"BoWcezbZ9vFTwArtVTHJHp51zQZSGdcLXt"})
//txs, err := blockPlugin.ListTransactions(0, math.MaxInt32, []string{"BreP7JHmYfp9YaGXBwN1F2X9BRq9sRdkiS"})
//if err != nil {
// log.Println("BLOCK listtransactions failed!", err.Error())
// return
//}
//sort.Slice(txs, func(i, j int) bool {
// return txs[i].Time < txs[j].Time
//})
//if js, err2 := json.Marshal(txs); err2 == nil {
// fmt.Println(string(js))
//}
fromMonth := time.Date(2020, 6, 1, 0, 0, 0, 0, time.UTC)
toMonth := time.Date(2020, 10, 1, 0, 0, 0, 0, time.UTC)
if err = blockPlugin.WriteListTransactions(fromMonth, toMonth, blockPlugin.TokenConf().OutputDir); err != nil {
fmt.Println("error", err.Error())
}
}
func debugLTC(ltcPlugin *ltc.Plugin, config *data.TokenConfig) {
var err error
//var txids []string
//txids, err = data.RPCRawMempool(config)
//if err != nil {
// fmt.Println(err.Error())
//}
//// block 1922810
//txids = []string{
// "645569e5ba9fd384dee3e19602cf5d4f81fc963c766def0f3fafd6afb5d6402b",
// "4e7178a1b6345230c6d47fa2b69a8300afd68668a94896606f066f697062d37b",
// "0afd7828844e12de18d4160281b27573bf54a676bf65bb8cfd2e79ccb474711b",
// "d19ed1aa14280693933a9c9e552ed7a294e537413e24780ddf9f865dde90546f",
// "eee223b6f6624ac079259fd379499513e6ac1440c68ddd1e20b91850fba66d22",
// "d2fad87261dcb381883e51953bbf7800ad02186180423a3138547296c165f0cd",
// "760f9f3385a0ada632c750f4603867504bdcda7be245b0b99de7bd065c27d5a3",
// "754421493b42ae1c2940b307d489b59c7bbdcb88d3535464b65519a997ca21a2",
// "e1ae5692f3b1189f4e38b9e0c8544d300fe83a4f1d40f524efec50e369b37901",
// "e0df0fd5f9b2c046ed754585ce45947753e79389272acff70c8cc9a094424db8",
// "8c7e73110c6a1d010d71221ffc35e32d14bd28d02ea8cec689f67e3a669045e6",
// "37294c514341cf787750d75d7ae4d4e7876cb355f8795f3ddd134df0f4910e07",
// "825de0fde9d159889dc3d2ac807cf39e6a4a3dfd7fbe9a3ebbc3d32271446eff",
// "49e64ab1747bffb08d6c61686588aa4bfe77a9b7015b4a45e03ed7385ad9d626",
// "fd1af7fed0f3aced908ec80355e5526152b05cad0973fff141ef7ad812b6a21c",
// "c549e50815c45e85092b2fa4c18706a54d2bcdfb515e1b921972e2e7d245542b",
// "0094b803cfac6f159d0cfe0cec80c7b658b28b8f41e71eb424b5380cb09475fc",
// "b0d59afccfd659b24bf72388263a71ea97e3c627171d32e31de35e95d14a0b27",
// "d62543ebe94d4de95bf037bd345782dd040f93fe95d7aa034661e5ca75c7e934",
// "4ee5dfd35dcf4920b5336f52cdd0402105e2099c5151f4ac932c62d91cf88037",
// "3061ab123835eb2fc68cd160c6c99e0f698512eec09e4c0b813a7af64d8f1b71",
// "1de4cf2323a6adda6a2616c5823bfffdea19731f29d252f691fe040a6f1cb877",
// "b540af13a49cad742614fa88cdfdf4ccfcdd4af9998a10106ee4b0ad471316e3",
// "0011e3e91dff7fca68a45386caf47e47bdc85fbc481ec694a5ab5c7b4e5cc19c",
// "eb9cda055b183fb063f5f9091d6c134268f029f72d022fe0fbf9997ef7426aa4",
// "9b46c4c4e65875780aebc31310621d96199ff3883a38eec08f1c9db5d2735aae",
// "e7d3d7636a28b7b2874e7edb8e2f4af859b9d58be6206f13b088cce4ac66e0b4",
// "e9bcafa9778ae022881fc42502b3e82a48664361472fad667b4e2686c0a8c6b5",
// "e7119ff43b250e3452f1329976c6aa8266ca6fc06c7df16959875e36fa44bfc6",
// "8d296fab683661b8b01552283d872bda80a9a43042172597f5618349eb6ecbc6",
// "50f8f6d82fac6c7f3b5a18a3a8e49d8211424b15e773556928b7872c76d5cdd6",
// "4b664a73e42cb6b72937abb6871c894a6a08957901f868534d56800dca8eefee",
// "907499cfd9752cd2e5caa2ce022f8942e7498be9d4abec429d4a6492f8a0a8f5",
//}
//if rawtxs, err := data.RPCGetRawTransactions(ltcPlugin, txids, config); err != nil {
// fmt.Println(err.Error())
//} else {
// _, _ = ltcPlugin.ImportTransactions(rawtxs)
//}
//
//txs, err := ltcPlugin.ListTransactions(0, math.MaxInt32, []string{"LV5nrreyVZJVvptA9PZSD4ViegKh7Qa8MA"})
//if err != nil {
// log.Println("LTC listtransactions failed!", err.Error())
// return
//}
//sort.Slice(txs, func(i, j int) bool {
// return txs[i].Time < txs[j].Time
//})
//if js, err2 := json.Marshal(txs); err2 == nil {
// fmt.Println(string(js))
//}
fromMonth := time.Date(2020, 8, 1, 0, 0, 0, 0, time.UTC)
toMonth := time.Date(2020, 10, 1, 0, 0, 0, 0, time.UTC)
if err = ltcPlugin.WriteListTransactions(fromMonth, toMonth, ltcPlugin.TokenConf().OutputDir); err != nil {
fmt.Println("error", err.Error())
}
}
func debugBTC(btcPlugin *btc.Plugin, config *data.TokenConfig) {
var err error
//var txids []string
//txids, err = data.RPCRawMempool(config)
//if err != nil {
// fmt.Println(err.Error())
//}
//// block 1922810
//txids = []string{
//}
//if rawtxs, err := data.RPCGetRawTransactions(ltcPlugin, txids, config); err != nil {
// fmt.Println(err.Error())
//} else {
// _, _ = ltcPlugin.ImportTransactions(rawtxs)
//}
//
//txs, err := btcPlugin.ListTransactions(0, math.MaxInt32, []string{"1F184JoctgpLnTQmABig3sJNG6QqkG9JuL"})
//if err != nil {
// log.Println("BTC listtransactions failed!", err.Error())
// return
//}
//sort.Slice(txs, func(i, j int) bool {
// return txs[i].Time < txs[j].Time
//})
//if js, err2 := json.Marshal(txs); err2 == nil {
// fmt.Println(string(js))
//}
fromMonth := time.Date(2020, 8, 1, 0, 0, 0, 0, time.UTC)
toMonth := time.Date(2020, 10, 1, 0, 0, 0, 0, time.UTC)
if err = btcPlugin.WriteListTransactions(fromMonth, toMonth, btcPlugin.TokenConf().OutputDir); err != nil {
fmt.Println("error", err.Error())
}
}
| {
var err error
// Trace
//f, err := os.Create("trace.out")
//if err != nil {
// panic(err)
//}
//defer f.Close()
//err = trace.Start(f)
//if err != nil {
// panic(err)
//}
//defer trace.Stop()
// Memory profiler
//defer func() {
// f, err := os.Create("mem.prof")
// if err != nil {
// log.Fatal("could not create memory profile: ", err)
// }
// defer f.Close() // error handling omitted for example
// runtime.GC() // get up-to-date statistics
// if err := pprof.WriteHeapProfile(f); err != nil {
// log.Fatal("could not write memory profile: ", err)
// }
//}()
shutdown := make(chan os.Signal, 1)
go func() {
c := make(chan os.Signal, 1)
signal.Notify(c, os.Interrupt)
out:
for {
select {
case sig := <-c:
if sig == os.Interrupt {
log.Printf("Shutting down: received signal %v\n", sig)
data.ShutdownNow()
shutdown <- sig
break out
}
}
}
}()
// Default tx cache date (last 2 months)
toMonth := time.Now()
fromMonth := toMonth.AddDate(0, -2, 0)
dir, err := filepath.Abs(filepath.Dir(os.Args[0]))
if err != nil {
log.Fatal(err)
}
if !data.LoadTokenConfigs(dir) {
log.Println("failed to load token configuration file: real time updates will be disabled")
}
var plugins []interface{}
tokens := data.GetTokenConfigs()
for _, config := range tokens {
switch config.Ticker {
case "BLOCK":
// load block config
plugin := block.NewPlugin(&block.MainNetParams, config)
if err = plugin.LoadBlocks(plugin.BlocksDir()); err != nil {
log.Println("BLOCK failed!", err.Error())
return
}
plugins = append(plugins, plugin)
go func() {
if err = plugin.WriteListTransactions(fromMonth, toMonth, plugin.TokenConf().OutputDir); err != nil {
fmt.Println("error", err.Error())
}
}()
case "LTC":
// load ltc config
plugin := ltc.NewPlugin(&ltc.MainNetParams, config)
if err = plugin.LoadBlocks(plugin.BlocksDir()); err != nil {
log.Println("LTC failed!", err.Error())
return
}
plugins = append(plugins, plugin)
go func() {
if err = plugin.WriteListTransactions(fromMonth, toMonth, plugin.TokenConf().OutputDir); err != nil {
fmt.Println("error", err.Error())
}
}()
case "BTC":
// load btc config
plugin := btc.NewPlugin(&btc.MainNetParams, config)
if err = plugin.LoadBlocks(plugin.BlocksDir()); err != nil {
log.Println("BTC failed!", err.Error())
return
}
plugins = append(plugins, plugin)
go func() {
if err = plugin.WriteListTransactions(fromMonth, toMonth, plugin.TokenConf().OutputDir); err != nil {
fmt.Println("error", err.Error())
}
}()
}
}
go watchMempools(plugins, false)
out:
for {
select {
case sig := <-shutdown:
if sig == os.Interrupt {
log.Println("Exiting...")
break out
}
}
}
} | identifier_body |
ssl-audit.py | import OpenSSL , ssl, argparse ,json, os.path, validators, requests, logging
from datetime import datetime
from dateutil.parser import parse
from urllib.parse import urljoin
from akamai.edgegrid import EdgeGridAuth, EdgeRc
from pathlib import Path
#TODO: FIX logger format
#turn off logger
#send output to tmp file
#improve help documentation
parser = argparse.ArgumentParser(description='Certificate Expiration Audit\nLatest version and documentation can be found here:\nhttps://github.com/roymartinezblanco/Akamai-SSL-Expiration-Audit',formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument('--version', help='Show script version information',
required=False, action='store_true')
parser.add_argument('--audit', type=str, choices=['account','config','file','list'], help='*required* Type of Audit to be done: [account,config,file,list]',
required=False)
parser.add_argument('--domains', nargs='+', type=str, help='List of domains to query.',
required=False)
parser.add_argument('--file-type', type=str, choices=['list','akamai'], help='File Type (list, akamai)',
required=False, default='akamai')
parser.add_argument('--file', type=str, help='File with list of domains (one per line)',
required=False)
parser.add_argument('--config-name',nargs='+', type=str, help='Name or List of Names to be audited.',
required=False)
parser.add_argument('--verbose', help='Show debug information',
required=False, action='store_true')
parser.add_argument('--section', type=str, help='Select an Edgerc section other than the default',
required=False)
parser.add_argument('--account-key', type=str, help='Account ID to Query for multi account management (switch key)',
required=False)
args = vars(parser.parse_args())
### Global Variables
version= "1.0.30"
errors = []
items = {}
item_list= []
logger = logging.getLogger("SSL-AUDIT")
def configure_logging():
logger.setLevel(logging.DEBUG)
# Format for our loglines
formatter = logging.Formatter("[%(asctime)s] - %(name)s - %(levelname)s - %(message)s")
# Setup console logging
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
ch.setFormatter(formatter)
logger.addHandler(ch)
# Setup file logging as well
# fh = logging.FileHandler(LOG_FILENAME)
# fh.setLevel(logging.INFO)
# fh.setFormatter(formatter)
# logger.addHandler(fh)
def readObject(File,Ftype:str,configName:str=None):
origins=[]
if Ftype != "API":
if os.path.exists(File):
if Ftype == "list":
if args['verbose']:
#print("...... Reading file '{}'.".format(File))
logger.debug("Reading file '{}'.".format(File))
lines = [line.rstrip('\n') for line in open(File)]
getCertificates(lines)
else:
try:
with open(File) as handle:
dictdump = json.loads(handle.read())
except:
parser.error("Unable to Parse JSON File, please validate format.")
else:
findOrigins(dictdump,origins,configName)
getCertificates(origins,configName)
else:
parser.error("The File {} does not exist!".format(File))
else:
if args['verbose']:
logger.debug("Reading rules for the property '{}' .".format(configName))
findOrigins(File,origins,configName)
getCertificates(origins,configName)
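# findOrigins recursively walks a Property Manager rule tree and collects hostnames from origin behaviors of type CUSTOMER.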
def findOrigins(obj,origins:list,configName:str=None):
for ok, ov in obj.items():
if ok == "name" and ov == "origin":
options = dict(obj["options"])
if options["originType"] == "CUSTOMER":
if args['verbose']:
logger.debug("Origin behavior found with the value '{}' on the configuration '{}'.".format(dict(obj["options"])["hostname"],configName))
origins.append (dict(obj["options"])["hostname"])
for k, v in obj.items():
if isinstance(v,dict) or isinstance(v,list):
if "values" not in k.lower():
if isinstance(v,list):
if len(v) > 0:
for i in v:
if isinstance(i, dict):
findOrigins(dict(i),origins,configName)
else:
findOrigins(v,origins,configName)
def printJson():
if args['verbose']:
logger.debug("Printing JSON.")
logger.debug("[end]")
if len(item_list) == 0:
logger.error("No output generated to print!")
return None
if item_list[0] != {}:
items['items'] = item_list
if args['audit'] == "list":
if len(errors) != 0:
items['errors'] = errors
formatted_json = json.dumps(items, sort_keys=False, indent=4)
print(formatted_json)
def getCertificates(domains: list,configName:str=None):
currentConfig={}
if args['audit'] != "list" and args['audit'] != "file":
currentConfig['propertyName'] = configName
certs=[]
er=[]
for host in domains:
if args['verbose']:
logger.debug("Looking up the certificate for '{}' ".format(host))
if "{{" in host:
if args['verbose']:
logger.warning("'{}' is a variable and will not be looked up!".format(host))
er.append("'{}' is a variable and will not be looked up!".format(host))
else:
if validators.domain(host) != True:
if args['verbose']:
if configName is not None:
logger.warning("'{}' is not a valid domain, on the configuration'{}'!".format(host,configName))
else:
logger.warning("'{}' is not a valid domain!".format(host))
er.append("'{}' is not a valid domain!".format(host))
continue
try:
hostname = host
port = 443
conn = ssl.create_connection((hostname,port), timeout=10)
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
sock = context.wrap_socket(conn, server_hostname=hostname)
certificate = ssl.DER_cert_to_PEM_cert(sock.getpeercert(True))
x509 = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM,certificate)
except BaseException as e:
if args['verbose']:
logger.error("Can't connect to '{}' error: {}".format(host,str(e)))
er.append("Can't connect to '{}' error: {}".format(host,str(e)))
else:
serial= '{0:x}'.format(x509.get_serial_number())
exp_date = str(x509.get_notAfter().decode('utf-8'))
dt = parse(exp_date)
daystoexp=dt.replace(tzinfo=None)-datetime.utcnow()
item = {}
item['Domain'] = str(host)
item['Serial'] = str(serial)
item['ExpDate'] = str(dt.date())
item['DaysLeft'] = daystoexp.days
certs.append(item)
if domains == []:
if configName is not None:
er.append("No customer origins found on the configuration '{}'.".format(configName))
if args['verbose']:
logger.warning("No customer origins found on the configuration '{}.".format(configName))
else:
er.append("No customer origins found.")
if args['verbose']:
logger.warning("No customer origins found.")
if certs != []:
currentConfig['certificates'] = certs
if er != []:
if args['audit'] != "list":
|
else:
errors.append(er)
item_list.append(currentConfig)
return
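# propertyManagerAPI wraps the Akamai Property Manager API (PAPI): it lists groups and properties, fetches rule trees, and searches for a property by name.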
def propertyManagerAPI(action:str,config:str=None,p:list=None):
try:
home = str(Path.home())
edgerc = EdgeRc(home+"/.edgerc")
if args['section']:
section = args['section']
else:
section = 'papi'
host = edgerc.get(section,'host')
except Exception as e:
logger.debug("Error Autehticating Edgerc {}.".format(home+edgerc))
http = requests.Session()
http.auth= EdgeGridAuth.from_edgerc(edgerc,section)
validActions = ["ListGroups","ListContracts","ListProperties","GetRuleTree","SearchProperty"]
if action not in validActions:
parser.error("Error: PAPI Unknown Action")
#ListGroups
elif action == validActions[0]:
if args['verbose']:
logger.debug("Listing account groups with PAPI.")
if args['account_key']:
endpoint='/papi/v1/groups?accountSwitchKey={}'.format(args['account_key'])
else:
endpoint= '/papi/v1/groups'
result = http.get(urljoin("https://" + host + "/", endpoint))
response = json.loads(json.dumps(result.json()))
http.close()
return response
#ListProperties
elif action == validActions[2]:
gps = propertyManagerAPI("ListGroups")
if gps is None:
logger.warning("No Groups were found in account!")
return None
# elif gps['incidentId']:
# logger.error('{}'.format(gps['title']))
# return None
for gp in gps['groups']['items']:
for contract in gp['contractIds']:
if args['verbose']:
logger.debug("Listing properties in '{}'/'{}' with PAPI.".format(gp['groupId'],contract))
if args['account_key']:
endpoint= '/papi/v1/properties?contractId={}&groupId={}&accountSwitchKey={}'.format(contract,gp['groupId'],args['account_key'])
else:
endpoint= '/papi/v1/properties?contractId={}&groupId={}'.format(contract,gp['groupId'])
result = http.get(urljoin("https://" + host + "/", endpoint))
http.close()
response = json.loads(json.dumps(result.json()))
for p in response['properties']['items']:
if p['productionVersion'] is None or p is None:
item={}
er=[]
er.append("The configuration has no active version in production.")
if args['verbose']:
logger.warning("The configuration '{}' has no active version in production.".format(p['propertyName']))
item['propertyName']=p['propertyName']
item['errors']=er
item_list.append(item)
else:
p['propertyVersion']=p['productionVersion']
del p['productionVersion']
propertyManagerAPI("GetRuleTree","",p)
elif action == validActions[3]:
if args['verbose']:
logger.debug("Getting rule tree for the '{}' property with PAPI.".format(p['propertyName']))
if args['account_key']:
endpoint= "/papi/v1/properties/{}/versions/{}/rules?contractId={}&groupId={}&validateRules=true&validateMode=fast&accountSwitchKey={}".format(
p['propertyId'],
p['propertyVersion'],
p['contractId'],
p['groupId'],
args['account_key']
)
else:
endpoint= "/papi/v1/properties/{}/versions/{}/rules?contractId={}&groupId={}&validateRules=true&validateMode=fast".format(
p['propertyId'],
p['propertyVersion'],
p['contractId'],
p['groupId']
)
result = http.get(urljoin("https://" + host + "/", endpoint))
http.close()
readObject(json.loads(json.dumps(result.json())) ,"API",p['propertyName'])
elif action == validActions[4]:
if args['verbose']:
logger.debug("Looking for the configuration '{}'.".format(config))
if args['account_key']:
endpoint='/papi/v1/search/find-by-value?accountSwitchKey={}'.format(args['account_key'])
else:
endpoint='/papi/v1/search/find-by-value'
postbody = {}
postbody['propertyName'] = config
result = http.post(urljoin("https://" + host + "/", endpoint),json.dumps(postbody), headers={"Content-Type": "application/json"})
http.close()
if result.json()['versions']['items'] == []:
item={}
er=[]
item['propertyName']=config
if args['verbose']:
logger.warning("The configuration '{}' was not found.".format(config))
er.append("The configuration was not found.")
item['errors']=er
item_list.append(item)
return
else:
if args['verbose']:
logger.debug("The configuration '{}' was found.".format(config))
prodversion = None
for i in result.json()['versions']['items']:
if i['productionStatus'] == "ACTIVE":
prodversion = True
propertyManagerAPI("GetRuleTree","",i)
if prodversion is None:
item={}
er=[]
if args['verbose']:
logger.warning("The configuration '{}' has no active version in production.".format(config))
er.append("The configuration has no active version in production.")
item['propertyName']=config
item['errors']=er
item_list.append(item)
return json.loads(json.dumps(result.json()))
return None
def main():
if args['version']:
print(version)
return
if not args['audit']:
parser.print_help()
if args['verbose']:
configure_logging()
logger.info("[start]")
if args['audit'] == "list":
if args['domains'] is None:
parser.error("--domains is required to provide list of domains.")
else:
getCertificates(args['domains'])
printJson()
elif (args['audit'] == "file"):
if (args['file'] is None):
parser.error("--file is required to provide the file to audited.")
else:
readObject(args['file'],args['file_type'])
printJson()
elif (args['audit'] == "config"):
if args['config_name'] is None:
parser.error("--config-name is required to provide configuration to be audited.")
else:
for i in args['config_name']:
propertyManagerAPI("SearchProperty",i)
printJson()
elif (args['audit'] == "account"):
#a = readEdgeRC()
propertyManagerAPI("ListProperties")
printJson()
if __name__ == '__main__':
main()
| currentConfig['errors'] = er | conditional_block |
ssl-audit.py | import OpenSSL , ssl, argparse ,json, os.path, validators, requests, logging
from datetime import datetime
from dateutil.parser import parse
from urllib.parse import urljoin
from akamai.edgegrid import EdgeGridAuth, EdgeRc
from pathlib import Path
#TODO: FIX logger format
#turn off logger
#send output to tmp file
#improve help documentation
parser = argparse.ArgumentParser(description='Certificate Expiration Audit\nLatest version and documentation can be found here:\nhttps://github.com/roymartinezblanco/Akamai-SSL-Expiration-Audit',formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument('--version', help='Show script version information',
required=False, action='store_true')
parser.add_argument('--audit', type=str, choices=['account','config','file','list'], help='*required* Type of Audit to be done: [account,config,file,list]',
required=False)
parser.add_argument('--domains', nargs='+', type=str, help='List of domains to query.',
required=False)
parser.add_argument('--file-type', type=str, choices=['list','akamai'], help='File Type (list, akamai)',
required=False, default='akamai')
parser.add_argument('--file', type=str, help='File with list of domains (one per line)',
required=False)
parser.add_argument('--config-name',nargs='+', type=str, help='Name or List of Names to be audited.',
required=False)
parser.add_argument('--verbose', help='Show debug information',
required=False, action='store_true')
parser.add_argument('--section', type=str, help='Select an Edgerc section other than the default',
required=False)
parser.add_argument('--account-key', type=str, help='Account ID to Query for multi account management (switch key)',
required=False)
args = vars(parser.parse_args())
### Global Variables
version= "1.0.30"
errors = []
items = {}
item_list= []
logger = logging.getLogger("SSL-AUDIT")
def configure_logging():
logger.setLevel(logging.DEBUG)
# Format for our loglines
formatter = logging.Formatter("[%(asctime)s] - %(name)s - %(levelname)s - %(message)s")
# Setup console logging
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
ch.setFormatter(formatter)
logger.addHandler(ch)
# Setup file logging as well
# fh = logging.FileHandler(LOG_FILENAME)
# fh.setLevel(logging.INFO)
# fh.setFormatter(formatter)
# logger.addHandler(fh)
def readObject(File,Ftype:str,configName:str=None):
origins=[]
if Ftype != "API":
if os.path.exists(File):
if Ftype == "list":
if args['verbose']:
#print("...... Reading file '{}'.".format(File))
logger.debug("Reading file '{}'.".format(File))
lines = [line.rstrip('\n') for line in open(File)]
getCertificates(lines)
else:
try:
with open(File) as handle:
dictdump = json.loads(handle.read())
except:
parser.error("Unable to Parse JSON File, please validate format.")
else:
findOrigins(dictdump,origins,configName)
getCertificates(origins,configName)
else:
parser.error("The File {} does not exist!".format(File))
else:
if args['verbose']:
logger.debug("Reading rules for the property '{}' .".format(configName))
findOrigins(File,origins,configName)
getCertificates(origins,configName)
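# findOrigins recursively walks a Property Manager rule tree and collects hostnames from origin behaviors of type CUSTOMER.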
def findOrigins(obj,origins:list,configName:str=None):
for ok, ov in obj.items():
if ok == "name" and ov == "origin":
options = dict(obj["options"])
if options["originType"] == "CUSTOMER":
if args['verbose']:
logger.debug("Origin behavior found with the value '{}' on the configuration '{}'.".format(dict(obj["options"])["hostname"],configName))
origins.append (dict(obj["options"])["hostname"])
for k, v in obj.items():
if isinstance(v,dict) or isinstance(v,list):
if "values" not in k.lower():
if isinstance(v,list):
if len(v) > 0:
for i in v:
if isinstance(i, dict):
findOrigins(dict(i),origins,configName)
else:
findOrigins(v,origins,configName)
def printJson():
if args['verbose']:
logger.debug("Printing JSON.")
logger.debug("[end]")
if len(item_list) == 0:
logger.error("No output generated to print!")
return None
if item_list[0] != {}:
items['items'] = item_list
if args['audit'] == "list":
if len(errors) != 0:
items['errors'] = errors
formatted_json = json.dumps(items, sort_keys=False, indent=4)
print(formatted_json)
def getCertificates(domains: list,configName:str=None):
currentConfig={}
if args['audit'] != "list" and args['audit'] != "file":
currentConfig['propertyName'] = configName
certs=[]
er=[]
for host in domains:
if args['verbose']:
logger.debug("Looking up the certificate for '{}' ".format(host))
if "{{" in host:
if args['verbose']:
logger.warning("'{}' is a variable and will not be looked up!".format(host))
er.append("'{}' is a variable and will not be looked up!".format(host))
else:
if validators.domain(host) != True:
if args['verbose']:
if configName is not None:
logger.warning("'{}' is not a valid domain, on the configuration'{}'!".format(host,configName))
else:
logger.warning("'{}' is not a valid domain!".format(host))
er.append("'{}' is not a valid domain!".format(host))
continue
try:
hostname = host
port = 443
conn = ssl.create_connection((hostname,port), timeout=10)
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
sock = context.wrap_socket(conn, server_hostname=hostname)
certificate = ssl.DER_cert_to_PEM_cert(sock.getpeercert(True))
x509 = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM,certificate)
except BaseException as e:
if args['verbose']:
logger.error("Can't connect to '{}' error: {}".format(host,str(e)))
er.append("Can't connect to '{}' error: {}".format(host,str(e)))
else:
serial= '{0:x}'.format(x509.get_serial_number())
exp_date = str(x509.get_notAfter().decode('utf-8'))
dt = parse(exp_date)
daystoexp=dt.replace(tzinfo=None)-datetime.utcnow()
item = {}
item['Domain'] = str(host)
item['Serial'] = str(serial)
item['ExpDate'] = str(dt.date())
item['DaysLeft'] = daystoexp.days
certs.append(item)
if domains == []:
if configName is not None:
er.append("No customer origins found on the configuration '{}'.".format(configName))
if args['verbose']:
logger.warning("No customer origins found on the configuration '{}.".format(configName))
else:
er.append("No customer origins found.")
if args['verbose']:
logger.warning("No customer origins found.")
if certs != []:
currentConfig['certificates'] = certs
if er != []:
if args['audit'] != "list":
currentConfig['errors'] = er
else:
errors.append(er)
item_list.append(currentConfig)
return
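# propertyManagerAPI wraps the Akamai Property Manager API (PAPI): it lists groups and properties, fetches rule trees, and searches for a property by name.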
def propertyManagerAPI(action:str,config:str=None,p:list=None):
try:
home = str(Path.home())
edgerc = EdgeRc(home+"/.edgerc")
if args['section']:
section = args['section']
else:
section = 'papi'
host = edgerc.get(section,'host')
except Exception as e:
logger.debug("Error Autehticating Edgerc {}.".format(home+edgerc))
http = requests.Session()
http.auth= EdgeGridAuth.from_edgerc(edgerc,section)
validActions = ["ListGroups","ListContracts","ListProperties","GetRuleTree","SearchProperty"]
if action not in validActions:
parser.error("Error: PAPI Unknown Action")
#ListGroups
elif action == validActions[0]:
if args['verbose']:
logger.debug("Listing account groups with PAPI.")
if args['account_key']:
endpoint='/papi/v1/groups?accountSwitchKey={}'.format(args['account_key'])
else:
endpoint= '/papi/v1/groups'
result = http.get(urljoin("https://" + host + "/", endpoint))
response = json.loads(json.dumps(result.json()))
http.close()
return response
#ListProperties
elif action == validActions[2]:
gps = propertyManagerAPI("ListGroups")
if gps is None:
logger.warning("No Groups were found in account!")
return None
# elif gps['incidentId']:
# logger.error('{}'.format(gps['title']))
# return None
for gp in gps['groups']['items']:
for contract in gp['contractIds']:
if args['verbose']:
logger.debug("Listing properties in '{}'/'{}' with PAPI.".format(gp['groupId'],contract))
if args['account_key']:
endpoint= '/papi/v1/properties?contractId={}&groupId={}&accountSwitchKey={}'.format(contract,gp['groupId'],args['account_key'])
else:
endpoint= '/papi/v1/properties?contractId={}&groupId={}'.format(contract,gp['groupId'])
result = http.get(urljoin("https://" + host + "/", endpoint))
http.close()
response = json.loads(json.dumps(result.json()))
for p in response['properties']['items']:
if p['productionVersion'] is None or p is None:
item={}
er=[]
er.append("The configuration has no active version in production.")
if args['verbose']:
logger.warning("The configuration '{}' has no active version in production.".format(p['propertyName']))
item['propertyName']=p['propertyName']
item['errors']=er
item_list.append(item)
else:
p['propertyVersion']=p['productionVersion']
del p['productionVersion']
propertyManagerAPI("GetRuleTree","",p)
elif action == validActions[3]:
if args['verbose']:
logger.debug("Getting rule tree for the '{}' property with PAPI.".format(p['propertyName']))
if args['account_key']:
endpoint= "/papi/v1/properties/{}/versions/{}/rules?contractId={}&groupId={}&validateRules=true&validateMode=fast&accountSwitchKey={}".format(
p['propertyId'],
p['propertyVersion'],
p['contractId'],
p['groupId'],
args['account_key']
)
else:
endpoint= "/papi/v1/properties/{}/versions/{}/rules?contractId={}&groupId={}&validateRules=true&validateMode=fast".format(
p['propertyId'],
p['propertyVersion'],
p['contractId'],
p['groupId']
)
result = http.get(urljoin("https://" + host + "/", endpoint))
http.close()
readObject(json.loads(json.dumps(result.json())) ,"API",p['propertyName'])
elif action == validActions[4]:
if args['verbose']:
logger.debug("Looking for the configuration '{}'.".format(config))
if args['account_key']:
endpoint='/papi/v1/search/find-by-value?accountSwitchKey={}'.format(args['account_key'])
else:
endpoint='/papi/v1/search/find-by-value'
postbody = {}
postbody['propertyName'] = config
result = http.post(urljoin("https://" + host + "/", endpoint),json.dumps(postbody), headers={"Content-Type": "application/json"})
http.close()
if result.json()['versions']['items'] == []:
item={}
er=[]
item['propertyName']=config
if args['verbose']:
logger.warning("The configuration '{}' was not found.".format(config))
er.append("The configuration was not found.")
item['errors']=er
item_list.append(item)
return
else:
if args['verbose']:
logger.debug("The configuration '{}' was found.".format(config))
prodversion = None
for i in result.json()['versions']['items']:
if i['productionStatus'] == "ACTIVE":
prodversion = True
propertyManagerAPI("GetRuleTree","",i)
if prodversion is None:
item={}
er=[]
if args['verbose']:
logger.warning("The configuration '{}' has no active version in production.".format(config))
er.append("The configuration has no active version in production.")
item['propertyName']=config
item['errors']=er
item_list.append(item)
return json.loads(json.dumps(result.json()))
return None
def | ():
if args['version']:
print(version)
return
if not args['audit']:
parser.print_help()
if args['verbose']:
configure_logging()
logger.info("[start]")
if args['audit'] == "list":
if args['domains'] is None:
parser.error("--domains is required to provide list of domains.")
else:
getCertificates(args['domains'])
printJson()
elif (args['audit'] == "file"):
if (args['file'] is None):
parser.error("--file is required to provide the file to audited.")
else:
readObject(args['file'],args['file_type'])
printJson()
elif (args['audit'] == "config"):
if args['config_name'] is None:
parser.error("--config-name is required to provide configuration to be audited.")
else:
for i in args['config_name']:
propertyManagerAPI("SearchProperty",i)
printJson()
elif (args['audit'] == "account"):
#a = readEdgeRC()
propertyManagerAPI("ListProperties")
printJson()
if __name__ == '__main__':
main()
| main | identifier_name |
ssl-audit.py | import OpenSSL , ssl, argparse ,json, os.path, validators, requests, logging
from datetime import datetime
from dateutil.parser import parse
from urllib.parse import urljoin
from akamai.edgegrid import EdgeGridAuth, EdgeRc
from pathlib import Path
#TODO: FIX logger format
#turn off logger
#send output to tmp file
#improve help documentation
parser = argparse.ArgumentParser(description='Certificate Expiration Audit\nLatest version and documentation can be found here:\nhttps://github.com/roymartinezblanco/Akamai-SSL-Expiration-Audit',formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument('--version', help='Show script version information',
required=False, action='store_true')
parser.add_argument('--audit', type=str, choices=['account','config','file','list'], help='*required* Type of Audit to be done: [account,config,file,list]',
required=False)
parser.add_argument('--domains', nargs='+', type=str, help='List of domains to query.',
required=False)
parser.add_argument('--file-type', type=str, choices=['list','akamai'], help='File Type (list, akamai)',
required=False, default='akamai')
parser.add_argument('--file', type=str, help='File with list of domains (one per line)',
required=False)
parser.add_argument('--config-name',nargs='+', type=str, help='Name or List of Names to be audited.',
required=False)
parser.add_argument('--verbose', help='Show debug information',
required=False, action='store_true')
parser.add_argument('--section', type=str, help='Select an Edgerc section other than the default',
required=False)
parser.add_argument('--account-key', type=str, help='Account ID to Query for multi account management (switch key)',
required=False)
args = vars(parser.parse_args())
### Global Variables
version= "1.0.30"
errors = []
items = {}
item_list= []
logger = logging.getLogger("SSL-AUDIT")
def configure_logging():
logger.setLevel(logging.DEBUG)
# Format for our loglines
formatter = logging.Formatter("[%(asctime)s] - %(name)s - %(levelname)s - %(message)s")
# Setup console logging
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
ch.setFormatter(formatter)
logger.addHandler(ch)
# Setup file logging as well
# fh = logging.FileHandler(LOG_FILENAME)
# fh.setLevel(logging.INFO)
# fh.setFormatter(formatter)
# logger.addHandler(fh)
def readObject(File,Ftype:str,configName:str=None):
origins=[]
if Ftype != "API":
if os.path.exists(File):
if Ftype == "list":
if args['verbose']:
#print("...... Reading file '{}'.".format(File))
logger.debug("Reading file '{}'.".format(File))
lines = [line.rstrip('\n') for line in open(File)]
getCertificates(lines)
else:
try:
with open(File) as handle:
dictdump = json.loads(handle.read())
except:
parser.error("Unable to Parse JSON File, please validate format.")
else:
findOrigins(dictdump,origins,configName)
getCertificates(origins,configName)
else:
parser.error("The File {} does not exist!".format(File))
else:
if args['verbose']:
logger.debug("Reading rules for the property '{}' .".format(configName))
findOrigins(File,origins,configName)
getCertificates(origins,configName)
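# findOrigins recursively walks a Property Manager rule tree and collects hostnames from origin behaviors of type CUSTOMER.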
def findOrigins(obj,origins:list,configName:str=None):
for ok, ov in obj.items():
if ok == "name" and ov == "origin":
options = dict(obj["options"])
if options["originType"] == "CUSTOMER":
if args['verbose']:
logger.debug("Origin behavior found with the value '{}' on the configuration '{}'.".format(dict(obj["options"])["hostname"],configName))
origins.append (dict(obj["options"])["hostname"])
for k, v in obj.items():
if isinstance(v,dict) or isinstance(v,list):
if "values" not in k.lower():
if isinstance(v,list):
if len(v) > 0:
for i in v:
if isinstance(i, dict):
findOrigins(dict(i),origins,configName)
else:
findOrigins(v,origins,configName)
def printJson():
if args['verbose']:
logger.debug("Printing JSON.")
logger.debug("[end]")
if len(item_list) == 0:
logger.error("No output generated to print!")
return None
if item_list[0] != {}:
items['items'] = item_list
if args['audit'] == "list":
if len(errors) != 0:
items['errors'] = errors
formatted_json = json.dumps(items, sort_keys=False, indent=4)
print(formatted_json)
def getCertificates(domains: list,configName:str=None):
currentConfig={}
if args['audit'] != "list" and args['audit'] != "file":
currentConfig['propertyName'] = configName
certs=[]
er=[]
for host in domains:
if args['verbose']:
logger.debug("Looking up the certificate for '{}' ".format(host))
if "{{" in host:
if args['verbose']:
logger.warning("'{}' is a variable and will not be looked up!".format(host))
er.append("'{}' is a variable and will not be looked up!".format(host))
else:
if validators.domain(host) != True:
if args['verbose']:
if configName is not None:
logger.warning("'{}' is not a valid domain, on the configuration'{}'!".format(host,configName))
else:
logger.warning("'{}' is not a valid domain!".format(host))
er.append("'{}' is not a valid domain!".format(host))
continue
try:
hostname = host
port = 443
conn = ssl.create_connection((hostname,port), timeout=10)
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
sock = context.wrap_socket(conn, server_hostname=hostname)
certificate = ssl.DER_cert_to_PEM_cert(sock.getpeercert(True))
x509 = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM,certificate)
except BaseException as e:
if args['verbose']:
logger.error("Can't connect to '{}' error: {}".format(host,str(e)))
er.append("Can't connect to '{}' error: {}".format(host,str(e)))
else:
serial= '{0:x}'.format(x509.get_serial_number())
exp_date = str(x509.get_notAfter().decode('utf-8'))
dt = parse(exp_date)
daystoexp=dt.replace(tzinfo=None)-datetime.utcnow()
item = {}
item['Domain'] = str(host)
item['Serial'] = str(serial)
item['ExpDate'] = str(dt.date())
item['DaysLeft'] = daystoexp.days
certs.append(item)
if domains == []:
if configName is not None:
er.append("No customer origins found on the configuration '{}'.".format(configName))
if args['verbose']:
logger.warning("No customer origins found on the configuration '{}.".format(configName))
else:
er.append("No customer origins found.")
if args['verbose']:
logger.warning("No customer origins found.")
if certs != []:
currentConfig['certificates'] = certs
if er != []:
if args['audit'] != "list":
currentConfig['errors'] = er
else:
errors.append(er)
item_list.append(currentConfig)
return
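# propertyManagerAPI wraps the Akamai Property Manager API (PAPI): it lists groups and properties, fetches rule trees, and searches for a property by name.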
def propertyManagerAPI(action:str,config:str=None,p:list=None):
try:
home = str(Path.home())
edgerc = EdgeRc(home+"/.edgerc")
if args['section']:
section = args['section']
else:
section = 'papi'
host = edgerc.get(section,'host')
except Exception as e:
logger.debug("Error Autehticating Edgerc {}.".format(home+edgerc))
http = requests.Session()
http.auth= EdgeGridAuth.from_edgerc(edgerc,section)
validActions = ["ListGroups","ListContracts","ListProperties","GetRuleTree","SearchProperty"]
if action not in validActions:
parser.error("Error: PAPI Unknown Action")
#ListGroups
elif action == validActions[0]:
if args['verbose']:
logger.debug("Listing account groups with PAPI.")
if args['account_key']:
endpoint='/papi/v1/groups?accountSwitchKey={}'.format(args['account_key'])
else:
endpoint= '/papi/v1/groups'
result = http.get(urljoin("https://" + host + "/", endpoint))
response = json.loads(json.dumps(result.json()))
http.close()
return response
#ListProperties
elif action == validActions[2]:
gps = propertyManagerAPI("ListGroups")
if gps is None:
logger.warning("No Groups were found in account!")
return None
# elif gps['incidentId']:
# logger.error('{}'.format(gps['title']))
# return None
for gp in gps['groups']['items']:
for contract in gp['contractIds']:
if args['verbose']:
logger.debug("Listing properties in '{}'/'{}' with PAPI.".format(gp['groupId'],contract))
if args['account_key']:
endpoint= '/papi/v1/properties?contractId={}&groupId={}&accountSwitchKey={}'.format(contract,gp['groupId'],args['account_key'])
else:
endpoint= '/papi/v1/properties?contractId={}&groupId={}'.format(contract,gp['groupId'])
result = http.get(urljoin("https://" + host + "/", endpoint))
http.close()
response = json.loads(json.dumps(result.json()))
for p in response['properties']['items']:
if p['productionVersion'] is None or p is None:
item={}
er=[]
er.append("The configuration has no active version in production.")
if args['verbose']:
logger.warning("The configuration '{}' has no active version in production.".format(p['propertyName']))
item['propertyName']=p['propertyName']
item['errors']=er
item_list.append(item)
else:
p['propertyVersion']=p['productionVersion']
del p['productionVersion']
propertyManagerAPI("GetRuleTree","",p)
elif action == validActions[3]:
if args['verbose']:
logger.debug("Getting rule tree for the '{}' property with PAPI.".format(p['propertyName']))
if args['account_key']:
endpoint= "/papi/v1/properties/{}/versions/{}/rules?contractId={}&groupId={}&validateRules=true&validateMode=fast&accountSwitchKey={}".format(
p['propertyId'],
p['propertyVersion'],
p['contractId'],
p['groupId'],
args['account_key']
)
else:
endpoint= "/papi/v1/properties/{}/versions/{}/rules?contractId={}&groupId={}&validateRules=true&validateMode=fast".format(
p['propertyId'],
p['propertyVersion'],
p['contractId'],
p['groupId']
)
result = http.get(urljoin("https://" + host + "/", endpoint))
http.close()
readObject(json.loads(json.dumps(result.json())) ,"API",p['propertyName'])
elif action == validActions[4]:
if args['verbose']:
logger.debug("Looking for the configuration '{}'.".format(config))
if args['account_key']:
endpoint='/papi/v1/search/find-by-value?accountSwitchKey={}'.format(args['account_key'])
else:
endpoint='/papi/v1/search/find-by-value'
postbody = {}
postbody['propertyName'] = config
result = http.post(urljoin("https://" + host + "/", endpoint),json.dumps(postbody), headers={"Content-Type": "application/json"})
http.close()
if result.json()['versions']['items'] == []:
item={}
er=[]
item['propertyName']=config
if args['verbose']:
logger.warning("The configuration '{}' was not found.".format(config))
er.append("The configuration was not found.")
item['errors']=er
item_list.append(item)
return
else:
if args['verbose']:
logger.debug("The configuration '{}' was found.".format(config))
prodversion = None
for i in result.json()['versions']['items']:
if i['productionStatus'] == "ACTIVE":
prodversion = True
propertyManagerAPI("GetRuleTree","",i)
if prodversion is None:
item={}
er=[]
if args['verbose']:
logger.warning("The configuration '{}' has no active version in production.".format(config))
er.append("The configuration has no active version in production.")
item['propertyName']=config
item['errors']=er
item_list.append(item)
return json.loads(json.dumps(result.json()))
return None
def main():
|
if __name__ == '__main__':
main()
| if args['version']:
print(version)
return
if not args['audit']:
parser.print_help()
if args['verbose']:
configure_logging()
logger.info("[start]")
if args['audit'] == "list":
if args['domains'] is None:
parser.error("--domains is required to provide list of domains.")
else:
getCertificates(args['domains'])
printJson()
elif (args['audit'] == "file"):
if (args['file'] is None):
parser.error("--file is required to provide the file to audited.")
else:
readObject(args['file'],args['file_type'])
printJson()
elif (args['audit'] == "config"):
if args['config_name'] is None:
parser.error("--config-name is required to provide configuration to be audited.")
else:
for i in args['config_name']:
propertyManagerAPI("SearchProperty",i)
printJson()
elif (args['audit'] == "account"):
#a = readEdgeRC()
propertyManagerAPI("ListProperties")
printJson() | identifier_body |
ssl-audit.py | import OpenSSL , ssl, argparse ,json, os.path, validators, requests, logging
from datetime import datetime
from dateutil.parser import parse
from urllib.parse import urljoin
from akamai.edgegrid import EdgeGridAuth, EdgeRc
from pathlib import Path
#TODO: FIX logger format
#turn off logger
#send output to tmp file
#improve help documentation
parser = argparse.ArgumentParser(description='Certificate Expiration Audit\nLatest version and documentation can be found here:\nhttps://github.com/roymartinezblanco/Akamai-SSL-Expiration-Audit',formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument('--version', help='Show script version information',
required=False, action='store_true')
parser.add_argument('--audit', type=str, choices=['account','config','file','list'], help='*required* Type of Audit to be done: [account,config,file,list]',
required=False)
parser.add_argument('--domains', nargs='+', type=str, help='List of domains to query.',
required=False)
parser.add_argument('--file-type', type=str, choices=['list','akamai'], help='File Type (list, akamai)',
required=False, default='akamai')
parser.add_argument('--file', type=str, help='File with list of domains (one per line)',
required=False)
parser.add_argument('--config-name',nargs='+', type=str, help='Name or List of Names to be audited.',
required=False)
parser.add_argument('--verbose', help='Show debug information',
required=False, action='store_true')
parser.add_argument('--section', type=str, help='Select an Edgerc section other than the default',
required=False)
parser.add_argument('--account-key', type=str, help='Account ID to Query for multi account management (switch key)',
required=False)
args = vars(parser.parse_args())
### Global Variables
version= "1.0.30"
errors = []
items = {}
item_list= []
logger = logging.getLogger("SSL-AUDIT")
def configure_logging():
logger.setLevel(logging.DEBUG)
# Format for our loglines
formatter = logging.Formatter("[%(asctime)s] - %(name)s - %(levelname)s - %(message)s")
# Setup console logging
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
ch.setFormatter(formatter)
logger.addHandler(ch)
# Setup file logging as well
# fh = logging.FileHandler(LOG_FILENAME)
# fh.setLevel(logging.INFO)
# fh.setFormatter(formatter)
# logger.addHandler(fh)
def readObject(File,Ftype:str,configName:str=None):
origins=[]
if Ftype != "API":
if os.path.exists(File):
if Ftype == "list":
if args['verbose']:
#print("...... Reading file '{}'.".format(File))
logger.debug("Reading file '{}'.".format(File))
lines = [line.rstrip('\n') for line in open(File)]
getCertificates(lines)
else:
try:
with open(File) as handle:
dictdump = json.loads(handle.read())
except:
parser.error("Unable to Parse JSON File, please validate format.") |
getCertificates(origins,configName)
else:
parser.error("The File {} does not exist!".format(File))
else:
if args['verbose']:
logger.debug("Reading rules for the property '{}' .".format(configName))
findOrigins(File,origins,configName)
getCertificates(origins,configName)
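# findOrigins recursively walks a Property Manager rule tree and collects hostnames from origin behaviors of type CUSTOMER.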
def findOrigins(obj,origins:list,configName:str=None):
for ok, ov in obj.items():
if ok == "name" and ov == "origin":
options = dict(obj["options"])
if options["originType"] == "CUSTOMER":
if args['verbose']:
logger.debug("Origin behavior found with the value '{}' on the configuration '{}'.".format(dict(obj["options"])["hostname"],configName))
origins.append (dict(obj["options"])["hostname"])
for k, v in obj.items():
if isinstance(v,dict) or isinstance(v,list):
if "values" not in k.lower():
if isinstance(v,list):
if len(v) > 0:
for i in v:
if isinstance(i, dict):
findOrigins(dict(i),origins,configName)
else:
findOrigins(v,origins,configName)
def printJson():
if args['verbose']:
logger.debug("Printing JSON.")
logger.debug("[end]")
if len(item_list) == 0:
logger.error("No output generated to print!")
return None
if item_list[0] != {}:
items['items'] = item_list
if args['audit'] == "list":
if len(errors) != 0:
items['errors'] = errors
formatted_json = json.dumps(items, sort_keys=False, indent=4)
print(formatted_json)
def getCertificates(domains: list,configName:str=None):
currentConfig={}
if args['audit'] != "list" and args['audit'] != "file":
currentConfig['propertyName'] = configName
certs=[]
er=[]
for host in domains:
if args['verbose']:
logger.debug("Looking up the certificate for '{}' ".format(host))
if "{{" in host:
if args['verbose']:
logger.warning("'{}' is a variable and will not be looked up!".format(host))
er.append("'{}' is a variable and will not be looked up!".format(host))
else:
if validators.domain(host) != True:
if args['verbose']:
if configName is not None:
logger.warning("'{}' is not a valid domain, on the configuration'{}'!".format(host,configName))
else:
logger.warning("'{}' is not a valid domain!".format(host))
er.append("'{}' is not a valid domain!".format(host))
continue
try:
hostname = host
port = 443
conn = ssl.create_connection((hostname,port), timeout=10)
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
sock = context.wrap_socket(conn, server_hostname=hostname)
certificate = ssl.DER_cert_to_PEM_cert(sock.getpeercert(True))
x509 = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM,certificate)
except BaseException as e:
if args['verbose']:
logger.error("Can't connect to '{}' error: {}".format(host,str(e)))
er.append("Can't connect to '{}' error: {}".format(host,str(e)))
else:
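# Connection succeeded: record the serial number, the notAfter expiry date and
# the number of days left until expiry (relative to the current UTC time).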
serial= '{0:x}'.format(x509.get_serial_number())
exp_date = str(x509.get_notAfter().decode('utf-8'))
dt = parse(exp_date)
daystoexp=dt.replace(tzinfo=None)-datetime.utcnow()
item = {}
item['Domain'] = str(host)
item['Serial'] = str(serial)
item['ExpDate'] = str(dt.date())
item['DaysLeft'] = daystoexp.days
certs.append(item)
if domains == []:
if configName is not None:
er.append("No customer origins found on the configuration '{}'.".format(configName))
if args['verbose']:
logger.warning("No customer origins found on the configuration '{}.".format(configName))
else:
er.append("No customer origins found.")
if args['verbose']:
logger.warning("No customer origins found.")
if certs != []:
currentConfig['certificates'] = certs
if er != []:
if args['audit'] != "list":
currentConfig['errors'] = er
else:
errors.append(er)
item_list.append(currentConfig)
return
def propertyManagerAPI(action:str,config:str=None,p:list=None):
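# Thin wrapper around the Akamai Property Manager API (PAPI): authenticates with
# ~/.edgerc via EdgeGridAuth and dispatches on "action" (ListGroups, ListProperties,
# GetRuleTree, SearchProperty); "config" is a property name and "p" a property record.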
try:
home = str(Path.home())
edgerc = EdgeRc(home+"/.edgerc")
if args['section']:
section = args['section']
else:
section = 'papi'
host = edgerc.get(section,'host')
except Exception as e:
logger.debug("Error Autehticating Edgerc {}.".format(home+edgerc))
http = requests.Session()
http.auth= EdgeGridAuth.from_edgerc(edgerc,section)
validActions = ["ListGroups","ListContracts","ListProperties","GetRuleTree","SearchProperty"]
if action not in validActions:
parser.error("Error: PAPI Unknown Action")
#ListGroups
elif action == validActions[0]:
if args['verbose']:
logger.debug("Listing account groups with PAPI.")
if args['account_key']:
endpoint='/papi/v1/groups?accountSwitchKey={}'.format(args['account_key'])
else:
endpoint= '/papi/v1/groups'
result = http.get(urljoin("https://" + host + "/", endpoint))
response = json.loads(json.dumps(result.json()))
http.close()
return response
#ListProperties
elif action == validActions[2]:
gps = propertyManagerAPI("ListGroups")
if gps is None:
logger.warning("No Groups were found in account!")
return None
# elif gps['incidentId']:
# logger.error('{}'.format(gps['title']))
# return None
for gp in gps['groups']['items']:
for contract in gp['contractIds']:
if args['verbose']:
logger.debug("Listing properties in '{}'/'{}' with PAPI.".format(gp['groupId'],contract))
if args['account_key']:
endpoint= '/papi/v1/properties?contractId={}&groupId={}&accountSwitchKey={}'.format(contract,gp['groupId'],args['account_key'])
else:
endpoint= '/papi/v1/properties?contractId={}&groupId={}'.format(contract,gp['groupId'])
result = http.get(urljoin("https://" + host + "/", endpoint))
http.close()
response = json.loads(json.dumps(result.json()))
for p in response['properties']['items']:
if p is None or p.get('productionVersion') is None:
item={}
er=[]
er.append("The configuration has no active version in production.")
if args['verbose']:
logger.warning("The configuration '{}' has no active version in production.".format(p['propertyName']))
item['propertyName']=p['propertyName']
item['errors']=er
item_list.append(item)
else:
p['propertyVersion']=p['productionVersion']
del p['productionVersion']
propertyManagerAPI("GetRuleTree","",p)
elif action == validActions[3]:
if args['verbose']:
logger.debug("Getting rule tree for the '{}' property with PAPI.".format(p['propertyName']))
if args['account_key']:
endpoint= "/papi/v1/properties/{}/versions/{}/rules?contractId={}&groupId={}&validateRules=true&validateMode=fast&accountSwitchKey={}".format(
p['propertyId'],
p['propertyVersion'],
p['contractId'],
p['groupId'],
args['account_key']
)
else:
endpoint= "/papi/v1/properties/{}/versions/{}/rules?contractId={}&groupId={}&validateRules=true&validateMode=fast".format(
p['propertyId'],
p['propertyVersion'],
p['contractId'],
p['groupId']
)
result = http.get(urljoin("https://" + host + "/", endpoint))
http.close()
readObject(json.loads(json.dumps(result.json())) ,"API",p['propertyName'])
elif action == validActions[4]:
if args['verbose']:
logger.debug("Looking for the configuration '{}'.".format(config))
if args['account_key']:
endpoint='/papi/v1/search/find-by-value?accountSwitchKey={}'.format(args['account_key'])
else:
endpoint='/papi/v1/search/find-by-value'
postbody = {}
postbody['propertyName'] = config
result = http.post(urljoin("https://" + host + "/", endpoint),json.dumps(postbody), headers={"Content-Type": "application/json"})
http.close()
if result.json()['versions']['items'] == []:
item={}
er=[]
item['propertyName']=config
if args['verbose']:
logger.warning("The configuration '{}' was not found.".format(config))
er.append("The configuration was not found.")
item['errors']=er
item_list.append(item)
return
else:
if args['verbose']:
logger.debug("The configuration '{}' was found.".format(config))
prodversion = None
for i in result.json()['versions']['items']:
if i['productionStatus'] == "ACTIVE":
prodversion = True
propertyManagerAPI("GetRuleTree","",i)
if prodversion is None:
item={}
er=[]
if args['verbose']:
logger.warning("The configuration '{}' has no active version in production.".format(config))
er.append("The configuration has no active version in production.")
item['propertyName']=config
item['errors']=er
item_list.append(item)
return json.loads(json.dumps(result.json()))
return None
def main():
if args['version']:
print(version)
return
if not args['audit']:
parser.print_help()
if args['verbose']:
configure_logging()
logger.info("[start]")
if args['audit'] == "list":
if args['domains'] is None:
parser.error("--domains is required to provide list of domains.")
else:
getCertificates(args['domains'])
printJson()
elif (args['audit'] == "file"):
if (args['file'] is None):
parser.error("--file is required to provide the file to audited.")
else:
readObject(args['file'],args['file_type'])
printJson()
elif (args['audit'] == "config"):
if args['config_name'] is None:
parser.error("--config-name is required to provide configuration to be audited.")
else:
for i in args['config_name']:
propertyManagerAPI("SearchProperty",i)
printJson()
elif (args['audit'] == "account"):
#a = readEdgeRC()
propertyManagerAPI("ListProperties")
printJson()
if __name__ == '__main__':
main() | else:
findOrigins(dictdump,origins,configName) | random_line_split |
LSTM-cartpole4.py | import gym, random, pickle, os.path, math, glob
import argparse
import rlscope.api as rlscope
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Input, Dense, LSTM, Activation
from tensorflow.keras.optimizers import RMSprop
import numpy as np
import tensorflow as tf
| # from tensorboardX import SummaryWriter
# USE_CUDA = torch.cuda.is_available()
# dtype = torch.cuda.FloatTensor if torch.cuda.is_available() else torch.FloatTensor
# Variable = lambda *args, **kwargs: autograd.Variable(*args, **kwargs).cuda() if USE_CUDA else autograd.Variable(*args, **kwargs)
#physical_devices = tf.config.list_physical_devices('GPU')
#tf.config.experimental.set_memory_growth(physical_devices[0],enable=True)
class DRQN:
def __init__(self, num_actions=2, state=None): # device=torch.device("cpu")):
"""
Initialize a deep recurrent Q-learning network (DRQN).
Arguments:
in_channels: number of channels of the input,
i.e. the number of most recent frames stacked together, as described in the paper
num_actions: number of action-values to output, one-to-one correspondence to actions in the game.
device: cpu or gpu (cuda:0)
"""
super(DRQN, self).__init__()
# self.device = device
self.num_actions = num_actions
self.input = Input(shape=(1, 4))
self.lstm1 = LSTM(128, input_shape=(256, 32, 4), return_sequences=True)(self.input)
self.lstm2 = LSTM(128, return_sequences=True)(self.lstm1)
self.lstm3 = LSTM(128, return_sequences=True)(self.lstm2)
self.dense1 = Dense(128, activation='relu')(self.lstm3)
self.output = Dense(2, activation='linear')(self.dense1)
self.state = state
self.model = Model(inputs=self.input, outputs=self.output)
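# Resulting graph: Input(1, 4) -> three stacked LSTM(128) layers (return_sequences=True)
# -> Dense(128, relu) -> Dense(2, linear), producing one Q-value per action.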
# self.conv1 = nn.Conv2d(in_channels, 32, kernel_size=8, stride=4)
# self.conv2 = nn.Conv2d(32, 64, kernel_size=4, stride=2)
# self.conv3 = nn.Conv2d(64, 64, kernel_size=3, stride=1)
# self.fc4 = nn.Linear(7 * 7 * 64, 512)
# self.gru = nn.GRU(512, num_actions, batch_first=True) # input shape (batch, seq, feature)
def forward(self): # , hidden=None, max_seq=1, batch_size=1):
# DQN input B*C*feature (32 4 84 84)
# DRQN input B*C*feature (32*seq_len 4 84 84)
# x = F.relu(self.conv1(x))
# x = F.relu(self.conv2(x))
# x = F.relu(self.conv3(x))
# x = F.relu(self.fc4(x.reshape(x.size(0), -1)))
# hidden = self.init_hidden(batch_size) if hidden is None else hidden
# before going to the RNN, reshape the input to (batch, seq, feature)
# x = x.reshape(batch_size, max_seq, 512)
# return self.gru(x, hidden)
predict = self.model.predict(self.state)
return predict
def compile(self):
self.model.compile(loss='categorical_crossentropy', optimizer='sgd')
def train(self, x=None, y=None):
loss = self.model.fit(x, y, steps_per_epoch=32, verbose=0)
return loss
def get_weights(self):
return self.model.get_weights()
def set_weights(self, weights):
self.model.set_weights(weights)
# def init_hidden(self, batch_size):
# initialize hidden state to 0
# return torch.zeros(1, batch_size, self.num_actions, device=self.device, dtype=torch.float)
class Recurrent_Memory_Buffer(object):
# memory buffer to store episodic memory
def __init__(self, memory_size=1000, max_seq=10):
self.buffer = []
self.memory_size = memory_size
self.max_seq = max_seq
self.next_idx = 0
def push(self, state, action, reward, next_state, done):
data = (state, action, reward, next_state, done)
if len(self.buffer) < self.memory_size: # buffer not full
self.buffer.append(data)
else: # buffer is full
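# overwrite the oldest transition: next_idx wraps around the fixed-size buffer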
self.buffer[self.next_idx] = data
self.next_idx = (self.next_idx + 1) % self.memory_size
def sample(self, batch_size):
# sample episodic memory
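# Each sample is a contiguous window of max_seq consecutive transitions, so the
# concatenated states/next_states come out with shape (batch_size*max_seq, 1, 4).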
states, actions, rewards, next_states, dones = [], [], [], [], []
for i in range(batch_size):
finish = random.randint(self.max_seq, self.size() - 1)
begin = finish - self.max_seq
data = self.buffer[begin:finish]
state, action, reward, next_state, done = zip(*data)
states.append(np.concatenate([self.observe(state_i) for state_i in state]))
actions.append(action)
rewards.append(reward)
next_states.append(np.concatenate([self.observe(state_i) for state_i in next_state]))
dones.append(done)
return np.concatenate(states), actions, rewards, np.concatenate(next_states), dones
def size(self):
return len(self.buffer)
def observe(self, lazyframe):
# from Lazy frame to tensor
# state = torch.from_numpy(lazyframe._force().transpose(2, 0, 1)[None] / 255).float()
# if self.USE_CUDA:
# state = state.cuda()
state = lazyframe.reshape(1, 4)
state = [state]
state = np.array(state)
return state
class DRQNAgent:
# DRQN agent
def __init__(self, action_space=None, USE_CUDA=False, memory_size=10000, epsilon=1, lr=1e-4,
max_seq=8, batch_size=32):
self.USE_CUDA = USE_CUDA
# self.device = torch.device("cuda:0" if USE_CUDA else "cpu")
self.max_seq = max_seq
self.batch_size = batch_size
self.epsilon = epsilon
self.action_space = action_space
self.rec_memory_buffer = Recurrent_Memory_Buffer(memory_size)
self.DRQN = DRQN(num_actions=action_space.n)
self.DRQN_target = DRQN(num_actions=action_space.n)
# self.DRQN_target.load_state_dict(self.DRQN.state_dict())
# if USE_CUDA:
# self.DRQN = self.DRQN.cuda()
# self.DRQN_target = self.DRQN_target.cuda()
self.optimizer = RMSprop(learning_rate=lr)
self.DRQN.model.compile(optimizer=self.optimizer, loss='mse')
def observe(self, lazyframe):
# from Lazy frame to tensor
# state = torch.from_numpy(lazyframe._force().transpose(2, 0, 1)[None] / 255).float()
# if self.USE_CUDA:
# state = state.cuda()
state = lazyframe.reshape(1, 4)
state = [state]
state = np.array(state)
return state
def value(self, state):
# get q_values of a given state
q_values = self.DRQN.model.predict(state)
return q_values
def act(self, state, epsilon=None):
"""
sample actions with epsilon-greedy policy
recap: with p = epsilon pick random action, else pick action with highest Q(s,a)
"""
if epsilon is None: epsilon = self.epsilon
q_values = self.value(state)
q_values = q_values.squeeze(1)
if random.random() < epsilon:
action = random.randrange(self.action_space.n)
else:
action = q_values.argmax(1)[0]
return action
def compute_td_loss(self, states, actions, rewards, next_states, is_done, gamma=0.99):
""" Compute td loss using torch operations only."""
actions = tf.convert_to_tensor(actions) # shape: [batch_size * seq_len]
rewards = tf.convert_to_tensor(rewards) # shape: [batch_size * seq_len]
is_done = tf.convert_to_tensor(is_done) # shape: [batch_size * seq_len]
actions = tf.reshape(actions, [-1])
rewards = tf.reshape(rewards, [-1])
is_done = tf.reshape(is_done, [-1])
states = tf.reshape(states, [batch_size * max_seq, 1, 4])
next_states = tf.reshape(next_states, [batch_size * max_seq, 1, 4])
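# Sequences are flattened so every (batch, seq) pair becomes one row of shape (1, 4)
# that the LSTM-based model can score independently.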
# if self.USE_CUDA:
# actions = actions.cuda()
# rewards = rewards.cuda()
# is_done = is_done.cuda()
# get q-values for all actions in current states
predicted_qvalues = self.DRQN.model.predict(states, steps=1)
# predicted_qvalues = predicted_qvalues.reshape(-1, self.action_space.n)
# predicted_qvalues = predicted_qvalues.squeeze(0)
# select q-values for chosen actions
# a = np.concatenate(actions)
# predicted_qvalues_for_actions = predicted_qvalues[
# range(states.shape[0]), actions
# ]
# compute q-values for all actions in next states
predicted_next_qvalues = self.DRQN_target.model.predict(next_states, steps=1) # YOUR CODE
# predicted_next_qvalues = predicted_next_qvalues.squeeze(0)
predicted_next_qvalues = predicted_next_qvalues.reshape(-1, self.action_space.n)
# compute V*(next_states) using predicted next q-values
next_state_values = predicted_next_qvalues.max(-1)
next_state_values_arg = predicted_next_qvalues.argmax(-1)
# compute "target q-values" for loss - it's what's inside square parentheses in the above formula.
target_qvalues_for_actions = rewards + gamma * next_state_values
# at the last state we shall use simplified formula: Q(s,a) = r(s,a) since s' doesn't exist
target_qvalues_for_actions = tf.where(
is_done, rewards, target_qvalues_for_actions)
# if is_done:
# target_qvalues_for_actions = rewards
# else:
# target_qvalues_for_actions = target_qvalues_for_actions
for i in range(len(target_qvalues_for_actions)):
j = next_state_values_arg[i]
predicted_qvalues[i][0][j] = target_qvalues_for_actions[i]
# mean squared error loss to minimize
loss = self.DRQN.train(states, predicted_qvalues)
return loss
def sample_from_buffer(self, batch_size):
# rewritten version of the buffer's sample() that returns TF tensors for the states
states, actions, rewards, next_states, dones = [], [], [], [], []
for i in range(batch_size):
finish = random.randint(self.max_seq, self.rec_memory_buffer.size() - 1)
begin = finish - self.max_seq
data = self.rec_memory_buffer.buffer[begin:finish]
state, action, reward, next_state, done = zip(*data)
states.append(np.concatenate([self.observe(state_i) for state_i in state]))
actions.append(action)
rewards.append(reward)
next_states.append(np.concatenate([self.observe(state_i) for state_i in next_state]))
dones.append(done)
states = tf.convert_to_tensor(states)
next_states = tf.convert_to_tensor(next_states)
# return np.concatenate(states), np.concatenate(actions), np.concatenate(rewards), np.concatenate(next_states), np.concatenate(dones)
return states, actions, rewards, next_states, dones
def learn_from_experience(self, batch_size):
# learn from experience
if self.rec_memory_buffer.size() > batch_size:
states, actions, rewards, next_states, dones = self.sample_from_buffer(batch_size)
td_loss = self.compute_td_loss(states, actions, rewards, next_states, dones)
# self.optimizer.zero_grad()
# td_loss.backward()
# for param in self.DRQN.model.get_weights():
# clip the gradient
# param.grad.data.clamp_(-1, 1)
# self.optimizer.step()
return td_loss.history['loss'][0]
else:
return (0)
if __name__ == '__main__':
#################################
parser = argparse.ArgumentParser(description="Evaluate an RL policy")
# rlscope will add custom arguments to the argparse argument parser
# that allow you to customize (e.g., "--rlscope-directory <dir>"
# for where to store results).
rlscope.add_rlscope_arguments(parser)
args = parser.parse_args()
# Using the parsed arguments, rlscope will instantiate a singleton
# profiler instance (rlscope.prof).
rlscope.handle_rlscope_args(
parser=parser,
args=args,
)
# Provide a name for the algorithm and simulator (env) used so we can
# generate meaningful plot labels.
# The "process_name" and "phase_name" are useful identifiers for
# multi-process workloads.
rlscope.prof.set_metadata({
'algo': 'LSTM',
'env': 'CartPole-v1',
})
process_name = 'Real_LSTM_CartPole'
phase_name = process_name
#####################################
env = gym.make('CartPole-v1')
# env = wrap_deepmind(env, scale = False, frame_stack=True)
gamma = 0.99 # discount factor
epsilon_max = 1 # epsilon greedy parameter max
epsilon_min = 0.01 # epsilon greedy parameter min
eps_decay = 3000 # epsilon greedy parameter decay
frames = 3000 # total training frames
USE_CUDA = True # training with gpu
learning_rate = 2e-4 # learning rate
max_buff = 10000 # maximum buffer size
update_tar_interval = 1000 # frames for updating target network
batch_size = 32
max_seq = 8
print_interval = 100
log_interval = 1000
learning_start = 1000 # 10000
action_space = env.action_space
print("action space is:", action_space)
# action_dim = env.action_space.n
state_dim = env.observation_space.shape[0]
# state_channel = env.observation_space.shape[2]
agent = DRQNAgent(action_space=action_space, USE_CUDA=USE_CUDA, lr=learning_rate, max_seq=max_seq,
batch_size=batch_size)
frame = env.reset()
episode_reward = 0
print("episode_reward is:", episode_reward)
all_rewards = []
print("all_rewards is:", all_rewards)
losses = []
print("losses is:", losses)
episode_num = 0
print("episode_num is:", episode_num)
# tensorboard
# summary_writer = SummaryWriter(log_dir="DRQN", comment="good_makeatari")
# e-greedy decay
epsilon_by_frame = lambda frame_idx: epsilon_min + (epsilon_max - epsilon_min) * math.exp(
-1. * frame_idx / eps_decay)
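# exponential decay from epsilon_max toward epsilon_min, e.g.
# epsilon_by_frame(0) = 1.0 and epsilon_by_frame(3000) = 0.01 + 0.99*e**-1 ≈ 0.37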
# plt.plot([epsilon_by_frame(i) for i in range(10000)])
for i in range(frames):
# print("i is:",i)
epsilon = epsilon_by_frame(i)
state_tensor = agent.observe(frame)
# print("state_tensor is", state_tensor)
action = agent.act(state_tensor, epsilon)
# print("action is: ",action)
next_frame, reward, done, _ = env.step(action)
episode_reward += reward
agent.rec_memory_buffer.push(frame, action, reward, next_frame, done)
frame = next_frame
loss = 0
if agent.rec_memory_buffer.size() >= learning_start:
# print("learn from experience+++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
loss = agent.learn_from_experience(batch_size)
losses.append(loss)
if i % print_interval == 0:
mean = np.mean(all_rewards[-10:]).item()
print("@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@")
print("frames: %5d, reward: %5f, loss: %4f, epsilon: %5f, episode: %4d" % (
i, mean, loss, epsilon, episode_num))
# summary_writer.add_scalar("Temporal Difference Loss", loss, i)
# summary_writer.add_scalar("Mean Reward", np.mean(all_rewards[-10:]), i)
# summary_writer.add_scalar("Epsilon", epsilon, i)
if i % update_tar_interval == 0:
print("TARGET GET UPDATED")
print("****************************************************************************************")
agent.DRQN_target.set_weights(agent.DRQN.get_weights())
# agent.DRQN_target.load_state_dict(agent.DRQN.state_dict())
if done:
print("IT IS DONE")
print('i is', i)
print("--------------------------------------------------------------------------------------")
frame = env.reset()
# reset hidden to None
all_rewards.append(episode_reward)
print("all reward now is: ", all_rewards[-10:])
episode_reward = 0
episode_num += 1
avg_reward = float(np.mean(all_rewards[:]))
# summary_writer.close()
observation = env.reset()
count = 0
reward_sum = 0
random_episodes = 0
while random_episodes < 10:
# env.render()
x = observation.reshape(-1, 4)
x = [x]
x = np.array(x)
q_values = agent.DRQN.model.predict(x)
# print(q_values)
action = np.argmax(q_values)
# print(action)
observation, reward, done, _ = env.step(action)
count += 1
reward_sum += reward
if done:
print("Reward for this episode was: {}, turns was: {}".format(reward_sum, count))
random_episodes += 1
reward_sum = 0
count = 0
observation = env.reset()
env.close() | random_line_split | |
LSTM-cartpole4.py | import gym, random, pickle, os.path, math, glob
import argparse
import rlscope.api as rlscope
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Input, Dense, LSTM, Activation
from tensorflow.keras.optimizers import RMSprop
import numpy as np
import tensorflow as tf
# from tensorboardX import SummaryWriter
# USE_CUDA = torch.cuda.is_available()
# dtype = torch.cuda.FloatTensor if torch.cuda.is_available() else torch.FloatTensor
# Variable = lambda *args, **kwargs: autograd.Variable(*args, **kwargs).cuda() if USE_CUDA else autograd.Variable(*args, **kwargs)
#physical_devices = tf.config.list_physical_devices('GPU')
#tf.config.experimental.set_memory_growth(physical_devices[0],enable=True)
class DRQN:
def __init__(self, num_actions=2, state=None): # device=torch.device("cpu")):
"""
Initialize a deep recurrent Q-learning network (DRQN).
Arguments:
in_channels: number of channels of the input,
i.e. the number of most recent frames stacked together, as described in the paper
num_actions: number of action-values to output, one-to-one correspondence to actions in the game.
device: cpu or gpu (cuda:0)
"""
super(DRQN, self).__init__()
# self.device = device
self.num_actions = num_actions
self.input = Input(shape=(1, 4))
self.lstm1 = LSTM(128, input_shape=(256, 32, 4), return_sequences=True)(self.input)
self.lstm2 = LSTM(128, return_sequences=True)(self.lstm1)
self.lstm3 = LSTM(128, return_sequences=True)(self.lstm2)
self.dense1 = Dense(128, activation='relu')(self.lstm3)
self.output = Dense(2, activation='linear')(self.dense1)
self.state = state
self.model = Model(inputs=self.input, outputs=self.output)
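# Resulting graph: Input(1, 4) -> three stacked LSTM(128) layers (return_sequences=True)
# -> Dense(128, relu) -> Dense(2, linear), producing one Q-value per action.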
# self.conv1 = nn.Conv2d(in_channels, 32, kernel_size=8, stride=4)
# self.conv2 = nn.Conv2d(32, 64, kernel_size=4, stride=2)
# self.conv3 = nn.Conv2d(64, 64, kernel_size=3, stride=1)
# self.fc4 = nn.Linear(7 * 7 * 64, 512)
# self.gru = nn.GRU(512, num_actions, batch_first=True) # input shape (batch, seq, feature)
def forward(self): # , hidden=None, max_seq=1, batch_size=1):
# DQN input B*C*feature (32 4 84 84)
# DRQN input B*C*feature (32*seq_len 4 84 84)
# x = F.relu(self.conv1(x))
# x = F.relu(self.conv2(x))
# x = F.relu(self.conv3(x))
# x = F.relu(self.fc4(x.reshape(x.size(0), -1)))
# hidden = self.init_hidden(batch_size) if hidden is None else hidden
# before going to the RNN, reshape the input to (batch, seq, feature)
# x = x.reshape(batch_size, max_seq, 512)
# return self.gru(x, hidden)
predict = self.model.predict(self.state)
return predict
def compile(self):
self.model.compile(loss='categorical_crossentropy', optimizer='sgd')
def train(self, x=None, y=None):
loss = self.model.fit(x, y, steps_per_epoch=32, verbose=0)
return loss
def get_weights(self):
return self.model.get_weights()
def set_weights(self, weights):
self.model.set_weights(weights)
# def init_hidden(self, batch_size):
# initialize hidden state to 0
# return torch.zeros(1, batch_size, self.num_actions, device=self.device, dtype=torch.float)
class Recurrent_Memory_Buffer(object):
# memory buffer to store episodic memory
|
class DRQNAgent:
# DRQN agent
def __init__(self, action_space=None, USE_CUDA=False, memory_size=10000, epsilon=1, lr=1e-4,
max_seq=8, batch_size=32):
self.USE_CUDA = USE_CUDA
# self.device = torch.device("cuda:0" if USE_CUDA else "cpu")
self.max_seq = max_seq
self.batch_size = batch_size
self.epsilon = epsilon
self.action_space = action_space
self.rec_memory_buffer = Recurrent_Memory_Buffer(memory_size)
self.DRQN = DRQN(num_actions=action_space.n)
self.DRQN_target = DRQN(num_actions=action_space.n)
# self.DRQN_target.load_state_dict(self.DRQN.state_dict())
# if USE_CUDA:
# self.DRQN = self.DRQN.cuda()
# self.DRQN_target = self.DRQN_target.cuda()
self.optimizer = RMSprop(learning_rate=lr)
self.DRQN.model.compile(optimizer=self.optimizer, loss='mse')
def observe(self, lazyframe):
# from Lazy frame to tensor
# state = torch.from_numpy(lazyframe._force().transpose(2, 0, 1)[None] / 255).float()
# if self.USE_CUDA:
# state = state.cuda()
state = lazyframe.reshape(1, 4)
state = [state]
state = np.array(state)
return state
def value(self, state):
# get q_values of a given state
q_values = self.DRQN.model.predict(state)
return q_values
def act(self, state, epsilon=None):
"""
sample actions with epsilon-greedy policy
recap: with p = epsilon pick random action, else pick action with highest Q(s,a)
"""
if epsilon is None: epsilon = self.epsilon
q_values = self.value(state)
q_values = q_values.squeeze(1)
if random.random() < epsilon:
action = random.randrange(self.action_space.n)
else:
action = q_values.argmax(1)[0]
return action
def compute_td_loss(self, states, actions, rewards, next_states, is_done, gamma=0.99):
""" Compute td loss using torch operations only."""
actions = tf.convert_to_tensor(actions) # shape: [batch_size * seq_len]
rewards = tf.convert_to_tensor(rewards) # shape: [batch_size * seq_len]
is_done = tf.convert_to_tensor(is_done) # shape: [batch_size * seq_len]
actions = tf.reshape(actions, [-1])
rewards = tf.reshape(rewards, [-1])
is_done = tf.reshape(is_done, [-1])
states = tf.reshape(states, [batch_size * max_seq, 1, 4])
next_states = tf.reshape(next_states, [batch_size * max_seq, 1, 4])
# if self.USE_CUDA:
# actions = actions.cuda()
# rewards = rewards.cuda()
# is_done = is_done.cuda()
# get q-values for all actions in current states
predicted_qvalues = self.DRQN.model.predict(states, steps=1)
# predicted_qvalues = predicted_qvalues.reshape(-1, self.action_space.n)
# predicted_qvalues = predicted_qvalues.squeeze(0)
# select q-values for chosen actions
# a = np.concatenate(actions)
# predicted_qvalues_for_actions = predicted_qvalues[
# range(states.shape[0]), actions
# ]
# compute q-values for all actions in next states
predicted_next_qvalues = self.DRQN_target.model.predict(next_states, steps=1) # YOUR CODE
# predicted_next_qvalues = predicted_next_qvalues.squeeze(0)
predicted_next_qvalues = predicted_next_qvalues.reshape(-1, self.action_space.n)
# compute V*(next_states) using predicted next q-values
next_state_values = predicted_next_qvalues.max(-1)
next_state_values_arg = predicted_next_qvalues.argmax(-1)
# compute "target q-values" for loss - it's what's inside square parentheses in the above formula.
target_qvalues_for_actions = rewards + gamma * next_state_values
# at the last state we shall use simplified formula: Q(s,a) = r(s,a) since s' doesn't exist
target_qvalues_for_actions = tf.where(
is_done, rewards, target_qvalues_for_actions)
# if is_done:
# target_qvalues_for_actions = rewards
# else:
# target_qvalues_for_actions = target_qvalues_for_actions
for i in range(len(target_qvalues_for_actions)):
j = next_state_values_arg[i]
predicted_qvalues[i][0][j] = target_qvalues_for_actions[i]
# mean squared error loss to minimize
loss = self.DRQN.train(states, predicted_qvalues)
return loss
def sample_from_buffer(self, batch_size):
# rewritten version of the buffer's sample() that returns TF tensors for the states
states, actions, rewards, next_states, dones = [], [], [], [], []
for i in range(batch_size):
finish = random.randint(self.max_seq, self.rec_memory_buffer.size() - 1)
begin = finish - self.max_seq
data = self.rec_memory_buffer.buffer[begin:finish]
state, action, reward, next_state, done = zip(*data)
states.append(np.concatenate([self.observe(state_i) for state_i in state]))
actions.append(action)
rewards.append(reward)
next_states.append(np.concatenate([self.observe(state_i) for state_i in next_state]))
dones.append(done)
states = tf.convert_to_tensor(states)
next_states = tf.convert_to_tensor(next_states)
# return np.concatenate(states), np.concatenate(actions), np.concatenate(rewards), np.concatenate(next_states), np.concatenate(dones)
return states, actions, rewards, next_states, dones
def learn_from_experience(self, batch_size):
# learn from experience
if self.rec_memory_buffer.size() > batch_size:
states, actions, rewards, next_states, dones = self.sample_from_buffer(batch_size)
td_loss = self.compute_td_loss(states, actions, rewards, next_states, dones)
# self.optimizer.zero_grad()
# td_loss.backward()
# for param in self.DRQN.model.get_weights():
# clip the gradient
# param.grad.data.clamp_(-1, 1)
# self.optimizer.step()
return td_loss.history['loss'][0]
else:
return (0)
if __name__ == '__main__':
#################################
parser = argparse.ArgumentParser(description="Evaluate an RL policy")
# rlscope will add custom arguments to the argparse argument parser
# that allow you to customize (e.g., "--rlscope-directory <dir>"
# for where to store results).
rlscope.add_rlscope_arguments(parser)
args = parser.parse_args()
# Using the parsed arguments, rlscope will instantiate a singleton
# profiler instance (rlscope.prof).
rlscope.handle_rlscope_args(
parser=parser,
args=args,
)
# Provide a name for the algorithm and simulator (env) used so we can
# generate meaningful plot labels.
# The "process_name" and "phase_name" are useful identifiers for
# multi-process workloads.
rlscope.prof.set_metadata({
'algo': 'LSTM',
'env': 'CartPole-v1',
})
process_name = 'Real_LSTM_CartPole'
phase_name = process_name
#####################################
env = gym.make('CartPole-v1')
# env = wrap_deepmind(env, scale = False, frame_stack=True)
gamma = 0.99 # discount factor
epsilon_max = 1 # epsilon greedy parameter max
epsilon_min = 0.01 # epsilon greedy parameter min
eps_decay = 3000 # epsilon greedy parameter decay
frames = 3000 # total training frames
USE_CUDA = True # training with gpu
learning_rate = 2e-4 # learning rate
max_buff = 10000 # maximum buffer size
update_tar_interval = 1000 # frames for updating target network
batch_size = 32
max_seq = 8
print_interval = 100
log_interval = 1000
learning_start = 1000 # 10000
action_space = env.action_space
print("action space is:", action_space)
# action_dim = env.action_space.n
state_dim = env.observation_space.shape[0]
# state_channel = env.observation_space.shape[2]
agent = DRQNAgent(action_space=action_space, USE_CUDA=USE_CUDA, lr=learning_rate, max_seq=max_seq,
batch_size=batch_size)
frame = env.reset()
episode_reward = 0
print("episode_reward is:", episode_reward)
all_rewards = []
print("all_rewards is:", all_rewards)
losses = []
print("losses is:", losses)
episode_num = 0
print("episode_num is:", episode_num)
# tensorboard
# summary_writer = SummaryWriter(log_dir="DRQN", comment="good_makeatari")
# e-greedy decay
epsilon_by_frame = lambda frame_idx: epsilon_min + (epsilon_max - epsilon_min) * math.exp(
-1. * frame_idx / eps_decay)
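# exponential decay from epsilon_max toward epsilon_min, e.g.
# epsilon_by_frame(0) = 1.0 and epsilon_by_frame(3000) = 0.01 + 0.99*e**-1 ≈ 0.37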
# plt.plot([epsilon_by_frame(i) for i in range(10000)])
for i in range(frames):
# print("i is:",i)
epsilon = epsilon_by_frame(i)
state_tensor = agent.observe(frame)
# print("state_tensor is", state_tensor)
action = agent.act(state_tensor, epsilon)
# print("action is: ",action)
next_frame, reward, done, _ = env.step(action)
episode_reward += reward
agent.rec_memory_buffer.push(frame, action, reward, next_frame, done)
frame = next_frame
loss = 0
if agent.rec_memory_buffer.size() >= learning_start:
# print("learn from experience+++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
loss = agent.learn_from_experience(batch_size)
losses.append(loss)
if i % print_interval == 0:
mean = np.mean(all_rewards[-10:]).item()
print("@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@")
print("frames: %5d, reward: %5f, loss: %4f, epsilon: %5f, episode: %4d" % (
i, mean, loss, epsilon, episode_num))
# summary_writer.add_scalar("Temporal Difference Loss", loss, i)
# summary_writer.add_scalar("Mean Reward", np.mean(all_rewards[-10:]), i)
# summary_writer.add_scalar("Epsilon", epsilon, i)
if i % update_tar_interval == 0:
print("TARGET GET UPDATED")
print("****************************************************************************************")
agent.DRQN_target.set_weights(agent.DRQN.get_weights())
# agent.DRQN_target.load_state_dict(agent.DRQN.state_dict())
if done:
print("IT IS DONE")
print('i is', i)
print("--------------------------------------------------------------------------------------")
frame = env.reset()
# reset hidden to None
all_rewards.append(episode_reward)
print("all reward now is: ", all_rewards[-10:])
episode_reward = 0
episode_num += 1
avg_reward = float(np.mean(all_rewards[:]))
# summary_writer.close()
observation = env.reset()
count = 0
reward_sum = 0
random_episodes = 0
while random_episodes < 10:
# env.render()
x = observation.reshape(-1, 4)
x = [x]
x = np.array(x)
q_values = agent.DRQN.model.predict(x)
# print(q_values)
action = np.argmax(q_values)
# print(action)
observation, reward, done, _ = env.step(action)
count += 1
reward_sum += reward
if done:
print("Reward for this episode was: {}, turns was: {}".format(reward_sum, count))
random_episodes += 1
reward_sum = 0
count = 0
observation = env.reset()
env.close()
| def __init__(self, memory_size=1000, max_seq=10):
self.buffer = []
self.memory_size = memory_size
self.max_seq = max_seq
self.next_idx = 0
def push(self, state, action, reward, next_state, done):
data = (state, action, reward, next_state, done)
if len(self.buffer) < self.memory_size: # buffer not full
self.buffer.append(data)
else: # buffer is full
self.buffer[self.next_idx] = data
self.next_idx = (self.next_idx + 1) % self.memory_size
def sample(self, batch_size):
# sample episodic memory
states, actions, rewards, next_states, dones = [], [], [], [], []
for i in range(batch_size):
finish = random.randint(self.max_seq, self.size() - 1)
begin = finish - self.max_seq
data = self.buffer[begin:finish]
state, action, reward, next_state, done = zip(*data)
states.append(np.concatenate([self.observe(state_i) for state_i in state]))
actions.append(action)
rewards.append(reward)
next_states.append(np.concatenate([self.observe(state_i) for state_i in next_state]))
dones.append(done)
return np.concatenate(states), actions, rewards, np.concatenate(next_states), dones
def size(self):
return len(self.buffer)
def observe(self, lazyframe):
# from Lazy frame to tensor
# state = torch.from_numpy(lazyframe._force().transpose(2, 0, 1)[None] / 255).float()
# if self.USE_CUDA:
# state = state.cuda()
state = lazyframe.reshape(1, 4)
state = [state]
state = np.array(state)
return state | identifier_body |
LSTM-cartpole4.py | import gym, random, pickle, os.path, math, glob
import argparse
import rlscope.api as rlscope
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Input, Dense, LSTM, Activation
from tensorflow.keras.optimizers import RMSprop
import numpy as np
import tensorflow as tf
# from tensorboardX import SummaryWriter
# USE_CUDA = torch.cuda.is_available()
# dtype = torch.cuda.FloatTensor if torch.cuda.is_available() else torch.FloatTensor
# Variable = lambda *args, **kwargs: autograd.Variable(*args, **kwargs).cuda() if USE_CUDA else autograd.Variable(*args, **kwargs)
#physical_devices = tf.config.list_physical_devices('GPU')
#tf.config.experimental.set_memory_growth(physical_devices[0],enable=True)
class DRQN:
def | (self, num_actions=2, state=None): # device=torch.device("cpu")):
"""
Initialize a deep recurrent Q-learning network (DRQN).
Arguments:
in_channels: number of channels of the input,
i.e. the number of most recent frames stacked together, as described in the paper
num_actions: number of action-values to output, one-to-one correspondence to actions in the game.
device: cpu or gpu (cuda:0)
"""
super(DRQN, self).__init__()
# self.device = device
self.num_actions = num_actions
self.input = Input(shape=(1, 4))
self.lstm1 = LSTM(128, input_shape=(256, 32, 4), return_sequences=True)(self.input)
self.lstm2 = LSTM(128, return_sequences=True)(self.lstm1)
self.lstm3 = LSTM(128, return_sequences=True)(self.lstm2)
self.dense1 = Dense(128, activation='relu')(self.lstm3)
self.output = Dense(2, activation='linear')(self.dense1)
self.state = state
self.model = Model(inputs=self.input, outputs=self.output)
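# Resulting graph: Input(1, 4) -> three stacked LSTM(128) layers (return_sequences=True)
# -> Dense(128, relu) -> Dense(2, linear), producing one Q-value per action.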
# self.conv1 = nn.Conv2d(in_channels, 32, kernel_size=8, stride=4)
# self.conv2 = nn.Conv2d(32, 64, kernel_size=4, stride=2)
# self.conv3 = nn.Conv2d(64, 64, kernel_size=3, stride=1)
# self.fc4 = nn.Linear(7 * 7 * 64, 512)
# self.gru = nn.GRU(512, num_actions, batch_first=True) # input shape (batch, seq, feature)
def forward(self): # , hidden=None, max_seq=1, batch_size=1):
# DQN input B*C*feature (32 4 84 84)
# DRQN input B*C*feature (32*seq_len 4 84 84)
# x = F.relu(self.conv1(x))
# x = F.relu(self.conv2(x))
# x = F.relu(self.conv3(x))
# x = F.relu(self.fc4(x.reshape(x.size(0), -1)))
# hidden = self.init_hidden(batch_size) if hidden is None else hidden
# before going to the RNN, reshape the input to (batch, seq, feature)
# x = x.reshape(batch_size, max_seq, 512)
# return self.gru(x, hidden)
predict = self.model.predict(self.state)
return predict
def compile(self):
self.model.compile(loss='categorical_crossentropy', optimizer='sgd')
def train(self, x=None, y=None):
loss = self.model.fit(x, y, steps_per_epoch=32, verbose=0)
return loss
def get_weights(self):
return self.model.get_weights()
def set_weights(self, weights):
self.model.set_weights(weights)
# def init_hidden(self, batch_size):
# initialize hidden state to 0
# return torch.zeros(1, batch_size, self.num_actions, device=self.device, dtype=torch.float)
class Recurrent_Memory_Buffer(object):
# memory buffer to store episodic memory
def __init__(self, memory_size=1000, max_seq=10):
self.buffer = []
self.memory_size = memory_size
self.max_seq = max_seq
self.next_idx = 0
def push(self, state, action, reward, next_state, done):
data = (state, action, reward, next_state, done)
if len(self.buffer) < self.memory_size: # buffer not full
self.buffer.append(data)
else: # buffer is full
self.buffer[self.next_idx] = data
self.next_idx = (self.next_idx + 1) % self.memory_size
def sample(self, batch_size):
# sample episodic memory
states, actions, rewards, next_states, dones = [], [], [], [], []
for i in range(batch_size):
finish = random.randint(self.max_seq, self.size() - 1)
begin = finish - self.max_seq
data = self.buffer[begin:finish]
state, action, reward, next_state, done = zip(*data)
states.append(np.concatenate([self.observe(state_i) for state_i in state]))
actions.append(action)
rewards.append(reward)
next_states.append(np.concatenate([self.observe(state_i) for state_i in next_state]))
dones.append(done)
return np.concatenate(states), actions, rewards, np.concatenate(next_states), dones
def size(self):
return len(self.buffer)
def observe(self, lazyframe):
# from Lazy frame to tensor
# state = torch.from_numpy(lazyframe._force().transpose(2, 0, 1)[None] / 255).float()
# if self.USE_CUDA:
# state = state.cuda()
state = lazyframe.reshape(1, 4)
state = [state]
state = np.array(state)
return state
class DRQNAgent:
# DRQN agent
def __init__(self, action_space=None, USE_CUDA=False, memory_size=10000, epsilon=1, lr=1e-4,
max_seq=8, batch_size=32):
self.USE_CUDA = USE_CUDA
# self.device = torch.device("cuda:0" if USE_CUDA else "cpu")
self.max_seq = max_seq
self.batch_size = batch_size
self.epsilon = epsilon
self.action_space = action_space
self.rec_memory_buffer = Recurrent_Memory_Buffer(memory_size)
self.DRQN = DRQN(num_actions=action_space.n)
self.DRQN_target = DRQN(num_actions=action_space.n)
# self.DRQN_target.load_state_dict(self.DRQN.state_dict())
# if USE_CUDA:
# self.DRQN = self.DRQN.cuda()
# self.DRQN_target = self.DRQN_target.cuda()
self.optimizer = RMSprop(learning_rate=lr)
self.DRQN.model.compile(optimizer=self.optimizer, loss='mse')
def observe(self, lazyframe):
# from Lazy frame to tensor
# state = torch.from_numpy(lazyframe._force().transpose(2, 0, 1)[None] / 255).float()
# if self.USE_CUDA:
# state = state.cuda()
state = lazyframe.reshape(1, 4)
state = [state]
state = np.array(state)
return state
def value(self, state):
# get q_values of a given state
q_values = self.DRQN.model.predict(state)
return q_values
def act(self, state, epsilon=None):
"""
sample actions with epsilon-greedy policy
recap: with p = epsilon pick random action, else pick action with highest Q(s,a)
"""
if epsilon is None: epsilon = self.epsilon
q_values = self.value(state)
q_values = q_values.squeeze(1)
if random.random() < epsilon:
action = random.randrange(self.action_space.n)
else:
action = q_values.argmax(1)[0]
return action
def compute_td_loss(self, states, actions, rewards, next_states, is_done, gamma=0.99):
""" Compute td loss using torch operations only."""
actions = tf.convert_to_tensor(actions) # shape: [batch_size * seq_len]
rewards = tf.convert_to_tensor(rewards) # shape: [batch_size * seq_len]
is_done = tf.convert_to_tensor(is_done) # shape: [batch_size * seq_len]
actions = tf.reshape(actions, [-1])
rewards = tf.reshape(rewards, [-1])
is_done = tf.reshape(is_done, [-1])
states = tf.reshape(states, [batch_size * max_seq, 1, 4])
next_states = tf.reshape(next_states, [batch_size * max_seq, 1, 4])
# if self.USE_CUDA:
# actions = actions.cuda()
# rewards = rewards.cuda()
# is_done = is_done.cuda()
# get q-values for all actions in current states
predicted_qvalues = self.DRQN.model.predict(states, steps=1)
# predicted_qvalues = predicted_qvalues.reshape(-1, self.action_space.n)
# predicted_qvalues = predicted_qvalues.squeeze(0)
# select q-values for chosen actions
# a = np.concatenate(actions)
# predicted_qvalues_for_actions = predicted_qvalues[
# range(states.shape[0]), actions
# ]
# compute q-values for all actions in next states
predicted_next_qvalues = self.DRQN_target.model.predict(next_states, steps=1) # YOUR CODE
# predicted_next_qvalues = predicted_next_qvalues.squeeze(0)
predicted_next_qvalues = predicted_next_qvalues.reshape(-1, self.action_space.n)
# compute V*(next_states) using predicted next q-values
next_state_values = predicted_next_qvalues.max(-1)
next_state_values_arg = predicted_next_qvalues.argmax(-1)
# compute "target q-values" for loss - it's what's inside square parentheses in the above formula.
target_qvalues_for_actions = rewards + gamma * next_state_values
# at the last state we shall use simplified formula: Q(s,a) = r(s,a) since s' doesn't exist
target_qvalues_for_actions = tf.where(
is_done, rewards, target_qvalues_for_actions)
# if is_done:
# target_qvalues_for_actions = rewards
# else:
# target_qvalues_for_actions = target_qvalues_for_actions
for i in range(len(target_qvalues_for_actions)):
j = next_state_values_arg[i]
predicted_qvalues[i][0][j] = target_qvalues_for_actions[i]
# mean squared error loss to minimize
loss = self.DRQN.train(states, predicted_qvalues)
return loss
def sample_from_buffer(self, batch_size):
# rewriten sample() in buffer with pytorch operations
states, actions, rewards, next_states, dones = [], [], [], [], []
for i in range(batch_size):
finish = random.randint(self.max_seq, self.rec_memory_buffer.size() - 1)
begin = finish - self.max_seq
data = self.rec_memory_buffer.buffer[begin:finish]
state, action, reward, next_state, done = zip(*data)
states.append(np.concatenate([self.observe(state_i) for state_i in state]))
actions.append(action)
rewards.append(reward)
next_states.append(np.concatenate([self.observe(state_i) for state_i in next_state]))
dones.append(done)
states = tf.convert_to_tensor(states)
next_states = tf.convert_to_tensor(next_states)
# return np.concatenate(states), np.concatenate(actions), np.concatenate(rewards), np.concatenate(next_states), np.concatenate(dones)
return states, actions, rewards, next_states, dones
def learn_from_experience(self, batch_size):
# learn from experience
if self.rec_memory_buffer.size() > batch_size:
states, actions, rewards, next_states, dones = self.sample_from_buffer(batch_size)
td_loss = self.compute_td_loss(states, actions, rewards, next_states, dones)
# self.optimizer.zero_grad()
# td_loss.backward()
# for param in self.DRQN.model.get_weights():
# clip the gradient
# param.grad.data.clamp_(-1, 1)
# self.optimizer.step()
return td_loss.history['loss'][0]
else:
return (0)
if __name__ == '__main__':
#################################
parser = argparse.ArgumentParser(description="Evaluate an RL policy")
# rlscope will add custom arguments to the argparse argument parser
# that allow you to customize (e.g., "--rlscope-directory <dir>"
# for where to store results).
rlscope.add_rlscope_arguments(parser)
args = parser.parse_args()
# Using the parsed arguments, rlscope will instantiate a singleton
# profiler instance (rlscope.prof).
rlscope.handle_rlscope_args(
parser=parser,
args=args,
)
# Provide a name for the algorithm and simulator (env) used so we can
# generate meaningful plot labels.
# The "process_name" and "phase_name" are useful identifiers for
# multi-process workloads.
rlscope.prof.set_metadata({
'algo': 'LSTM',
'env': 'CartPole-v1',
})
process_name = 'Real_LSTM_CartPole'
phase_name = process_name
#####################################
env = gym.make('CartPole-v1')
# env = wrap_deepmind(env, scale = False, frame_stack=True)
gamma = 0.99 # discount factor
epsilon_max = 1 # epsilon greedy parameter max
epsilon_min = 0.01 # epsilon greedy parameter min
eps_decay = 3000 # epsilon greedy parameter decay
frames = 3000 # total training frames
USE_CUDA = True # training with gpu
learning_rate = 2e-4 # learning rate
max_buff = 10000 # maximum buffer size
update_tar_interval = 1000 # frames for updating target network
batch_size = 32
max_seq = 8
print_interval = 100
log_interval = 1000
learning_start = 1000 # 10000
action_space = env.action_space
print("action space is:", action_space)
# action_dim = env.action_space.n
state_dim = env.observation_space.shape[0]
# state_channel = env.observation_space.shape[2]
agent = DRQNAgent(action_space=action_space, USE_CUDA=USE_CUDA, lr=learning_rate, max_seq=max_seq,
batch_size=batch_size)
frame = env.reset()
episode_reward = 0
print("episode_reward is:", episode_reward)
all_rewards = []
print("all_rewards is:", all_rewards)
losses = []
print("losses is:", losses)
episode_num = 0
print("episode_num is:", episode_num)
# tensorboard
# summary_writer = SummaryWriter(log_dir="DRQN", comment="good_makeatari")
# e-greedy decay
epsilon_by_frame = lambda frame_idx: epsilon_min + (epsilon_max - epsilon_min) * math.exp(
-1. * frame_idx / eps_decay)
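# exponential decay from epsilon_max toward epsilon_min, e.g.
# epsilon_by_frame(0) = 1.0 and epsilon_by_frame(3000) = 0.01 + 0.99*e**-1 ≈ 0.37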
# plt.plot([epsilon_by_frame(i) for i in range(10000)])
for i in range(frames):
# print("i is:",i)
epsilon = epsilon_by_frame(i)
state_tensor = agent.observe(frame)
# print("state_tensor is", state_tensor)
action = agent.act(state_tensor, epsilon)
# print("action is: ",action)
next_frame, reward, done, _ = env.step(action)
episode_reward += reward
agent.rec_memory_buffer.push(frame, action, reward, next_frame, done)
frame = next_frame
loss = 0
if agent.rec_memory_buffer.size() >= learning_start:
# print("learn from experience+++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
loss = agent.learn_from_experience(batch_size)
losses.append(loss)
if i % print_interval == 0:
mean = np.mean(all_rewards[-10:]).item()
print("@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@")
print("frames: %5d, reward: %5f, loss: %4f, epsilon: %5f, episode: %4d" % (
i, mean, loss, epsilon, episode_num))
# summary_writer.add_scalar("Temporal Difference Loss", loss, i)
# summary_writer.add_scalar("Mean Reward", np.mean(all_rewards[-10:]), i)
# summary_writer.add_scalar("Epsilon", epsilon, i)
if i % update_tar_interval == 0:
print("TARGET GET UPDATED")
print("****************************************************************************************")
agent.DRQN_target.set_weights(agent.DRQN.get_weights())
# agent.DRQN_target.load_state_dict(agent.DRQN.state_dict())
if done:
print("IT IS DONE")
print('i is', i)
print("--------------------------------------------------------------------------------------")
frame = env.reset()
# reset hidden to None
all_rewards.append(episode_reward)
print("all reward now is: ", all_rewards[-10:])
episode_reward = 0
episode_num += 1
avg_reward = float(np.mean(all_rewards[:]))
# summary_writer.close()
observation = env.reset()
count = 0
reward_sum = 0
random_episodes = 0
while random_episodes < 10:
# env.render()
x = observation.reshape(-1, 4)
x = [x]
x = np.array(x)
q_values = agent.DRQN.model.predict(x)
# print(q_values)
action = np.argmax(q_values)
# print(action)
observation, reward, done, _ = env.step(action)
count += 1
reward_sum += reward
if done:
print("Reward for this episode was: {}, turns was: {}".format(reward_sum, count))
random_episodes += 1
reward_sum = 0
count = 0
observation = env.reset()
env.close()
| __init__ | identifier_name |
LSTM-cartpole4.py | import gym, random, pickle, os.path, math, glob
import argparse
import rlscope.api as rlscope
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Input, Dense, LSTM, Activation
from tensorflow.keras.optimizers import RMSprop
import numpy as np
import tensorflow as tf
# from tensorboardX import SummaryWriter
# USE_CUDA = torch.cuda.is_available()
# dtype = torch.cuda.FloatTensor if torch.cuda.is_available() else torch.FloatTensor
# Variable = lambda *args, **kwargs: autograd.Variable(*args, **kwargs).cuda() if USE_CUDA else autograd.Variable(*args, **kwargs)
#physical_devices = tf.config.list_physical_devices('GPU')
#tf.config.experimental.set_memory_growth(physical_devices[0],enable=True)
class DRQN:
def __init__(self, num_actions=2, state=None): # device=torch.device("cpu")):
"""
Initialize a deep recurrent Q-learning network (DRQN).
Arguments:
in_channels: number of channels of the input,
i.e. the number of most recent frames stacked together, as described in the paper
num_actions: number of action-values to output, one-to-one correspondence to actions in the game.
device: cpu or gpu (cuda:0)
"""
super(DRQN, self).__init__()
# self.device = device
self.num_actions = num_actions
self.input = Input(shape=(1, 4))
self.lstm1 = LSTM(128, input_shape=(256, 32, 4), return_sequences=True)(self.input)
self.lstm2 = LSTM(128, return_sequences=True)(self.lstm1)
self.lstm3 = LSTM(128, return_sequences=True)(self.lstm2)
self.dense1 = Dense(128, activation='relu')(self.lstm3)
self.output = Dense(2, activation='linear')(self.dense1)
self.state = state
self.model = Model(inputs=self.input, outputs=self.output)
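# Resulting graph: Input(1, 4) -> three stacked LSTM(128) layers (return_sequences=True)
# -> Dense(128, relu) -> Dense(2, linear), producing one Q-value per action.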
# self.conv1 = nn.Conv2d(in_channels, 32, kernel_size=8, stride=4)
# self.conv2 = nn.Conv2d(32, 64, kernel_size=4, stride=2)
# self.conv3 = nn.Conv2d(64, 64, kernel_size=3, stride=1)
# self.fc4 = nn.Linear(7 * 7 * 64, 512)
# self.gru = nn.GRU(512, num_actions, batch_first=True) # input shape (batch, seq, feature)
def forward(self): # , hidden=None, max_seq=1, batch_size=1):
# DQN input B*C*feature (32 4 84 84)
# DRQN input B*C*feature (32*seq_len 4 84 84)
# x = F.relu(self.conv1(x))
# x = F.relu(self.conv2(x))
# x = F.relu(self.conv3(x))
# x = F.relu(self.fc4(x.reshape(x.size(0), -1)))
# hidden = self.init_hidden(batch_size) if hidden is None else hidden
# before going to the RNN, reshape the input to (batch, seq, feature)
# x = x.reshape(batch_size, max_seq, 512)
# return self.gru(x, hidden)
predict = self.model.predict(self.state)
return predict
def compile(self):
self.model.compile(loss='categorical_crossentropy', optimizer='sgd')
def train(self, x=None, y=None):
loss = self.model.fit(x, y, steps_per_epoch=32, verbose=0)
return loss
def get_weights(self):
return self.model.get_weights()
def set_weights(self, weights):
self.model.set_weights(weights)
# def init_hidden(self, batch_size):
# initialize hidden state to 0
# return torch.zeros(1, batch_size, self.num_actions, device=self.device, dtype=torch.float)
class Recurrent_Memory_Buffer(object):
# memory buffer to store episodic memory
def __init__(self, memory_size=1000, max_seq=10):
self.buffer = []
self.memory_size = memory_size
self.max_seq = max_seq
self.next_idx = 0
def push(self, state, action, reward, next_state, done):
data = (state, action, reward, next_state, done)
if len(self.buffer) < self.memory_size: # buffer not full
self.buffer.append(data)
else: # buffer is full
self.buffer[self.next_idx] = data
self.next_idx = (self.next_idx + 1) % self.memory_size
def sample(self, batch_size):
# sample episodic memory
states, actions, rewards, next_states, dones = [], [], [], [], []
for i in range(batch_size):
finish = random.randint(self.max_seq, self.size() - 1)
begin = finish - self.max_seq
data = self.buffer[begin:finish]
state, action, reward, next_state, done = zip(*data)
states.append(np.concatenate([self.observe(state_i) for state_i in state]))
actions.append(action)
rewards.append(reward)
next_states.append(np.concatenate([self.observe(state_i) for state_i in next_state]))
dones.append(done)
return np.concatenate(states), actions, rewards, np.concatenate(next_states), dones
def size(self):
return len(self.buffer)
def observe(self, lazyframe):
# from Lazy frame to tensor
# state = torch.from_numpy(lazyframe._force().transpose(2, 0, 1)[None] / 255).float()
# if self.USE_CUDA:
# state = state.cuda()
state = lazyframe.reshape(1, 4)
state = [state]
state = np.array(state)
return state
class DRQNAgent:
# DRQN agent
def __init__(self, action_space=None, USE_CUDA=False, memory_size=10000, epsilon=1, lr=1e-4,
max_seq=8, batch_size=32):
self.USE_CUDA = USE_CUDA
# self.device = torch.device("cuda:0" if USE_CUDA else "cpu")
self.max_seq = max_seq
self.batch_size = batch_size
self.epsilon = epsilon
self.action_space = action_space
self.rec_memory_buffer = Recurrent_Memory_Buffer(memory_size)
self.DRQN = DRQN(num_actions=action_space.n)
self.DRQN_target = DRQN(num_actions=action_space.n)
# self.DRQN_target.load_state_dict(self.DRQN.state_dict())
# if USE_CUDA:
# self.DRQN = self.DRQN.cuda()
# self.DRQN_target = self.DRQN_target.cuda()
self.optimizer = RMSprop(learning_rate=lr)
self.DRQN.model.compile(optimizer=self.optimizer, loss='mse')
def observe(self, lazyframe):
# from Lazy frame to tensor
# state = torch.from_numpy(lazyframe._force().transpose(2, 0, 1)[None] / 255).float()
# if self.USE_CUDA:
# state = state.cuda()
state = lazyframe.reshape(1, 4)
state = [state]
state = np.array(state)
return state
def value(self, state):
# get q_values of a given state
q_values = self.DRQN.model.predict(state)
return q_values
def act(self, state, epsilon=None):
"""
sample actions with epsilon-greedy policy
recap: with p = epsilon pick random action, else pick action with highest Q(s,a)
"""
if epsilon is None: epsilon = self.epsilon
q_values = self.value(state)
q_values = q_values.squeeze(1)
if random.random() < epsilon:
action = random.randrange(self.action_space.n)
else:
action = q_values.argmax(1)[0]
return action
def compute_td_loss(self, states, actions, rewards, next_states, is_done, gamma=0.99):
""" Compute td loss using torch operations only."""
actions = tf.convert_to_tensor(actions) # shape: [batch_size * seq_len]
rewards = tf.convert_to_tensor(rewards) # shape: [batch_size * seq_len]
is_done = tf.convert_to_tensor(is_done) # shape: [batch_size * seq_len]
actions = tf.reshape(actions, [-1])
rewards = tf.reshape(rewards, [-1])
is_done = tf.reshape(is_done, [-1])
states = tf.reshape(states, [batch_size * max_seq, 1, 4])
next_states = tf.reshape(next_states, [batch_size * max_seq, 1, 4])
# if self.USE_CUDA:
# actions = actions.cuda()
# rewards = rewards.cuda()
# is_done = is_done.cuda()
# get q-values for all actions in current states
predicted_qvalues = self.DRQN.model.predict(states, steps=1)
# predicted_qvalues = predicted_qvalues.reshape(-1, self.action_space.n)
# predicted_qvalues = predicted_qvalues.squeeze(0)
# select q-values for chosen actions
# a = np.concatenate(actions)
# predicted_qvalues_for_actions = predicted_qvalues[
# range(states.shape[0]), actions
# ]
# compute q-values for all actions in next states
predicted_next_qvalues = self.DRQN_target.model.predict(next_states, steps=1) # YOUR CODE
# predicted_next_qvalues = predicted_next_qvalues.squeeze(0)
predicted_next_qvalues = predicted_next_qvalues.reshape(-1, self.action_space.n)
# compute V*(next_states) using predicted next q-values
next_state_values = predicted_next_qvalues.max(-1)
next_state_values_arg = predicted_next_qvalues.argmax(-1)
# compute "target q-values" for loss - it's what's inside square parentheses in the above formula.
target_qvalues_for_actions = rewards + gamma * next_state_values
# at the last state we shall use simplified formula: Q(s,a) = r(s,a) since s' doesn't exist
target_qvalues_for_actions = tf.where(
is_done, rewards, target_qvalues_for_actions)
# if is_done:
# target_qvalues_for_actions = rewards
# else:
# target_qvalues_for_actions = target_qvalues_for_actions
for i in range(len(target_qvalues_for_actions)):
j = next_state_values_arg[i]
predicted_qvalues[i][0][j] = target_qvalues_for_actions[i]
# mean squared error loss to minimize
loss = self.DRQN.train(states, predicted_qvalues)
return loss
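# A minimal standalone sketch of the Bellman target assembled above (illustrative
# helper only, not used by the agent): target = r + gamma * max_a' Q_target(s', a'),
# falling back to just r on terminal transitions.
# def td_target(rewards, next_q, dones, gamma=0.99):
#     next_v = next_q.max(axis=-1)                # V*(s') from the target network
#     target = rewards + gamma * next_v           # Bellman backup
#     return np.where(dones, rewards, target)     # terminal states keep the reward only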
def sample_from_buffer(self, batch_size):
# rewritten sample() over the buffer using TensorFlow operations
states, actions, rewards, next_states, dones = [], [], [], [], []
for i in range(batch_size):
finish = random.randint(self.max_seq, self.rec_memory_buffer.size() - 1)
begin = finish - self.max_seq
data = self.rec_memory_buffer.buffer[begin:finish]
state, action, reward, next_state, done = zip(*data)
states.append(np.concatenate([self.observe(state_i) for state_i in state]))
actions.append(action)
rewards.append(reward)
next_states.append(np.concatenate([self.observe(state_i) for state_i in next_state]))
dones.append(done)
states = tf.convert_to_tensor(states)
next_states = tf.convert_to_tensor(next_states)
# return np.concatenate(states), np.concatenate(actions), np.concatenate(rewards), np.concatenate(next_states), np.concatenate(dones)
return states, actions, rewards, next_states, dones
def learn_from_experience(self, batch_size):
# learn from experience
if self.rec_memory_buffer.size() > batch_size:
states, actions, rewards, next_states, dones = self.sample_from_buffer(batch_size)
td_loss = self.compute_td_loss(states, actions, rewards, next_states, dones)
# self.optimizer.zero_grad()
# td_loss.backward()
# for param in self.DRQN.model.get_weights():
# clip the gradient
# param.grad.data.clamp_(-1, 1)
# self.optimizer.step()
return td_loss.history['loss'][0]
else:
return (0)
if __name__ == '__main__':
#################################
parser = argparse.ArgumentParser(description="Evaluate an RL policy")
# rlscope will add custom arguments to the argparse argument parser
# that allow you to customize (e.g., "--rlscope-directory <dir>"
# for where to store results).
rlscope.add_rlscope_arguments(parser)
args = parser.parse_args()
# Using the parsed arguments, rlscope will instantiate a singleton
# profiler instance (rlscope.prof).
rlscope.handle_rlscope_args(
parser=parser,
args=args,
)
# Provide a name for the algorithm and simulator (env) used so we can
# generate meaningful plot labels.
# The "process_name" and "phase_name" are useful identifiers for
# multi-process workloads.
rlscope.prof.set_metadata({
'algo': 'LSTM',
'env': 'CartPole-v1',
})
process_name = 'Real_LSTM_CartPole'
phase_name = process_name
#####################################
env = gym.make('CartPole-v1')
# env = wrap_deepmind(env, scale = False, frame_stack=True)
gamma = 0.99 # discount factor
epsilon_max = 1 # epsilon greedy parameter max
epsilon_min = 0.01 # epsilon greedy parameter min
eps_decay = 3000 # epsilon greedy parameter decay
frames = 3000 # total training frames
USE_CUDA = True # training with gpu
learning_rate = 2e-4 # learning rate
max_buff = 10000 # maximum buffer size
update_tar_interval = 1000 # frames for updating target network
batch_size = 32
max_seq = 8
print_interval = 100
log_interval = 1000
learning_start = 1000 # 10000
action_space = env.action_space
print("action space is:", action_space)
# action_dim = env.action_space.n
state_dim = env.observation_space.shape[0]
# state_channel = env.observation_space.shape[2]
agent = DRQNAgent(action_space=action_space, USE_CUDA=USE_CUDA, lr=learning_rate, max_seq=max_seq,
batch_size=batch_size)
frame = env.reset()
episode_reward = 0
print("episode_reward is:", episode_reward)
all_rewards = []
print("all_rewards is:", all_rewards)
losses = []
print("losses is:", losses)
episode_num = 0
print("episode_num is:", episode_num)
# tensorboard
# summary_writer = SummaryWriter(log_dir="DRQN", comment="good_makeatari")
# e-greedy decay
epsilon_by_frame = lambda frame_idx: epsilon_min + (epsilon_max - epsilon_min) * math.exp(
-1. * frame_idx / eps_decay)
# plt.plot([epsilon_by_frame(i) for i in range(10000)])
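# With the values above (epsilon_max=1, epsilon_min=0.01, eps_decay=3000) this schedule
# gives roughly: frame 0 -> 1.00, frame 3000 -> 0.37, frame 6000 -> 0.14, frame 9000 -> 0.06.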
for i in range(frames):
# print("i is:",i)
epsilon = epsilon_by_frame(i)
state_tensor = agent.observe(frame)
# print("state_tensor is", state_tensor)
action = agent.act(state_tensor, epsilon)
# print("action is: ",action)
next_frame, reward, done, _ = env.step(action)
episode_reward += reward
agent.rec_memory_buffer.push(frame, action, reward, next_frame, done)
frame = next_frame
loss = 0
if agent.rec_memory_buffer.size() >= learning_start:
# print("learn from experience+++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
loss = agent.learn_from_experience(batch_size)
losses.append(loss)
if i % print_interval == 0:
mean = np.mean(all_rewards[-10:]).item() if all_rewards else 0.0  # guard against an empty reward history
print("@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@")
print("frames: %5d, reward: %5f, loss: %4f, epsilon: %5f, episode: %4d" % (
i, mean, loss, epsilon, episode_num))
# summary_writer.add_scalar("Temporal Difference Loss", loss, i)
# summary_writer.add_scalar("Mean Reward", np.mean(all_rewards[-10:]), i)
# summary_writer.add_scalar("Epsilon", epsilon, i)
if i % update_tar_interval == 0:
print("TARGET GET UPDATED")
print("****************************************************************************************")
agent.DRQN_target.set_weights(agent.DRQN.get_weights())
# agent.DRQN_target.load_state_dict(agent.DRQN.state_dict())
if done:
|
# summary_writer.close()
observation = env.reset()
count = 0
reward_sum = 0
random_episodes = 0
while random_episodes < 10:
# env.render()
x = observation.reshape(-1, 4)
x = [x]
x = np.array(x)
q_values = agent.DRQN.model.predict(x)
# print(q_values)
action = np.argmax(q_values)
# print(action)
observation, reward, done, _ = env.step(action)
count += 1
reward_sum += reward
if done:
print("Reward for this episode was: {}, turns was: {}".format(reward_sum, count))
random_episodes += 1
reward_sum = 0
count = 0
observation = env.reset()
env.close()
| print("IT IS DONE")
print('i is', i)
print("--------------------------------------------------------------------------------------")
frame = env.reset()
# reset hidden to None
all_rewards.append(episode_reward)
print("all reward now is: ", all_rewards[-10:])
episode_reward = 0
episode_num += 1
avg_reward = float(np.mean(all_rewards[:])) | conditional_block |
linker.rs | use crate::{
Extern, ExternType, Func, FuncType, GlobalType, ImportType, Instance, IntoFunc, Module, Store,
};
use anyhow::{anyhow, bail, Result};
use std::collections::hash_map::{Entry, HashMap};
use std::rc::Rc;
/// Structure used to link wasm modules/instances together.
///
/// This structure is used to assist in instantiating a [`Module`]. A `Linker`
/// is a way of performing name resolution to make instantiating a module easier
/// (as opposed to calling [`Instance::new`]). `Linker` is a name-based resolver
/// where names are dynamically defined and then used to instantiate a
/// [`Module`]. The goal of a `Linker` is to have a one-argument method,
/// [`Linker::instantiate`], which takes a [`Module`] and produces an
/// [`Instance`]. This method will automatically select all the right imports
/// for the [`Module`] to be instantiated, and will otherwise return an error
/// if an import isn't satisfied.
///
/// ## Name Resolution
///
/// As mentioned previously, `Linker` is a form of name resolver. It will be
/// using the string-based names of imports on a module to attempt to select a
/// matching item to hook up to it. This name resolution has two levels of
/// namespaces, a module level and a name level. Each item is defined within a
/// module and then has its own name. This basically follows the wasm standard
/// for modularization.
///
/// Names in a `Linker` can be defined twice, but only for different signatures
/// of items. This means that every item defined in a `Linker` has a unique
/// name/type pair. For example you can define two functions with the module
/// name `foo` and item name `bar`, so long as they have different function
/// signatures. Currently duplicate memories and tables are not allowed, only
/// one-per-name is allowed.
///
/// Note that allowing duplicates by shadowing the previous definition can be
/// controlled with the [`Linker::allow_shadowing`] method as well.
pub struct Linker {
store: Store,
string2idx: HashMap<Rc<str>, usize>,
strings: Vec<Rc<str>>,
map: HashMap<ImportKey, Extern>,
allow_shadowing: bool,
}
#[derive(Hash, PartialEq, Eq)]
struct ImportKey {
name: usize,
module: usize,
kind: ImportKind,
}
#[derive(Hash, PartialEq, Eq, Debug)]
enum ImportKind {
Func(FuncType),
Global(GlobalType),
Memory,
Table,
}
impl Linker {
/// Creates a new [`Linker`].
///
/// This function will create a new [`Linker`] which is ready to start
/// linking modules. All items defined in this linker and produced by this
/// linker will be connected with `store` and must come from the same
/// `store`.
///
/// # Examples
///
/// ```
/// use wasmtime::{Linker, Store};
///
/// let store = Store::default();
/// let mut linker = Linker::new(&store);
/// // ...
/// ```
pub fn new(store: &Store) -> Linker {
Linker {
store: store.clone(),
map: HashMap::new(),
string2idx: HashMap::new(),
strings: Vec::new(),
allow_shadowing: false,
}
}
/// Configures whether this [`Linker`] will shadow previous duplicate
/// definitions of the same signature.
///
/// By default a [`Linker`] will disallow duplicate definitions of the same
/// signature. This method, however, can be used to instead allow duplicates
/// and have the latest definition take precedence when linking modules.
///
/// # Examples
///
/// ```
/// # use wasmtime::*;
/// # fn main() -> anyhow::Result<()> {
/// # let store = Store::default();
/// let mut linker = Linker::new(&store);
/// linker.func("", "", || {})?;
///
/// // by default, duplicates are disallowed
/// assert!(linker.func("", "", || {}).is_err());
///
/// // but shadowing can be configured to be allowed as well
/// linker.allow_shadowing(true);
/// linker.func("", "", || {})?;
/// # Ok(())
/// # }
/// ```
pub fn allow_shadowing(&mut self, allow: bool) -> &mut Linker {
self.allow_shadowing = allow;
self
}
/// Defines a new item in this [`Linker`].
///
/// This method will add a new definition, by name, to this instance of
/// [`Linker`]. The `module` and `name` provided are what to name the
/// `item`.
///
/// # Errors
///
/// Returns an error if the `module` and `name` already identify an item
/// of the same type as the `item` provided and if shadowing is disallowed.
/// For more information see the documentation on [`Linker`].
///
/// Also returns an error if `item` comes from a different store than this
/// [`Linker`] was created with.
///
/// # Examples
///
/// ```
/// # use wasmtime::*;
/// # fn main() -> anyhow::Result<()> {
/// # let store = Store::default();
/// let mut linker = Linker::new(&store);
/// let ty = GlobalType::new(ValType::I32, Mutability::Const);
/// let global = Global::new(&store, ty, Val::I32(0x1234))?;
/// linker.define("host", "offset", global)?;
///
/// let wat = r#"
/// (module
/// (import "host" "offset" (global i32))
/// (memory 1)
/// (data (global.get 0) "foo")
/// )
/// "#;
/// let module = Module::new(&store, wat)?;
/// linker.instantiate(&module)?;
/// # Ok(())
/// # }
/// ```
pub fn define(
&mut self,
module: &str,
name: &str,
item: impl Into<Extern>,
) -> Result<&mut Self> {
self._define(module, name, item.into())
}
fn _define(&mut self, module: &str, name: &str, item: Extern) -> Result<&mut Self> {
if !item.comes_from_same_store(&self.store) {
bail!("all linker items must be from the same store");
}
self.insert(module, name, item)?;
Ok(self)
}
/// Convenience wrapper to define a function import.
///
/// This method is a convenience wrapper around [`Linker::define`] which
/// internally delegates to [`Func::wrap`].
///
/// # Errors
///
/// Returns an error if the `module` and `name` already identify an item
/// of the same type as the `item` provided and if shadowing is disallowed.
/// For more information see the documentation on [`Linker`].
/// | /// ```
/// # use wasmtime::*;
/// # fn main() -> anyhow::Result<()> {
/// # let store = Store::default();
/// let mut linker = Linker::new(&store);
/// linker.func("host", "double", |x: i32| x * 2)?;
/// linker.func("host", "log_i32", |x: i32| println!("{}", x))?;
/// linker.func("host", "log_str", |caller: Caller, ptr: i32, len: i32| {
/// // ...
/// })?;
///
/// let wat = r#"
/// (module
/// (import "host" "double" (func (param i32) (result i32)))
/// (import "host" "log_i32" (func (param i32)))
/// (import "host" "log_str" (func (param i32 i32)))
/// )
/// "#;
/// let module = Module::new(&store, wat)?;
/// linker.instantiate(&module)?;
/// # Ok(())
/// # }
/// ```
pub fn func<Params, Args>(
&mut self,
module: &str,
name: &str,
func: impl IntoFunc<Params, Args>,
) -> Result<&mut Self> {
self._define(module, name, Func::wrap(&self.store, func).into())
}
/// Convenience wrapper to define an entire [`Instance`] in this linker.
///
/// This function is a convenience wrapper around [`Linker::define`] which
/// will define all exports on `instance` into this linker. The module name
/// for each export is `module_name`, and the name for each export is the
/// name in the instance itself.
///
/// # Errors
///
/// Returns an error if any item is redefined twice in this linker (for
/// example the same `module_name` was already defined) and shadowing is
/// disallowed, or if `instance` comes from a different [`Store`] than this
/// [`Linker`] originally was created with.
///
/// # Examples
///
/// ```
/// # use wasmtime::*;
/// # fn main() -> anyhow::Result<()> {
/// # let store = Store::default();
/// let mut linker = Linker::new(&store);
///
/// // Instantiate a small instance...
/// let wat = r#"(module (func (export "run") ))"#;
/// let module = Module::new(&store, wat)?;
/// let instance = linker.instantiate(&module)?;
///
/// // ... and inform the linker that the name of this instance is
/// // `instance1`. This defines the `instance1::run` name for our next
/// // module to use.
/// linker.instance("instance1", &instance)?;
///
/// let wat = r#"
/// (module
/// (import "instance1" "run" (func $instance1_run))
/// (func (export "run")
/// call $instance1_run
/// )
/// )
/// "#;
/// let module = Module::new(&store, wat)?;
/// let instance = linker.instantiate(&module)?;
/// # Ok(())
/// # }
/// ```
pub fn instance(&mut self, module_name: &str, instance: &Instance) -> Result<&mut Self> {
if !Store::same(&self.store, instance.store()) {
bail!("all linker items must be from the same store");
}
for export in instance.exports() {
self.insert(module_name, export.name(), export.into_extern())?;
}
Ok(self)
}
/// Aliases one module's name as another.
///
/// This method will alias all items currently defined under `module` so that
/// they are also defined under the name `as_module`.
///
/// # Errors
///
/// Returns an error if any shadowing violations happen while defining new
/// items.
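///
/// # Examples
///
/// A minimal sketch (the module names here are illustrative only):
///
/// ```
/// # use wasmtime::*;
/// # fn main() -> anyhow::Result<()> {
/// # let store = Store::default();
/// let mut linker = Linker::new(&store);
/// linker.func("host", "double", |x: i32| x * 2)?;
///
/// // Make every `host::*` definition also available as `host_v2::*`.
/// linker.alias("host", "host_v2")?;
/// # Ok(())
/// # }
/// ```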
pub fn alias(&mut self, module: &str, as_module: &str) -> Result<()> {
let items = self
.iter()
.filter(|(m, _, _)| *m == module)
.map(|(_, name, item)| (name.to_string(), item))
.collect::<Vec<_>>();
for (name, item) in items {
self.define(as_module, &name, item)?;
}
Ok(())
}
fn insert(&mut self, module: &str, name: &str, item: Extern) -> Result<()> {
let key = self.import_key(module, name, item.ty());
match self.map.entry(key) {
Entry::Occupied(o) if !self.allow_shadowing => bail!(
"import of `{}::{}` with kind {:?} defined twice",
module,
name,
o.key().kind,
),
Entry::Occupied(mut o) => {
o.insert(item);
}
Entry::Vacant(v) => {
v.insert(item);
}
}
Ok(())
}
fn import_key(&mut self, module: &str, name: &str, ty: ExternType) -> ImportKey {
ImportKey {
module: self.intern_str(module),
name: self.intern_str(name),
kind: self.import_kind(ty),
}
}
fn import_kind(&self, ty: ExternType) -> ImportKind {
match ty {
ExternType::Func(f) => ImportKind::Func(f),
ExternType::Global(f) => ImportKind::Global(f),
ExternType::Memory(_) => ImportKind::Memory,
ExternType::Table(_) => ImportKind::Table,
}
}
fn intern_str(&mut self, string: &str) -> usize {
if let Some(idx) = self.string2idx.get(string) {
return *idx;
}
let string: Rc<str> = string.into();
let idx = self.strings.len();
self.strings.push(string.clone());
self.string2idx.insert(string, idx);
idx
}
/// Attempts to instantiate the `module` provided.
///
/// This method will attempt to assemble a list of imports that correspond
/// to the imports required by the [`Module`] provided. This list
/// of imports is then passed to [`Instance::new`] to continue the
/// instantiation process.
///
/// Each import of `module` will be looked up in this [`Linker`] and must
/// have previously been defined. If it was previously defined with an
/// incorrect signature or if it was not previously defined then an error
/// will be returned because the import cannot be satisfied.
///
/// # Errors
///
/// This method can fail because an import may not be found, or because
/// instantiation itself may fail. For information on instantiation
/// failures see [`Instance::new`].
///
/// # Examples
///
/// ```
/// # use wasmtime::*;
/// # fn main() -> anyhow::Result<()> {
/// # let store = Store::default();
/// let mut linker = Linker::new(&store);
/// linker.func("host", "double", |x: i32| x * 2)?;
///
/// let wat = r#"
/// (module
/// (import "host" "double" (func (param i32) (result i32)))
/// )
/// "#;
/// let module = Module::new(&store, wat)?;
/// linker.instantiate(&module)?;
/// # Ok(())
/// # }
/// ```
pub fn instantiate(&self, module: &Module) -> Result<Instance> {
let mut imports = Vec::new();
for import in module.imports() {
if let Some(item) = self.get(&import) {
imports.push(item);
continue;
}
let mut options = String::new();
for i in self.map.keys() {
if &*self.strings[i.module] != import.module()
|| &*self.strings[i.name] != import.name()
{
continue;
}
options.push_str(" * ");
options.push_str(&format!("{:?}", i.kind));
options.push_str("\n");
}
if options.len() == 0 {
bail!(
"unknown import: `{}::{}` has not been defined",
import.module(),
import.name()
)
}
bail!(
"incompatible import type for `{}::{}` specified\n\
desired signature was: {:?}\n\
signatures available:\n\n{}",
import.module(),
import.name(),
import.ty(),
options,
)
}
Instance::new(module, &imports)
}
/// Returns the [`Store`] that this linker is connected to.
pub fn store(&self) -> &Store {
&self.store
}
/// Returns an iterator over all items defined in this `Linker`.
///
/// The iterator returned will yield 3-tuples where the first two elements
/// are the module name and item name for the external item, and the third
/// item is the item itself that is defined.
///
/// Note that multiple `Extern` items may be defined for the same
/// module/name pair.
pub fn iter(&self) -> impl Iterator<Item = (&str, &str, Extern)> {
self.map.iter().map(move |(key, item)| {
(
&*self.strings[key.module],
&*self.strings[key.name],
item.clone(),
)
})
}
/// Looks up a value in this `Linker` which matches the `import` type
/// provided.
///
/// Returns `None` if no match was found.
pub fn get(&self, import: &ImportType) -> Option<Extern> {
let key = ImportKey {
module: *self.string2idx.get(import.module())?,
name: *self.string2idx.get(import.name())?,
kind: self.import_kind(import.ty()),
};
self.map.get(&key).cloned()
}
/// Returns all items defined for the `module` and `name` pair.
///
/// This may return an empty iterator, but it may also return multiple items
/// if the module/name have been defined twice.
pub fn get_by_name<'a: 'p, 'p>(
&'a self,
module: &'p str,
name: &'p str,
) -> impl Iterator<Item = &'a Extern> + 'p {
self.map
.iter()
.filter(move |(key, _item)| {
&*self.strings[key.module] == module && &*self.strings[key.name] == name
})
.map(|(_, item)| item)
}
/// Returns the single item defined for the `module` and `name` pair.
///
/// Unlike the similar [`Linker::get_by_name`] method this function returns
/// a single `Extern` item. If the `module` and `name` pair isn't defined
/// in this linker then an error is returned. If more than one value exists
/// for the `module` and `name` pairs, then an error is returned as well.
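///
/// # Examples
///
/// A minimal sketch (names are illustrative only):
///
/// ```
/// # use wasmtime::*;
/// # fn main() -> anyhow::Result<()> {
/// # let store = Store::default();
/// let mut linker = Linker::new(&store);
/// linker.func("host", "double", |x: i32| x * 2)?;
/// let item = linker.get_one_by_name("host", "double")?;
/// # Ok(())
/// # }
/// ```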
pub fn get_one_by_name(&self, module: &str, name: &str) -> Result<Extern> {
let mut items = self.get_by_name(module, name);
let ret = items
.next()
.ok_or_else(|| anyhow!("no item named `{}` in `{}`", name, module))?;
if items.next().is_some() {
bail!("too many items named `{}` in `{}`", name, module);
}
Ok(ret.clone())
}
} | /// # Examples
/// | random_line_split |
linker.rs | use crate::{
Extern, ExternType, Func, FuncType, GlobalType, ImportType, Instance, IntoFunc, Module, Store,
};
use anyhow::{anyhow, bail, Result};
use std::collections::hash_map::{Entry, HashMap};
use std::rc::Rc;
/// Structure used to link wasm modules/instances together.
///
/// This structure is used to assist in instantiating a [`Module`]. A `Linker`
/// is a way of performing name resolution to make instantiating a module easier
/// (as opposed to calling [`Instance::new`]). `Linker` is a name-based resolver
/// where names are dynamically defined and then used to instantiate a
/// [`Module`]. The goal of a `Linker` is to have a one-argument method,
/// [`Linker::instantiate`], which takes a [`Module`] and produces an
/// [`Instance`]. This method will automatically select all the right imports
/// for the [`Module`] to be instantiated, and will otherwise return an error
/// if an import isn't satisfied.
///
/// ## Name Resolution
///
/// As mentioned previously, `Linker` is a form of name resolver. It will be
/// using the string-based names of imports on a module to attempt to select a
/// matching item to hook up to it. This name resolution has two levels of
/// namespaces, a module level and a name level. Each item is defined within a
/// module and then has its own name. This basically follows the wasm standard
/// for modularization.
///
/// Names in a `Linker` can be defined twice, but only for different signatures
/// of items. This means that every item defined in a `Linker` has a unique
/// name/type pair. For example you can define two functions with the module
/// name `foo` and item name `bar`, so long as they have different function
/// signatures. Currently duplicate memories and tables are not allowed, only
/// one-per-name is allowed.
///
/// Note that allowing duplicates by shadowing the previous definition can be
/// controlled with the [`Linker::allow_shadowing`] method as well.
pub struct Linker {
store: Store,
string2idx: HashMap<Rc<str>, usize>,
strings: Vec<Rc<str>>,
map: HashMap<ImportKey, Extern>,
allow_shadowing: bool,
}
#[derive(Hash, PartialEq, Eq)]
struct ImportKey {
name: usize,
module: usize,
kind: ImportKind,
}
#[derive(Hash, PartialEq, Eq, Debug)]
enum ImportKind {
Func(FuncType),
Global(GlobalType),
Memory,
Table,
}
impl Linker {
/// Creates a new [`Linker`].
///
/// This function will create a new [`Linker`] which is ready to start
/// linking modules. All items defined in this linker and produced by this
/// linker will be connected with `store` and must come from the same
/// `store`.
///
/// # Examples
///
/// ```
/// use wasmtime::{Linker, Store};
///
/// let store = Store::default();
/// let mut linker = Linker::new(&store);
/// // ...
/// ```
pub fn new(store: &Store) -> Linker {
Linker {
store: store.clone(),
map: HashMap::new(),
string2idx: HashMap::new(),
strings: Vec::new(),
allow_shadowing: false,
}
}
/// Configures whether this [`Linker`] will shadow previous duplicate
/// definitions of the same signature.
///
/// By default a [`Linker`] will disallow duplicate definitions of the same
/// signature. This method, however, can be used to instead allow duplicates
/// and have the latest definition take precedence when linking modules.
///
/// # Examples
///
/// ```
/// # use wasmtime::*;
/// # fn main() -> anyhow::Result<()> {
/// # let store = Store::default();
/// let mut linker = Linker::new(&store);
/// linker.func("", "", || {})?;
///
/// // by default, duplicates are disallowed
/// assert!(linker.func("", "", || {}).is_err());
///
/// // but shadowing can be configured to be allowed as well
/// linker.allow_shadowing(true);
/// linker.func("", "", || {})?;
/// # Ok(())
/// # }
/// ```
pub fn allow_shadowing(&mut self, allow: bool) -> &mut Linker {
self.allow_shadowing = allow;
self
}
/// Defines a new item in this [`Linker`].
///
/// This method will add a new definition, by name, to this instance of
/// [`Linker`]. The `module` and `name` provided are what to name the
/// `item`.
///
/// # Errors
///
/// Returns an error if the `module` and `name` already identify an item
/// of the same type as the `item` provided and if shadowing is disallowed.
/// For more information see the documentation on [`Linker`].
///
/// Also returns an error if `item` comes from a different store than this
/// [`Linker`] was created with.
///
/// # Examples
///
/// ```
/// # use wasmtime::*;
/// # fn main() -> anyhow::Result<()> {
/// # let store = Store::default();
/// let mut linker = Linker::new(&store);
/// let ty = GlobalType::new(ValType::I32, Mutability::Const);
/// let global = Global::new(&store, ty, Val::I32(0x1234))?;
/// linker.define("host", "offset", global)?;
///
/// let wat = r#"
/// (module
/// (import "host" "offset" (global i32))
/// (memory 1)
/// (data (global.get 0) "foo")
/// )
/// "#;
/// let module = Module::new(&store, wat)?;
/// linker.instantiate(&module)?;
/// # Ok(())
/// # }
/// ```
pub fn define(
&mut self,
module: &str,
name: &str,
item: impl Into<Extern>,
) -> Result<&mut Self> {
self._define(module, name, item.into())
}
fn _define(&mut self, module: &str, name: &str, item: Extern) -> Result<&mut Self> {
if !item.comes_from_same_store(&self.store) {
bail!("all linker items must be from the same store");
}
self.insert(module, name, item)?;
Ok(self)
}
/// Convenience wrapper to define a function import.
///
/// This method is a convenience wrapper around [`Linker::define`] which
/// internally delegates to [`Func::wrap`].
///
/// # Errors
///
/// Returns an error if the `module` and `name` already identify an item
/// of the same type as the `item` provided and if shadowing is disallowed.
/// For more information see the documentation on [`Linker`].
///
/// # Examples
///
/// ```
/// # use wasmtime::*;
/// # fn main() -> anyhow::Result<()> {
/// # let store = Store::default();
/// let mut linker = Linker::new(&store);
/// linker.func("host", "double", |x: i32| x * 2)?;
/// linker.func("host", "log_i32", |x: i32| println!("{}", x))?;
/// linker.func("host", "log_str", |caller: Caller, ptr: i32, len: i32| {
/// // ...
/// })?;
///
/// let wat = r#"
/// (module
/// (import "host" "double" (func (param i32) (result i32)))
/// (import "host" "log_i32" (func (param i32)))
/// (import "host" "log_str" (func (param i32 i32)))
/// )
/// "#;
/// let module = Module::new(&store, wat)?;
/// linker.instantiate(&module)?;
/// # Ok(())
/// # }
/// ```
pub fn func<Params, Args>(
&mut self,
module: &str,
name: &str,
func: impl IntoFunc<Params, Args>,
) -> Result<&mut Self> {
self._define(module, name, Func::wrap(&self.store, func).into())
}
/// Convenience wrapper to define an entire [`Instance`] in this linker.
///
/// This function is a convenience wrapper around [`Linker::define`] which
/// will define all exports on `instance` into this linker. The module name
/// for each export is `module_name`, and the name for each export is the
/// name in the instance itself.
///
/// # Errors
///
/// Returns an error if any item is redefined twice in this linker (for
/// example the same `module_name` was already defined) and shadowing is
/// disallowed, or if `instance` comes from a different [`Store`] than this
/// [`Linker`] originally was created with.
///
/// # Examples
///
/// ```
/// # use wasmtime::*;
/// # fn main() -> anyhow::Result<()> {
/// # let store = Store::default();
/// let mut linker = Linker::new(&store);
///
/// // Instantiate a small instance...
/// let wat = r#"(module (func (export "run") ))"#;
/// let module = Module::new(&store, wat)?;
/// let instance = linker.instantiate(&module)?;
///
/// // ... and inform the linker that the name of this instance is
/// // `instance1`. This defines the `instance1::run` name for our next
/// // module to use.
/// linker.instance("instance1", &instance)?;
///
/// let wat = r#"
/// (module
/// (import "instance1" "run" (func $instance1_run))
/// (func (export "run")
/// call $instance1_run
/// )
/// )
/// "#;
/// let module = Module::new(&store, wat)?;
/// let instance = linker.instantiate(&module)?;
/// # Ok(())
/// # }
/// ```
pub fn instance(&mut self, module_name: &str, instance: &Instance) -> Result<&mut Self> {
if !Store::same(&self.store, instance.store()) {
bail!("all linker items must be from the same store");
}
for export in instance.exports() {
self.insert(module_name, export.name(), export.into_extern())?;
}
Ok(self)
}
/// Aliases one module's name as another.
///
/// This method will alias all items currently defined under `module` so that
/// they are also defined under the name `as_module`.
///
/// # Errors
///
/// Returns an error if any shadowing violations happen while defining new
/// items.
pub fn alias(&mut self, module: &str, as_module: &str) -> Result<()> {
let items = self
.iter()
.filter(|(m, _, _)| *m == module)
.map(|(_, name, item)| (name.to_string(), item))
.collect::<Vec<_>>();
for (name, item) in items {
self.define(as_module, &name, item)?;
}
Ok(())
}
fn insert(&mut self, module: &str, name: &str, item: Extern) -> Result<()> {
let key = self.import_key(module, name, item.ty());
match self.map.entry(key) {
Entry::Occupied(o) if !self.allow_shadowing => bail!(
"import of `{}::{}` with kind {:?} defined twice",
module,
name,
o.key().kind,
),
Entry::Occupied(mut o) => {
o.insert(item);
}
Entry::Vacant(v) => {
v.insert(item);
}
}
Ok(())
}
fn | (&mut self, module: &str, name: &str, ty: ExternType) -> ImportKey {
ImportKey {
module: self.intern_str(module),
name: self.intern_str(name),
kind: self.import_kind(ty),
}
}
fn import_kind(&self, ty: ExternType) -> ImportKind {
match ty {
ExternType::Func(f) => ImportKind::Func(f),
ExternType::Global(f) => ImportKind::Global(f),
ExternType::Memory(_) => ImportKind::Memory,
ExternType::Table(_) => ImportKind::Table,
}
}
fn intern_str(&mut self, string: &str) -> usize {
if let Some(idx) = self.string2idx.get(string) {
return *idx;
}
let string: Rc<str> = string.into();
let idx = self.strings.len();
self.strings.push(string.clone());
self.string2idx.insert(string, idx);
idx
}
/// Attempts to instantiate the `module` provided.
///
/// This method will attempt to assemble a list of imports that correspond
/// to the imports required by the [`Module`] provided. This list
/// of imports is then passed to [`Instance::new`] to continue the
/// instantiation process.
///
/// Each import of `module` will be looked up in this [`Linker`] and must
/// have previously been defined. If it was previously defined with an
/// incorrect signature or if it was not previously defined then an error
/// will be returned because the import cannot be satisfied.
///
/// # Errors
///
/// This method can fail because an import may not be found, or because
/// instantiation itself may fail. For information on instantiation
/// failures see [`Instance::new`].
///
/// # Examples
///
/// ```
/// # use wasmtime::*;
/// # fn main() -> anyhow::Result<()> {
/// # let store = Store::default();
/// let mut linker = Linker::new(&store);
/// linker.func("host", "double", |x: i32| x * 2)?;
///
/// let wat = r#"
/// (module
/// (import "host" "double" (func (param i32) (result i32)))
/// )
/// "#;
/// let module = Module::new(&store, wat)?;
/// linker.instantiate(&module)?;
/// # Ok(())
/// # }
/// ```
pub fn instantiate(&self, module: &Module) -> Result<Instance> {
let mut imports = Vec::new();
for import in module.imports() {
if let Some(item) = self.get(&import) {
imports.push(item);
continue;
}
let mut options = String::new();
for i in self.map.keys() {
if &*self.strings[i.module] != import.module()
|| &*self.strings[i.name] != import.name()
{
continue;
}
options.push_str(" * ");
options.push_str(&format!("{:?}", i.kind));
options.push_str("\n");
}
if options.len() == 0 {
bail!(
"unknown import: `{}::{}` has not been defined",
import.module(),
import.name()
)
}
bail!(
"incompatible import type for `{}::{}` specified\n\
desired signature was: {:?}\n\
signatures available:\n\n{}",
import.module(),
import.name(),
import.ty(),
options,
)
}
Instance::new(module, &imports)
}
/// Returns the [`Store`] that this linker is connected to.
pub fn store(&self) -> &Store {
&self.store
}
/// Returns an iterator over all items defined in this `Linker`.
///
/// The iterator returned will yield 3-tuples where the first two elements
/// are the module name and item name for the external item, and the third
/// item is the item itself that is defined.
///
/// Note that multiple `Extern` items may be defined for the same
/// module/name pair.
pub fn iter(&self) -> impl Iterator<Item = (&str, &str, Extern)> {
self.map.iter().map(move |(key, item)| {
(
&*self.strings[key.module],
&*self.strings[key.name],
item.clone(),
)
})
}
/// Looks up a value in this `Linker` which matches the `import` type
/// provided.
///
/// Returns `None` if no match was found.
pub fn get(&self, import: &ImportType) -> Option<Extern> {
let key = ImportKey {
module: *self.string2idx.get(import.module())?,
name: *self.string2idx.get(import.name())?,
kind: self.import_kind(import.ty()),
};
self.map.get(&key).cloned()
}
/// Returns all items defined for the `module` and `name` pair.
///
/// This may return an empty iterator, but it may also return multiple items
/// if the module/name have been defined twice.
pub fn get_by_name<'a: 'p, 'p>(
&'a self,
module: &'p str,
name: &'p str,
) -> impl Iterator<Item = &'a Extern> + 'p {
self.map
.iter()
.filter(move |(key, _item)| {
&*self.strings[key.module] == module && &*self.strings[key.name] == name
})
.map(|(_, item)| item)
}
/// Returns the single item defined for the `module` and `name` pair.
///
/// Unlike the similar [`Linker::get_by_name`] method this function returns
/// a single `Extern` item. If the `module` and `name` pair isn't defined
/// in this linker then an error is returned. If more than one value exists
/// for the `module` and `name` pairs, then an error is returned as well.
pub fn get_one_by_name(&self, module: &str, name: &str) -> Result<Extern> {
let mut items = self.get_by_name(module, name);
let ret = items
.next()
.ok_or_else(|| anyhow!("no item named `{}` in `{}`", name, module))?;
if items.next().is_some() {
bail!("too many items named `{}` in `{}`", name, module);
}
Ok(ret.clone())
}
}
| import_key | identifier_name |
linker.rs | use crate::{
Extern, ExternType, Func, FuncType, GlobalType, ImportType, Instance, IntoFunc, Module, Store,
};
use anyhow::{anyhow, bail, Result};
use std::collections::hash_map::{Entry, HashMap};
use std::rc::Rc;
/// Structure used to link wasm modules/instances together.
///
/// This structure is used to assist in instantiating a [`Module`]. A `Linker`
/// is a way of performing name resolution to make instantiating a module easier
/// (as opposed to calling [`Instance::new`]). `Linker` is a name-based resolver
/// where names are dynamically defined and then used to instantiate a
/// [`Module`]. The goal of a `Linker` is to have a one-argument method,
/// [`Linker::instantiate`], which takes a [`Module`] and produces an
/// [`Instance`]. This method will automatically select all the right imports
/// for the [`Module`] to be instantiated, and will otherwise return an error
/// if an import isn't satisfied.
///
/// ## Name Resolution
///
/// As mentioned previously, `Linker` is a form of name resolver. It will be
/// using the string-based names of imports on a module to attempt to select a
/// matching item to hook up to it. This name resolution has two levels of
/// namespaces, a module level and a name level. Each item is defined within a
/// module and then has its own name. This basically follows the wasm standard
/// for modularization.
///
/// Names in a `Linker` can be defined twice, but only for different signatures
/// of items. This means that every item defined in a `Linker` has a unique
/// name/type pair. For example you can define two functions with the module
/// name `foo` and item name `bar`, so long as they have different function
/// signatures. Currently duplicate memories and tables are not allowed, only
/// one-per-name is allowed.
///
/// Note that allowing duplicates by shadowing the previous definition can be
/// controlled with the [`Linker::allow_shadowing`] method as well.
pub struct Linker {
store: Store,
string2idx: HashMap<Rc<str>, usize>,
strings: Vec<Rc<str>>,
map: HashMap<ImportKey, Extern>,
allow_shadowing: bool,
}
#[derive(Hash, PartialEq, Eq)]
struct ImportKey {
name: usize,
module: usize,
kind: ImportKind,
}
#[derive(Hash, PartialEq, Eq, Debug)]
enum ImportKind {
Func(FuncType),
Global(GlobalType),
Memory,
Table,
}
impl Linker {
/// Creates a new [`Linker`].
///
/// This function will create a new [`Linker`] which is ready to start
/// linking modules. All items defined in this linker and produced by this
/// linker will be connected with `store` and must come from the same
/// `store`.
///
/// # Examples
///
/// ```
/// use wasmtime::{Linker, Store};
///
/// let store = Store::default();
/// let mut linker = Linker::new(&store);
/// // ...
/// ```
pub fn new(store: &Store) -> Linker {
Linker {
store: store.clone(),
map: HashMap::new(),
string2idx: HashMap::new(),
strings: Vec::new(),
allow_shadowing: false,
}
}
/// Configures whether this [`Linker`] will shadow previous duplicate
/// definitions of the same signature.
///
/// By default a [`Linker`] will disallow duplicate definitions of the same
/// signature. This method, however, can be used to instead allow duplicates
/// and have the latest definition take precedence when linking modules.
///
/// # Examples
///
/// ```
/// # use wasmtime::*;
/// # fn main() -> anyhow::Result<()> {
/// # let store = Store::default();
/// let mut linker = Linker::new(&store);
/// linker.func("", "", || {})?;
///
/// // by default, duplicates are disallowed
/// assert!(linker.func("", "", || {}).is_err());
///
/// // but shadowing can be configured to be allowed as well
/// linker.allow_shadowing(true);
/// linker.func("", "", || {})?;
/// # Ok(())
/// # }
/// ```
pub fn allow_shadowing(&mut self, allow: bool) -> &mut Linker {
self.allow_shadowing = allow;
self
}
/// Defines a new item in this [`Linker`].
///
/// This method will add a new definition, by name, to this instance of
/// [`Linker`]. The `module` and `name` provided are what to name the
/// `item`.
///
/// # Errors
///
/// Returns an error if the `module` and `name` already identify an item
/// of the same type as the `item` provided and if shadowing is disallowed.
/// For more information see the documentation on [`Linker`].
///
/// Also returns an error if `item` comes from a different store than this
/// [`Linker`] was created with.
///
/// # Examples
///
/// ```
/// # use wasmtime::*;
/// # fn main() -> anyhow::Result<()> {
/// # let store = Store::default();
/// let mut linker = Linker::new(&store);
/// let ty = GlobalType::new(ValType::I32, Mutability::Const);
/// let global = Global::new(&store, ty, Val::I32(0x1234))?;
/// linker.define("host", "offset", global)?;
///
/// let wat = r#"
/// (module
/// (import "host" "offset" (global i32))
/// (memory 1)
/// (data (global.get 0) "foo")
/// )
/// "#;
/// let module = Module::new(&store, wat)?;
/// linker.instantiate(&module)?;
/// # Ok(())
/// # }
/// ```
pub fn define(
&mut self,
module: &str,
name: &str,
item: impl Into<Extern>,
) -> Result<&mut Self> {
self._define(module, name, item.into())
}
fn _define(&mut self, module: &str, name: &str, item: Extern) -> Result<&mut Self> {
if !item.comes_from_same_store(&self.store) {
bail!("all linker items must be from the same store");
}
self.insert(module, name, item)?;
Ok(self)
}
/// Convenience wrapper to define a function import.
///
/// This method is a convenience wrapper around [`Linker::define`] which
/// internally delegates to [`Func::wrap`].
///
/// # Errors
///
/// Returns an error if the `module` and `name` already identify an item
/// of the same type as the `item` provided and if shadowing is disallowed.
/// For more information see the documentation on [`Linker`].
///
/// # Examples
///
/// ```
/// # use wasmtime::*;
/// # fn main() -> anyhow::Result<()> {
/// # let store = Store::default();
/// let mut linker = Linker::new(&store);
/// linker.func("host", "double", |x: i32| x * 2)?;
/// linker.func("host", "log_i32", |x: i32| println!("{}", x))?;
/// linker.func("host", "log_str", |caller: Caller, ptr: i32, len: i32| {
/// // ...
/// })?;
///
/// let wat = r#"
/// (module
/// (import "host" "double" (func (param i32) (result i32)))
/// (import "host" "log_i32" (func (param i32)))
/// (import "host" "log_str" (func (param i32 i32)))
/// )
/// "#;
/// let module = Module::new(&store, wat)?;
/// linker.instantiate(&module)?;
/// # Ok(())
/// # }
/// ```
pub fn func<Params, Args>(
&mut self,
module: &str,
name: &str,
func: impl IntoFunc<Params, Args>,
) -> Result<&mut Self> {
self._define(module, name, Func::wrap(&self.store, func).into())
}
/// Convenience wrapper to define an entire [`Instance`] in this linker.
///
/// This function is a convenience wrapper around [`Linker::define`] which
/// will define all exports on `instance` into this linker. The module name
/// for each export is `module_name`, and the name for each export is the
/// name in the instance itself.
///
/// # Errors
///
/// Returns an error if any item is redefined twice in this linker (for
/// example the same `module_name` was already defined) and shadowing is
/// disallowed, or if `instance` comes from a different [`Store`] than this
/// [`Linker`] originally was created with.
///
/// # Examples
///
/// ```
/// # use wasmtime::*;
/// # fn main() -> anyhow::Result<()> {
/// # let store = Store::default();
/// let mut linker = Linker::new(&store);
///
/// // Instantiate a small instance...
/// let wat = r#"(module (func (export "run") ))"#;
/// let module = Module::new(&store, wat)?;
/// let instance = linker.instantiate(&module)?;
///
/// // ... and inform the linker that the name of this instance is
/// // `instance1`. This defines the `instance1::run` name for our next
/// // module to use.
/// linker.instance("instance1", &instance)?;
///
/// let wat = r#"
/// (module
/// (import "instance1" "run" (func $instance1_run))
/// (func (export "run")
/// call $instance1_run
/// )
/// )
/// "#;
/// let module = Module::new(&store, wat)?;
/// let instance = linker.instantiate(&module)?;
/// # Ok(())
/// # }
/// ```
pub fn instance(&mut self, module_name: &str, instance: &Instance) -> Result<&mut Self> |
/// Aliases one module's name as another.
///
/// This method will alias all items currently defined under `module` so that
/// they are also defined under the name `as_module`.
///
/// # Errors
///
/// Returns an error if any shadowing violations happen while defining new
/// items.
pub fn alias(&mut self, module: &str, as_module: &str) -> Result<()> {
let items = self
.iter()
.filter(|(m, _, _)| *m == module)
.map(|(_, name, item)| (name.to_string(), item))
.collect::<Vec<_>>();
for (name, item) in items {
self.define(as_module, &name, item)?;
}
Ok(())
}
fn insert(&mut self, module: &str, name: &str, item: Extern) -> Result<()> {
let key = self.import_key(module, name, item.ty());
match self.map.entry(key) {
Entry::Occupied(o) if !self.allow_shadowing => bail!(
"import of `{}::{}` with kind {:?} defined twice",
module,
name,
o.key().kind,
),
Entry::Occupied(mut o) => {
o.insert(item);
}
Entry::Vacant(v) => {
v.insert(item);
}
}
Ok(())
}
fn import_key(&mut self, module: &str, name: &str, ty: ExternType) -> ImportKey {
ImportKey {
module: self.intern_str(module),
name: self.intern_str(name),
kind: self.import_kind(ty),
}
}
fn import_kind(&self, ty: ExternType) -> ImportKind {
match ty {
ExternType::Func(f) => ImportKind::Func(f),
ExternType::Global(f) => ImportKind::Global(f),
ExternType::Memory(_) => ImportKind::Memory,
ExternType::Table(_) => ImportKind::Table,
}
}
fn intern_str(&mut self, string: &str) -> usize {
if let Some(idx) = self.string2idx.get(string) {
return *idx;
}
let string: Rc<str> = string.into();
let idx = self.strings.len();
self.strings.push(string.clone());
self.string2idx.insert(string, idx);
idx
}
/// Attempts to instantiate the `module` provided.
///
/// This method will attempt to assemble a list of imports that correspond
/// to the imports required by the [`Module`] provided. This list
/// of imports is then passed to [`Instance::new`] to continue the
/// instantiation process.
///
/// Each import of `module` will be looked up in this [`Linker`] and must
/// have previously been defined. If it was previously defined with an
/// incorrect signature or if it was not previously defined then an error
/// will be returned because the import cannot be satisfied.
///
/// # Errors
///
/// This method can fail because an import may not be found, or because
/// instantiation itself may fail. For information on instantiation
/// failures see [`Instance::new`].
///
/// # Examples
///
/// ```
/// # use wasmtime::*;
/// # fn main() -> anyhow::Result<()> {
/// # let store = Store::default();
/// let mut linker = Linker::new(&store);
/// linker.func("host", "double", |x: i32| x * 2)?;
///
/// let wat = r#"
/// (module
/// (import "host" "double" (func (param i32) (result i32)))
/// )
/// "#;
/// let module = Module::new(&store, wat)?;
/// linker.instantiate(&module)?;
/// # Ok(())
/// # }
/// ```
pub fn instantiate(&self, module: &Module) -> Result<Instance> {
let mut imports = Vec::new();
for import in module.imports() {
if let Some(item) = self.get(&import) {
imports.push(item);
continue;
}
let mut options = String::new();
for i in self.map.keys() {
if &*self.strings[i.module] != import.module()
|| &*self.strings[i.name] != import.name()
{
continue;
}
options.push_str(" * ");
options.push_str(&format!("{:?}", i.kind));
options.push_str("\n");
}
if options.len() == 0 {
bail!(
"unknown import: `{}::{}` has not been defined",
import.module(),
import.name()
)
}
bail!(
"incompatible import type for `{}::{}` specified\n\
desired signature was: {:?}\n\
signatures available:\n\n{}",
import.module(),
import.name(),
import.ty(),
options,
)
}
Instance::new(module, &imports)
}
/// Returns the [`Store`] that this linker is connected to.
pub fn store(&self) -> &Store {
&self.store
}
/// Returns an iterator over all items defined in this `Linker`.
///
/// The iterator returned will yield 3-tuples where the first two elements
/// are the module name and item name for the external item, and the third
/// item is the item itself that is defined.
///
/// Note that multiple `Extern` items may be defined for the same
/// module/name pair.
pub fn iter(&self) -> impl Iterator<Item = (&str, &str, Extern)> {
self.map.iter().map(move |(key, item)| {
(
&*self.strings[key.module],
&*self.strings[key.name],
item.clone(),
)
})
}
/// Looks up a value in this `Linker` which matches the `import` type
/// provided.
///
/// Returns `None` if no match was found.
pub fn get(&self, import: &ImportType) -> Option<Extern> {
let key = ImportKey {
module: *self.string2idx.get(import.module())?,
name: *self.string2idx.get(import.name())?,
kind: self.import_kind(import.ty()),
};
self.map.get(&key).cloned()
}
/// Returns all items defined for the `module` and `name` pair.
///
/// This may return an empty iterator, but it may also return multiple items
/// if the module/name have been defined twice.
pub fn get_by_name<'a: 'p, 'p>(
&'a self,
module: &'p str,
name: &'p str,
) -> impl Iterator<Item = &'a Extern> + 'p {
self.map
.iter()
.filter(move |(key, _item)| {
&*self.strings[key.module] == module && &*self.strings[key.name] == name
})
.map(|(_, item)| item)
}
/// Returns the single item defined for the `module` and `name` pair.
///
/// Unlike the similar [`Linker::get_by_name`] method this function returns
/// a single `Extern` item. If the `module` and `name` pair isn't defined
/// in this linker then an error is returned. If more than one value exists
/// for the `module` and `name` pairs, then an error is returned as well.
pub fn get_one_by_name(&self, module: &str, name: &str) -> Result<Extern> {
let mut items = self.get_by_name(module, name);
let ret = items
.next()
.ok_or_else(|| anyhow!("no item named `{}` in `{}`", name, module))?;
if items.next().is_some() {
bail!("too many items named `{}` in `{}`", name, module);
}
Ok(ret.clone())
}
}
| {
if !Store::same(&self.store, instance.store()) {
bail!("all linker items must be from the same store");
}
for export in instance.exports() {
self.insert(module_name, export.name(), export.into_extern())?;
}
Ok(self)
} | identifier_body |
latcontrol_pid.py | import numpy as np
from selfdrive.controls.lib.pid import PIController
from selfdrive.controls.lib.drive_helpers import get_steer_max
from cereal import car
from cereal import log
from selfdrive.kegman_conf import kegman_conf
from common.numpy_fast import interp
import common.log as trace1
import common.MoveAvg as moveavg1
from selfdrive.config import Conversions as CV
MAX_SPEED = 255.0
class LatControlPID():
def __init__(self, CP):
self.kegman = kegman_conf(CP)
self.deadzone = float(self.kegman.conf['deadzone'])
self.pid = PIController((CP.lateralTuning.pid.kpBP, CP.lateralTuning.pid.kpV),
(CP.lateralTuning.pid.kiBP, CP.lateralTuning.pid.kiV),
k_f=CP.lateralTuning.pid.kf, pos_limit=1.0, sat_limit=CP.steerLimitTimer)
self.angle_steers_des = 0.
self.mpc_frame = 500
self.BP0 = 4
self.steer_Kf1 = [0.00003,0.00003]
self.steer_Ki1 = [0.02,0.04]
self.steer_Kp1 = [0.18,0.20]
self.steer_Kf2 = [0.00005,0.00005]
self.steer_Ki2 = [0.04,0.05]
self.steer_Kp2 = [0.20,0.25]
self.pid_change_flag = 0
self.pre_pid_change_flag = 0
self.pid_BP0_time = 0
self.movAvg = moveavg1.MoveAvg()
self.v_curvature = 256
self.model_sum = 0
self.path_x = np.arange(192)
def calc_va(self, sm, v_ego ):
md = sm['model']
if len(md.path.poly):
path = list(md.path.poly)
self.l_poly = np.array(md.leftLane.poly)
self.r_poly = np.array(md.rightLane.poly)
self.p_poly = np.array(md.path.poly)
# Curvature of polynomial https://en.wikipedia.org/wiki/Curvature#Curvature_of_the_graph_of_a_function
# y = a x^3 + b x^2 + c x + d, y' = 3 a x^2 + 2 b x + c, y'' = 6 a x + 2 b
# k = y'' / (1 + y'^2)^1.5
# TODO: compute max speed without using a list of points and without numpy
y_p = 3 * path[0] * self.path_x**2 + 2 * path[1] * self.path_x + path[2]
y_pp = 6 * path[0] * self.path_x + 2 * path[1]
curv = y_pp / (1. + y_p**2)**1.5
#print( 'curv={}'.format( curv ) )
a_y_max = 2.975 - v_ego * 0.0375 # ~1.85 @ 75mph, ~2.6 @ 25mph
v_curvature = np.sqrt(a_y_max / np.clip(np.abs(curv), 1e-4, None))
model_speed = np.min(v_curvature)
model_speed = max(30.0 * CV.KPH_TO_MS, model_speed) # Don't slow down below 30 km/h (~19 mph)
model_sum = curv[2] * 1000. #np.sum( curv, 0 )
model_speed = model_speed * CV.MS_TO_KPH
if model_speed > MAX_SPEED:
model_speed = MAX_SPEED
else:
model_speed = MAX_SPEED
model_sum = 0
#following = lead_1.status and lead_1.dRel < 45.0 and lead_1.vLeadK > v_ego and lead_1.aLeadK > 0.0
#following = CS.lead_distance < 100.0
#accel_limits = [float(x) for x in calc_cruise_accel_limits(v_ego, following)]
#jerk_limits = [min(-0.1, accel_limits[0]), max(0.1, accel_limits[1])] # TODO: make a separate lookup for jerk tuning
#accel_limits_turns = limit_accel_in_turns(v_ego, CS.angle_steers, accel_limits, self.steerRatio, self.wheelbase )
model_speed = self.movAvg.get_min( model_speed, 10 )
return model_speed, model_sum
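# Worked example (illustrative numbers only): at v_ego = 20 m/s, a_y_max is
# 2.975 - 20 * 0.0375 = 2.225 m/s^2; for a curvature of 0.01 1/m (100 m radius) the
# allowed speed is sqrt(2.225 / 0.01) ~= 14.9 m/s ~= 54 km/h, which calc_va() returns
# (in km/h, floored at 30 km/h and capped at MAX_SPEED) before the moving-average step.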
def update_state( self, sm, CS ):
self.v_curvature, self.model_sum = self.calc_va( sm, CS.vEgo )
def reset( self ):
self.pid.reset()
def linear2_tune( self, CP, v_ego ): # gain scheduling by steering angle
cv_angle = abs(self.angle_steers_des)
cv = [ 2, 15 ] # angle
# Kp
fKp1 = [float(self.steer_Kp1[ 0 ]), float(self.steer_Kp1[ 1 ]) ]
fKp2 = [float(self.steer_Kp2[ 0 ]), float(self.steer_Kp2[ 1 ]) ]
self.steerKp1 = interp( cv_angle, cv, fKp1 )
self.steerKp2 = interp( cv_angle, cv, fKp2 )
self.steerKpV = [ float(self.steerKp1), float(self.steerKp2) ]
# Ki
fKi1 = [float(self.steer_Ki1[ 0 ]), float(self.steer_Ki1[ 1 ]) ]
fKi2 = [float(self.steer_Ki2[ 0 ]), float(self.steer_Ki2[ 1 ]) ]
self.steerKi1 = interp( cv_angle, cv, fKi1 )
self.steerKi2 = interp( cv_angle, cv, fKi2 )
self.steerKiV = [ float(self.steerKi1), float(self.steerKi2) ]
# kf
fKf1 = [float(self.steer_Kf1[ 0 ]), float(self.steer_Kf1[ 1 ]) ]
fKf2 = [float(self.steer_Kf2[ 0 ]), float(self.steer_Kf2[ 1 ]) ]
self.steerKf1 = interp( cv_angle, cv, fKf1 )
self.steerKf2 = interp( cv_angle, cv, fKf2 )
xp = CP.lateralTuning.pid.kpBP
fp = [float(self.steerKf1), float(self.steerKf2) ]
self.steerKfV = interp( v_ego, xp, fp )
self.pid.gain( (CP.lateralTuning.pid.kpBP, self.steerKpV), (CP.lateralTuning.pid.kiBP, self.steerKiV), k_f=self.steerKfV )
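# Illustration (not part of the original tuning): interp() clamps outside its
# breakpoints, so with cv = [2, 15] and steer_Kp1 = [0.18, 0.20] the blended gain is
#   |angle_steers_des| <= 2 deg   -> Kp1 = 0.18
#   |angle_steers_des| == 8.5 deg -> Kp1 = 0.19 (linear blend)
#   |angle_steers_des| >= 15 deg  -> Kp1 = 0.20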
def linear_tune( self, CP, v_ego ): # gain scheduling by path curvature.
cv_value = self.v_curvature
cv = [ 100, 200 ] # curvature breakpoints
# Kp
fKp1 = [float(self.steer_Kp1[ 1 ]), float(self.steer_Kp1[ 0 ]) ]
fKp2 = [float(self.steer_Kp2[ 1 ]), float(self.steer_Kp2[ 0 ]) ]
self.steerKp1 = interp( cv_value, cv, fKp1 )
self.steerKp2 = interp( cv_value, cv, fKp2 )
self.steerKpV = [ float(self.steerKp1), float(self.steerKp2) ]
# Ki
fKi1 = [float(self.steer_Ki1[ 1 ]), float(self.steer_Ki1[ 0 ]) ]
fKi2 = [float(self.steer_Ki2[ 1 ]), float(self.steer_Ki2[ 0 ]) ]
self.steerKi1 = interp( cv_value, cv, fKi1 )
self.steerKi2 = interp( cv_value, cv, fKi2 )
self.steerKiV = [ float(self.steerKi1), float(self.steerKi2) ]
# kf
fKf1 = [float(self.steer_Kf1[ 1 ]), float(self.steer_Kf1[ 0 ]) ]
fKf2 = [float(self.steer_Kf2[ 1 ]), float(self.steer_Kf2[ 0 ]) ]
self.steerKf1 = interp( cv_value, cv, fKf1 )
self.steerKf2 = interp( cv_value, cv, fKf2 )
xp = CP.lateralTuning.pid.kpBP
fp = [float(self.steerKf1), float(self.steerKf2) ]
self.steerKfV = interp( v_ego, xp, fp )
self.pid.gain( (CP.lateralTuning.pid.kpBP, self.steerKpV), (CP.lateralTuning.pid.kiBP, self.steerKiV), k_f=self.steerKfV )
def sR_tune( self, CP, v_ego, path_plan ):
kBP0 = 0
if self.pid_change_flag == 0:
pass
elif abs(path_plan.angleSteers) > self.BP0 or self.v_curvature < 200:
kBP0 = 1
self.pid_change_fla | kBP0 = 1
self.pid_BP0_time -= 1
else:
kBP0 = 0
self.pid_change_flag = 3
self.steerKpV = [ float(self.steer_Kp1[ kBP0 ]), float(self.steer_Kp2[ kBP0 ]) ]
self.steerKiV = [ float(self.steer_Ki1[ kBP0 ]), float(self.steer_Ki2[ kBP0 ]) ]
xp = CP.lateralTuning.pid.kpBP
fp = [float(self.steer_Kf1[ kBP0 ]), float(self.steer_Kf2[ kBP0 ]) ]
self.steerKfV = interp( v_ego, xp, fp )
if self.pid_change_flag != self.pre_pid_change_flag:
self.pre_pid_change_flag = self.pid_change_flag
self.pid.gain( (CP.lateralTuning.pid.kpBP, self.steerKpV), (CP.lateralTuning.pid.kiBP, self.steerKiV) , k_f=self.steerKfV )
#self.pid = PIController((CP.lateralTuning.pid.kpBP, self.steerKpV),
# (CP.lateralTuning.pid.kiBP, self.steerKiV),
# k_f=self.steerKfV, pos_limit=1.0)
def live_tune(self, CP, path_plan, v_ego):
self.mpc_frame += 1
if self.mpc_frame % 600 == 0:
# live tuning through /data/openpilot/tune.py overrides interface.py settings
self.kegman = kegman_conf()
if self.kegman.conf['tuneGernby'] == "1":
self.steerKf = float(self.kegman.conf['Kf'])
self.BP0 = float(self.kegman.conf['sR_BP0'])
self.steer_Kp1 = [ float(self.kegman.conf['Kp']), float(self.kegman.conf['sR_Kp']) ]
self.steer_Ki1 = [ float(self.kegman.conf['Ki']), float(self.kegman.conf['sR_Ki']) ]
self.steer_Kf1 = [ float(self.kegman.conf['Kf']), float(self.kegman.conf['sR_Kf']) ]
self.steer_Kp2 = [ float(self.kegman.conf['Kp2']), float(self.kegman.conf['sR_Kp2']) ]
self.steer_Ki2 = [ float(self.kegman.conf['Ki2']), float(self.kegman.conf['sR_Ki2']) ]
self.steer_Kf2 = [ float(self.kegman.conf['Kf2']), float(self.kegman.conf['sR_Kf2']) ]
self.deadzone = float(self.kegman.conf['deadzone'])
self.mpc_frame = 0
if not self.pid_change_flag:
self.pid_change_flag = 1
self.linear2_tune( CP, v_ego )
#self.linear_tune( CP, v_ego )
#self.sR_tune( CP, v_ego, path_plan )
def update(self, active, v_ego, angle_steers, angle_steers_rate, eps_torque, steer_override, rate_limited, CP, path_plan):
self.angle_steers_des = path_plan.angleSteers
self.live_tune(CP, path_plan, v_ego)
pid_log = log.ControlsState.LateralPIDState.new_message()
pid_log.steerAngle = float(angle_steers)
pid_log.steerRate = float(angle_steers_rate)
if v_ego < 0.3 or not active:
output_steer = 0.0
pid_log.active = False
#self.angle_steers_des = 0.0
self.pid.reset()
#self.angle_steers_des = path_plan.angleSteers
else:
#self.angle_steers_des = path_plan.angleSteers
steers_max = get_steer_max(CP, v_ego)
self.pid.pos_limit = steers_max
self.pid.neg_limit = -steers_max
steer_feedforward = self.angle_steers_des # feedforward desired angle
if CP.steerControlType == car.CarParams.SteerControlType.torque:
# TODO: feedforward something based on path_plan.rateSteers
steer_feedforward -= path_plan.angleOffset # subtract the offset, since it does not contribute to resistive torque
steer_feedforward *= v_ego**2 # proportional to realigning tire momentum (~ lateral accel)
if abs(self.angle_steers_des) > self.BP0:
deadzone = 0
else:
deadzone = self.deadzone
check_saturation = (v_ego > 10) and not rate_limited and not steer_override
output_steer = self.pid.update(self.angle_steers_des, angle_steers, check_saturation=check_saturation, override=steer_override,
feedforward=steer_feedforward, speed=v_ego, deadzone=deadzone)
pid_log.active = True
pid_log.p = self.pid.p
pid_log.i = self.pid.i
pid_log.f = self.pid.f
pid_log.output = output_steer
pid_log.saturated = bool(self.pid.saturated)
return output_steer, float(self.angle_steers_des), pid_log
| g = 2
##
self.pid_BP0_time = 300
elif self.pid_BP0_time:
| conditional_block |
latcontrol_pid.py | import numpy as np
from selfdrive.controls.lib.pid import PIController
from selfdrive.controls.lib.drive_helpers import get_steer_max
from cereal import car
from cereal import log
from selfdrive.kegman_conf import kegman_conf
from common.numpy_fast import interp
import common.log as trace1
import common.MoveAvg as moveavg1
from selfdrive.config import Conversions as CV
MAX_SPEED = 255.0
class LatControlPID():
def __init__(self, CP):
self.kegman = kegman_conf(CP)
self.deadzone = float(self.kegman.conf['deadzone'])
self.pid = PIController((CP.lateralTuning.pid.kpBP, CP.lateralTuning.pid.kpV),
(CP.lateralTuning.pid.kiBP, CP.lateralTuning.pid.kiV),
k_f=CP.lateralTuning.pid.kf, pos_limit=1.0, sat_limit=CP.steerLimitTimer)
self.angle_steers_des = 0.
self.mpc_frame = 500
self.BP0 = 4
self.steer_Kf1 = [0.00003,0.00003]
self.steer_Ki1 = [0.02,0.04]
self.steer_Kp1 = [0.18,0.20]
self.steer_Kf2 = [0.00005,0.00005]
self.steer_Ki2 = [0.04,0.05]
self.steer_Kp2 = [0.20,0.25]
self.pid_change_flag = 0
self.pre_pid_change_flag = 0
self.pid_BP0_time = 0
self.movAvg = moveavg1.MoveAvg()
self.v_curvature = 256
self.model_sum = 0
self.path_x = np.arange(192)
def calc_va(self, sm, v_ego ):
md = sm['model']
if len(md.path.poly):
path = list(md.path.poly)
self.l_poly = np.array(md.leftLane.poly)
self.r_poly = np.array(md.rightLane.poly)
self.p_poly = np.array(md.path.poly)
# Curvature of polynomial https://en.wikipedia.org/wiki/Curvature#Curvature_of_the_graph_of_a_function
# y = a x^3 + b x^2 + c x + d, y' = 3 a x^2 + 2 b x + c, y'' = 6 a x + 2 b
# k = y'' / (1 + y'^2)^1.5
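# Illustrative check of the formula above (example numbers, not used by the controller):
# for a toy path y = 0.001*x**3, i.e. path = [0.001, 0, 0, 0], at x = 10 m we get
# y' = 3*0.001*100 = 0.3, y'' = 6*0.001*10 = 0.06, so k = 0.06 / (1 + 0.3**2)**1.5 ~= 0.053 1/m.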
# TODO: compute max speed without using a list of points and without numpy
y_p = 3 * path[0] * self.path_x**2 + 2 * path[1] * self.path_x + path[2]
y_pp = 6 * path[0] * self.path_x + 2 * path[1]
curv = y_pp / (1. + y_p**2)**1.5
#print( 'curv={}'.format( curv ) )
a_y_max = 2.975 - v_ego * 0.0375 # ~1.7 m/s^2 @ 75mph, ~2.6 m/s^2 @ 25mph
v_curvature = np.sqrt(a_y_max / np.clip(np.abs(curv), 1e-4, None))
model_speed = np.min(v_curvature)
model_speed = max(30.0 * CV.KPH_TO_MS, model_speed) # Don't slow down below 30 kph (~19 mph)
model_sum = curv[2] * 1000. #np.sum( curv, 0 )
model_speed = model_speed * CV.MS_TO_KPH
if model_speed > MAX_SPEED:
model_speed = MAX_SPEED
else:
model_speed = MAX_SPEED
model_sum = 0
#following = lead_1.status and lead_1.dRel < 45.0 and lead_1.vLeadK > v_ego and lead_1.aLeadK > 0.0
#following = CS.lead_distance < 100.0
#accel_limits = [float(x) for x in calc_cruise_accel_limits(v_ego, following)]
#jerk_limits = [min(-0.1, accel_limits[0]), max(0.1, accel_limits[1])] # TODO: make a separate lookup for jerk tuning
#accel_limits_turns = limit_accel_in_turns(v_ego, CS.angle_steers, accel_limits, self.steerRatio, self.wheelbase )
model_speed = self.movAvg.get_min( model_speed, 10 )
return model_speed, model_sum
def update_state( self, sm, CS ):
self.v_curvature, self.model_sum = self.calc_va( sm, CS.vEgo )
def reset( self ):
self.pid.reset()
def linear2_tune( self, CP, v_ego ): # angle (change based on steering angle)
cv_angle = abs(self.angle_steers_des)
cv = [ 2, 15 ] # angle
# Kp
fKp1 = [float(self.steer_Kp1[ 0 ]), float(self.steer_Kp1[ 1 ]) ]
fKp2 = [float(self.steer_Kp2[ 0 ]), float(self.steer_Kp2[ 1 ]) ]
self.steerKp1 = interp( cv_angle, cv, fKp1 )
self.steerKp2 = interp( cv_angle, cv, fKp2 )
self.steerKpV = [ float(self.steerKp1), float(self.steerKp2) ]
# Ki
fKi1 = [float(self.steer_Ki1[ 0 ]), float(self.steer_Ki1[ 1 ]) ]
fKi2 = [float(self.steer_Ki2[ 0 ]), float(self.steer_Ki2[ 1 ]) ]
self.steerKi1 = interp( cv_angle, cv, fKi1 )
self.steerKi2 = interp( cv_angle, cv, fKi2 )
self.steerKiV = [ float(self.steerKi1), float(self.steerKi2) ]
# kf
fKf1 = [float(self.steer_Kf1[ 0 ]), float(self.steer_Kf1[ 1 ]) ]
fKf2 = [float(self.steer_Kf2[ 0 ]), float(self.steer_Kf2[ 1 ]) ]
self.steerKf1 = interp( cv_angle, cv, fKf1 )
self.steerKf2 = interp( cv_angle, cv, fKf2 )
xp = CP.lateralTuning.pid.kpBP
fp = [float(self.steerKf1), float(self.steerKf2) ]
self.steerKfV = interp( v_ego, xp, fp )
self.pid.gain( (CP.lateralTuning.pid.kpBP, self.steerKpV), (CP.lateralTuning.pid.kiBP, self.steerKiV), k_f=self.steerKfV )
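# Example of the scheduling above with the default gains (illustrative numbers):
# at cv_angle = 8.5 deg, interp maps steer_Kp1 = [0.18, 0.20] over cv = [2, 15] to 0.19,
# so the low-speed Kp handed to the PI controller sits halfway between the
# straight-road and large-angle settings.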
def linear_tune( sel | o ): # change based on road curvature.
cv_value = self.v_curvature
cv = [ 100, 200 ] # curvature breakpoints
# Kp
fKp1 = [float(self.steer_Kp1[ 1 ]), float(self.steer_Kp1[ 0 ]) ]
fKp2 = [float(self.steer_Kp2[ 1 ]), float(self.steer_Kp2[ 0 ]) ]
self.steerKp1 = interp( cv_value, cv, fKp1 )
self.steerKp2 = interp( cv_value, cv, fKp2 )
self.steerKpV = [ float(self.steerKp1), float(self.steerKp2) ]
# Ki
fKi1 = [float(self.steer_Ki1[ 1 ]), float(self.steer_Ki1[ 0 ]) ]
fKi2 = [float(self.steer_Ki2[ 1 ]), float(self.steer_Ki2[ 0 ]) ]
self.steerKi1 = interp( cv_value, cv, fKi1 )
self.steerKi2 = interp( cv_value, cv, fKi2 )
self.steerKiV = [ float(self.steerKi1), float(self.steerKi2) ]
# kf
fKf1 = [float(self.steer_Kf1[ 1 ]), float(self.steer_Kf1[ 0 ]) ]
fKf2 = [float(self.steer_Kf2[ 1 ]), float(self.steer_Kf2[ 0 ]) ]
self.steerKf1 = interp( cv_value, cv, fKf1 )
self.steerKf2 = interp( cv_value, cv, fKf2 )
xp = CP.lateralTuning.pid.kpBP
fp = [float(self.steerKf1), float(self.steerKf2) ]
self.steerKfV = interp( v_ego, xp, fp )
self.pid.gain( (CP.lateralTuning.pid.kpBP, self.steerKpV), (CP.lateralTuning.pid.kiBP, self.steerKiV), k_f=self.steerKfV )
def sR_tune( self, CP, v_ego, path_plan ):
kBP0 = 0
if self.pid_change_flag == 0:
pass
elif abs(path_plan.angleSteers) > self.BP0 or self.v_curvature < 200:
kBP0 = 1
self.pid_change_flag = 2
##
self.pid_BP0_time = 300
elif self.pid_BP0_time:
kBP0 = 1
self.pid_BP0_time -= 1
else:
kBP0 = 0
self.pid_change_flag = 3
self.steerKpV = [ float(self.steer_Kp1[ kBP0 ]), float(self.steer_Kp2[ kBP0 ]) ]
self.steerKiV = [ float(self.steer_Ki1[ kBP0 ]), float(self.steer_Ki2[ kBP0 ]) ]
xp = CP.lateralTuning.pid.kpBP
fp = [float(self.steer_Kf1[ kBP0 ]), float(self.steer_Kf2[ kBP0 ]) ]
self.steerKfV = interp( v_ego, xp, fp )
if self.pid_change_flag != self.pre_pid_change_flag:
self.pre_pid_change_flag = self.pid_change_flag
self.pid.gain( (CP.lateralTuning.pid.kpBP, self.steerKpV), (CP.lateralTuning.pid.kiBP, self.steerKiV) , k_f=self.steerKfV )
#self.pid = PIController((CP.lateralTuning.pid.kpBP, self.steerKpV),
# (CP.lateralTuning.pid.kiBP, self.steerKiV),
# k_f=self.steerKfV, pos_limit=1.0)
def live_tune(self, CP, path_plan, v_ego):
self.mpc_frame += 1
if self.mpc_frame % 600 == 0:
# live tuning through /data/openpilot/tune.py overrides interface.py settings
self.kegman = kegman_conf()
if self.kegman.conf['tuneGernby'] == "1":
self.steerKf = float(self.kegman.conf['Kf'])
self.BP0 = float(self.kegman.conf['sR_BP0'])
self.steer_Kp1 = [ float(self.kegman.conf['Kp']), float(self.kegman.conf['sR_Kp']) ]
self.steer_Ki1 = [ float(self.kegman.conf['Ki']), float(self.kegman.conf['sR_Ki']) ]
self.steer_Kf1 = [ float(self.kegman.conf['Kf']), float(self.kegman.conf['sR_Kf']) ]
self.steer_Kp2 = [ float(self.kegman.conf['Kp2']), float(self.kegman.conf['sR_Kp2']) ]
self.steer_Ki2 = [ float(self.kegman.conf['Ki2']), float(self.kegman.conf['sR_Ki2']) ]
self.steer_Kf2 = [ float(self.kegman.conf['Kf2']), float(self.kegman.conf['sR_Kf2']) ]
self.deadzone = float(self.kegman.conf['deadzone'])
self.mpc_frame = 0
if not self.pid_change_flag:
self.pid_change_flag = 1
self.linear2_tune( CP, v_ego )
#self.linear_tune( CP, v_ego )
#self.sR_tune( CP, v_ego, path_plan )
def update(self, active, v_ego, angle_steers, angle_steers_rate, eps_torque, steer_override, rate_limited, CP, path_plan):
self.angle_steers_des = path_plan.angleSteers
self.live_tune(CP, path_plan, v_ego)
pid_log = log.ControlsState.LateralPIDState.new_message()
pid_log.steerAngle = float(angle_steers)
pid_log.steerRate = float(angle_steers_rate)
if v_ego < 0.3 or not active:
output_steer = 0.0
pid_log.active = False
#self.angle_steers_des = 0.0
self.pid.reset()
#self.angle_steers_des = path_plan.angleSteers
else:
#self.angle_steers_des = path_plan.angleSteers
steers_max = get_steer_max(CP, v_ego)
self.pid.pos_limit = steers_max
self.pid.neg_limit = -steers_max
steer_feedforward = self.angle_steers_des # feedforward desired angle
if CP.steerControlType == car.CarParams.SteerControlType.torque:
# TODO: feedforward something based on path_plan.rateSteers
steer_feedforward -= path_plan.angleOffset # subtract the offset, since it does not contribute to resistive torque
steer_feedforward *= v_ego**2 # proportional to realigning tire momentum (~ lateral accel)
if abs(self.angle_steers_des) > self.BP0:
deadzone = 0
else:
deadzone = self.deadzone
check_saturation = (v_ego > 10) and not rate_limited and not steer_override
output_steer = self.pid.update(self.angle_steers_des, angle_steers, check_saturation=check_saturation, override=steer_override,
feedforward=steer_feedforward, speed=v_ego, deadzone=deadzone)
pid_log.active = True
pid_log.p = self.pid.p
pid_log.i = self.pid.i
pid_log.f = self.pid.f
pid_log.output = output_steer
pid_log.saturated = bool(self.pid.saturated)
return output_steer, float(self.angle_steers_des), pid_log
| f, CP, v_eg | identifier_name |
latcontrol_pid.py | import numpy as np
from selfdrive.controls.lib.pid import PIController
from selfdrive.controls.lib.drive_helpers import get_steer_max
from cereal import car
from cereal import log
from selfdrive.kegman_conf import kegman_conf
from common.numpy_fast import interp
import common.log as trace1
import common.MoveAvg as moveavg1
from selfdrive.config import Conversions as CV
MAX_SPEED = 255.0
class LatControlPID():
def __init__(self, CP):
self.kegman = kegman_conf(CP)
self.deadzone = float(self.kegman.conf['deadzone'])
self.pid = PIController((CP.lateralTuning.pid.kpBP, CP.lateralTuning.pid.kpV),
(CP.lateralTuning.pid.kiBP, CP.lateralTuning.pid.kiV),
k_f=CP.lateralTuning.pid.kf, pos_limit=1.0, sat_limit=CP.steerLimitTimer)
self.angle_steers_des = 0.
self.mpc_frame = 500
self.BP0 = 4
self.steer_Kf1 = [0.00003,0.00003]
self.steer_Ki1 = [0.02,0.04]
self.steer_Kp1 = [0.18,0.20]
self.steer_Kf2 = [0.00005,0.00005]
self.steer_Ki2 = [0.04,0.05]
self.steer_Kp2 = [0.20,0.25]
self.pid_change_flag = 0
self.pre_pid_change_flag = 0
self.pid_BP0_time = 0
self.movAvg = moveavg1.MoveAvg()
self.v_curvature = 256
self.model_sum = 0
self.path_x = np.arange(192)
def calc_va(self, sm, v_ego ): | md = sm['model']
if len(md.path.poly):
path = list(md.path.poly)
self.l_poly = np.array(md.leftLane.poly)
self.r_poly = np.array(md.rightLane.poly)
self.p_poly = np.array(md.path.poly)
# Curvature of polynomial https://en.wikipedia.org/wiki/Curvature#Curvature_of_the_graph_of_a_function
# y = a x^3 + b x^2 + c x + d, y' = 3 a x^2 + 2 b x + c, y'' = 6 a x + 2 b
# k = y'' / (1 + y'^2)^1.5
# TODO: compute max speed without using a list of points and without numpy
y_p = 3 * path[0] * self.path_x**2 + 2 * path[1] * self.path_x + path[2]
y_pp = 6 * path[0] * self.path_x + 2 * path[1]
curv = y_pp / (1. + y_p**2)**1.5
#print( 'curv={}'.format( curv ) )
a_y_max = 2.975 - v_ego * 0.0375 # ~1.7 m/s^2 @ 75mph, ~2.6 m/s^2 @ 25mph
v_curvature = np.sqrt(a_y_max / np.clip(np.abs(curv), 1e-4, None))
model_speed = np.min(v_curvature)
model_speed = max(30.0 * CV.KPH_TO_MS, model_speed) # Don't slow down below 30 kph (~19 mph)
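# Example with assumed numbers: at v_ego = 20 m/s, a_y_max = 2.975 - 0.75 = 2.225 m/s^2,
# so a curvature of |curv| = 0.002 1/m caps the curve speed at sqrt(2.225 / 0.002) ~= 33 m/s (~120 kph).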
model_sum = curv[2] * 1000. #np.sum( curv, 0 )
model_speed = model_speed * CV.MS_TO_KPH
if model_speed > MAX_SPEED:
model_speed = MAX_SPEED
else:
model_speed = MAX_SPEED
model_sum = 0
#following = lead_1.status and lead_1.dRel < 45.0 and lead_1.vLeadK > v_ego and lead_1.aLeadK > 0.0
#following = CS.lead_distance < 100.0
#accel_limits = [float(x) for x in calc_cruise_accel_limits(v_ego, following)]
#jerk_limits = [min(-0.1, accel_limits[0]), max(0.1, accel_limits[1])] # TODO: make a separate lookup for jerk tuning
#accel_limits_turns = limit_accel_in_turns(v_ego, CS.angle_steers, accel_limits, self.steerRatio, self.wheelbase )
model_speed = self.movAvg.get_min( model_speed, 10 )
return model_speed, model_sum
def update_state( self, sm, CS ):
self.v_curvature, self.model_sum = self.calc_va( sm, CS.vEgo )
def reset( self ):
self.pid.reset()
def linear2_tune( self, CP, v_ego ): # angle (change based on steering angle)
cv_angle = abs(self.angle_steers_des)
cv = [ 2, 15 ] # angle
# Kp
fKp1 = [float(self.steer_Kp1[ 0 ]), float(self.steer_Kp1[ 1 ]) ]
fKp2 = [float(self.steer_Kp2[ 0 ]), float(self.steer_Kp2[ 1 ]) ]
self.steerKp1 = interp( cv_angle, cv, fKp1 )
self.steerKp2 = interp( cv_angle, cv, fKp2 )
self.steerKpV = [ float(self.steerKp1), float(self.steerKp2) ]
# Ki
fKi1 = [float(self.steer_Ki1[ 0 ]), float(self.steer_Ki1[ 1 ]) ]
fKi2 = [float(self.steer_Ki2[ 0 ]), float(self.steer_Ki2[ 1 ]) ]
self.steerKi1 = interp( cv_angle, cv, fKi1 )
self.steerKi2 = interp( cv_angle, cv, fKi2 )
self.steerKiV = [ float(self.steerKi1), float(self.steerKi2) ]
# kf
fKf1 = [float(self.steer_Kf1[ 0 ]), float(self.steer_Kf1[ 1 ]) ]
fKf2 = [float(self.steer_Kf2[ 0 ]), float(self.steer_Kf2[ 1 ]) ]
self.steerKf1 = interp( cv_angle, cv, fKf1 )
self.steerKf2 = interp( cv_angle, cv, fKf2 )
xp = CP.lateralTuning.pid.kpBP
fp = [float(self.steerKf1), float(self.steerKf2) ]
self.steerKfV = interp( v_ego, xp, fp )
self.pid.gain( (CP.lateralTuning.pid.kpBP, self.steerKpV), (CP.lateralTuning.pid.kiBP, self.steerKiV), k_f=self.steerKfV )
def linear_tune( self, CP, v_ego ): # change based on road curvature.
cv_value = self.v_curvature
cv = [ 100, 200 ] # curvature breakpoints
# Kp
fKp1 = [float(self.steer_Kp1[ 1 ]), float(self.steer_Kp1[ 0 ]) ]
fKp2 = [float(self.steer_Kp2[ 1 ]), float(self.steer_Kp2[ 0 ]) ]
self.steerKp1 = interp( cv_value, cv, fKp1 )
self.steerKp2 = interp( cv_value, cv, fKp2 )
self.steerKpV = [ float(self.steerKp1), float(self.steerKp2) ]
# Ki
fKi1 = [float(self.steer_Ki1[ 1 ]), float(self.steer_Ki1[ 0 ]) ]
fKi2 = [float(self.steer_Ki2[ 1 ]), float(self.steer_Ki2[ 0 ]) ]
self.steerKi1 = interp( cv_value, cv, fKi1 )
self.steerKi2 = interp( cv_value, cv, fKi2 )
self.steerKiV = [ float(self.steerKi1), float(self.steerKi2) ]
# kf
fKf1 = [float(self.steer_Kf1[ 1 ]), float(self.steer_Kf1[ 0 ]) ]
fKf2 = [float(self.steer_Kf2[ 1 ]), float(self.steer_Kf2[ 0 ]) ]
self.steerKf1 = interp( cv_value, cv, fKf1 )
self.steerKf2 = interp( cv_value, cv, fKf2 )
xp = CP.lateralTuning.pid.kpBP
fp = [float(self.steerKf1), float(self.steerKf2) ]
self.steerKfV = interp( v_ego, xp, fp )
self.pid.gain( (CP.lateralTuning.pid.kpBP, self.steerKpV), (CP.lateralTuning.pid.kiBP, self.steerKiV), k_f=self.steerKfV )
def sR_tune( self, CP, v_ego, path_plan ):
kBP0 = 0
if self.pid_change_flag == 0:
pass
elif abs(path_plan.angleSteers) > self.BP0 or self.v_curvature < 200:
kBP0 = 1
self.pid_change_flag = 2
##
self.pid_BP0_time = 300
elif self.pid_BP0_time:
kBP0 = 1
self.pid_BP0_time -= 1
else:
kBP0 = 0
self.pid_change_flag = 3
self.steerKpV = [ float(self.steer_Kp1[ kBP0 ]), float(self.steer_Kp2[ kBP0 ]) ]
self.steerKiV = [ float(self.steer_Ki1[ kBP0 ]), float(self.steer_Ki2[ kBP0 ]) ]
xp = CP.lateralTuning.pid.kpBP
fp = [float(self.steer_Kf1[ kBP0 ]), float(self.steer_Kf2[ kBP0 ]) ]
self.steerKfV = interp( v_ego, xp, fp )
if self.pid_change_flag != self.pre_pid_change_flag:
self.pre_pid_change_flag = self.pid_change_flag
self.pid.gain( (CP.lateralTuning.pid.kpBP, self.steerKpV), (CP.lateralTuning.pid.kiBP, self.steerKiV) , k_f=self.steerKfV )
#self.pid = PIController((CP.lateralTuning.pid.kpBP, self.steerKpV),
# (CP.lateralTuning.pid.kiBP, self.steerKiV),
# k_f=self.steerKfV, pos_limit=1.0)
def live_tune(self, CP, path_plan, v_ego):
self.mpc_frame += 1
if self.mpc_frame % 600 == 0:
# live tuning through /data/openpilot/tune.py overrides interface.py settings
self.kegman = kegman_conf()
if self.kegman.conf['tuneGernby'] == "1":
self.steerKf = float(self.kegman.conf['Kf'])
self.BP0 = float(self.kegman.conf['sR_BP0'])
self.steer_Kp1 = [ float(self.kegman.conf['Kp']), float(self.kegman.conf['sR_Kp']) ]
self.steer_Ki1 = [ float(self.kegman.conf['Ki']), float(self.kegman.conf['sR_Ki']) ]
self.steer_Kf1 = [ float(self.kegman.conf['Kf']), float(self.kegman.conf['sR_Kf']) ]
self.steer_Kp2 = [ float(self.kegman.conf['Kp2']), float(self.kegman.conf['sR_Kp2']) ]
self.steer_Ki2 = [ float(self.kegman.conf['Ki2']), float(self.kegman.conf['sR_Ki2']) ]
self.steer_Kf2 = [ float(self.kegman.conf['Kf2']), float(self.kegman.conf['sR_Kf2']) ]
self.deadzone = float(self.kegman.conf['deadzone'])
self.mpc_frame = 0
if not self.pid_change_flag:
self.pid_change_flag = 1
self.linear2_tune( CP, v_ego )
#self.linear_tune( CP, v_ego )
#self.sR_tune( CP, v_ego, path_plan )
def update(self, active, v_ego, angle_steers, angle_steers_rate, eps_torque, steer_override, rate_limited, CP, path_plan):
self.angle_steers_des = path_plan.angleSteers
self.live_tune(CP, path_plan, v_ego)
pid_log = log.ControlsState.LateralPIDState.new_message()
pid_log.steerAngle = float(angle_steers)
pid_log.steerRate = float(angle_steers_rate)
if v_ego < 0.3 or not active:
output_steer = 0.0
pid_log.active = False
#self.angle_steers_des = 0.0
self.pid.reset()
#self.angle_steers_des = path_plan.angleSteers
else:
#self.angle_steers_des = path_plan.angleSteers
steers_max = get_steer_max(CP, v_ego)
self.pid.pos_limit = steers_max
self.pid.neg_limit = -steers_max
steer_feedforward = self.angle_steers_des # feedforward desired angle
if CP.steerControlType == car.CarParams.SteerControlType.torque:
# TODO: feedforward something based on path_plan.rateSteers
steer_feedforward -= path_plan.angleOffset # subtract the offset, since it does not contribute to resistive torque
steer_feedforward *= v_ego**2 # proportional to realigning tire momentum (~ lateral accel)
if abs(self.angle_steers_des) > self.BP0:
deadzone = 0
else:
deadzone = self.deadzone
check_saturation = (v_ego > 10) and not rate_limited and not steer_override
output_steer = self.pid.update(self.angle_steers_des, angle_steers, check_saturation=check_saturation, override=steer_override,
feedforward=steer_feedforward, speed=v_ego, deadzone=deadzone)
pid_log.active = True
pid_log.p = self.pid.p
pid_log.i = self.pid.i
pid_log.f = self.pid.f
pid_log.output = output_steer
pid_log.saturated = bool(self.pid.saturated)
return output_steer, float(self.angle_steers_des), pid_log | random_line_split | |
latcontrol_pid.py | import numpy as np
from selfdrive.controls.lib.pid import PIController
from selfdrive.controls.lib.drive_helpers import get_steer_max
from cereal import car
from cereal import log
from selfdrive.kegman_conf import kegman_conf
from common.numpy_fast import interp
import common.log as trace1
import common.MoveAvg as moveavg1
from selfdrive.config import Conversions as CV
MAX_SPEED = 255.0
class LatControlPID():
def __init__(self, CP):
self.kegman = kegman_conf(CP)
self.deadzone = float(self.kegman.conf['deadzone'])
self.pid = PIController((CP.lateralTuning.pid.kpBP, CP.lateralTuning.pid.kpV),
(CP.lateralTuning.pid.kiBP, CP.lateralTuning.pid.kiV),
k_f=CP.lateralTuning.pid.kf, pos_limit=1.0, sat_limit=CP.steerLimitTimer)
self.angle_steers_des = 0.
self.mpc_frame = 500
self.BP0 = 4
self.steer_Kf1 = [0.00003,0.00003]
self.steer_Ki1 = [0.02,0.04]
self.steer_Kp1 = [0.18,0.20]
self.steer_Kf2 = [0.00005,0.00005]
self.steer_Ki2 = [0.04,0.05]
self.steer_Kp2 = [0.20,0.25]
self.pid_change_flag = 0
self.pre_pid_change_flag = 0
self.pid_BP0_time = 0
self.movAvg = moveavg1.MoveAvg()
self.v_curvature = 256
self.model_sum = 0
self.path_x = np.arange(192)
def calc_va(self, sm, v_ego ):
md = sm['model']
if len(md.path.poly):
path = list(md.path.poly)
self.l_poly = np.array(md.leftLane.poly)
self.r_poly = np.array(md.rightLane.poly)
self.p_poly = np.array(md.path.poly)
# Curvature of polynomial https://en.wikipedia.org/wiki/Curvature#Curvature_of_the_graph_of_a_function
# y = a x^3 + b x^2 + c x + d, y' = 3 a x^2 + 2 b x + c, y'' = 6 a x + 2 b
# k = y'' / (1 + y'^2)^1.5
# TODO: compute max speed without using a list of points and without numpy
y_p = 3 * path[0] * self.path_x**2 + 2 * path[1] * self.path_x + path[2]
y_pp = 6 * path[0] * self.path_x + 2 * path[1]
curv = y_pp / (1. + y_p**2)**1.5
#print( 'curv={}'.format( curv ) )
a_y_max = 2.975 - v_ego * 0.0375 # ~1.7 m/s^2 @ 75mph, ~2.6 m/s^2 @ 25mph
v_curvature = np.sqrt(a_y_max / np.clip(np.abs(curv), 1e-4, None))
model_speed = np.min(v_curvature)
model_speed = max(30.0 * CV.KPH_TO_MS, model_speed) # Don't slow down below 30 kph (~19 mph)
model_sum = curv[2] * 1000. #np.sum( curv, 0 )
model_speed = model_speed * CV.MS_TO_KPH
if model_speed > MAX_SPEED:
model_speed = MAX_SPEED
else:
model_speed = MAX_SPEED
model_sum = 0
#following = lead_1.status and lead_1.dRel < 45.0 and lead_1.vLeadK > v_ego and lead_1.aLeadK > 0.0
#following = CS.lead_distance < 100.0
#accel_limits = [float(x) for x in calc_cruise_accel_limits(v_ego, following)]
#jerk_limits = [min(-0.1, accel_limits[0]), max(0.1, accel_limits[1])] # TODO: make a separate lookup for jerk tuning
#accel_limits_turns = limit_accel_in_turns(v_ego, CS.angle_steers, accel_limits, self.steerRatio, self.wheelbase )
model_speed = self.movAvg.get_min( model_speed, 10 )
return model_speed, model_sum
def update_state( self, sm, CS ):
|
def reset( self ):
self.pid.reset()
def linear2_tune( self, CP, v_ego ): # angle (change based on steering angle)
cv_angle = abs(self.angle_steers_des)
cv = [ 2, 15 ] # angle
# Kp
fKp1 = [float(self.steer_Kp1[ 0 ]), float(self.steer_Kp1[ 1 ]) ]
fKp2 = [float(self.steer_Kp2[ 0 ]), float(self.steer_Kp2[ 1 ]) ]
self.steerKp1 = interp( cv_angle, cv, fKp1 )
self.steerKp2 = interp( cv_angle, cv, fKp2 )
self.steerKpV = [ float(self.steerKp1), float(self.steerKp2) ]
# Ki
fKi1 = [float(self.steer_Ki1[ 0 ]), float(self.steer_Ki1[ 1 ]) ]
fKi2 = [float(self.steer_Ki2[ 0 ]), float(self.steer_Ki2[ 1 ]) ]
self.steerKi1 = interp( cv_angle, cv, fKi1 )
self.steerKi2 = interp( cv_angle, cv, fKi2 )
self.steerKiV = [ float(self.steerKi1), float(self.steerKi2) ]
# kf
fKf1 = [float(self.steer_Kf1[ 0 ]), float(self.steer_Kf1[ 1 ]) ]
fKf2 = [float(self.steer_Kf2[ 0 ]), float(self.steer_Kf2[ 1 ]) ]
self.steerKf1 = interp( cv_angle, cv, fKf1 )
self.steerKf2 = interp( cv_angle, cv, fKf2 )
xp = CP.lateralTuning.pid.kpBP
fp = [float(self.steerKf1), float(self.steerKf2) ]
self.steerKfV = interp( v_ego, xp, fp )
self.pid.gain( (CP.lateralTuning.pid.kpBP, self.steerKpV), (CP.lateralTuning.pid.kiBP, self.steerKiV), k_f=self.steerKfV )
def linear_tune( self, CP, v_ego ): # change based on road curvature.
cv_value = self.v_curvature
cv = [ 100, 200 ] # curvature breakpoints
# Kp
fKp1 = [float(self.steer_Kp1[ 1 ]), float(self.steer_Kp1[ 0 ]) ]
fKp2 = [float(self.steer_Kp2[ 1 ]), float(self.steer_Kp2[ 0 ]) ]
self.steerKp1 = interp( cv_value, cv, fKp1 )
self.steerKp2 = interp( cv_value, cv, fKp2 )
self.steerKpV = [ float(self.steerKp1), float(self.steerKp2) ]
# Ki
fKi1 = [float(self.steer_Ki1[ 1 ]), float(self.steer_Ki1[ 0 ]) ]
fKi2 = [float(self.steer_Ki2[ 1 ]), float(self.steer_Ki2[ 0 ]) ]
self.steerKi1 = interp( cv_value, cv, fKi1 )
self.steerKi2 = interp( cv_value, cv, fKi2 )
self.steerKiV = [ float(self.steerKi1), float(self.steerKi2) ]
# kf
fKf1 = [float(self.steer_Kf1[ 1 ]), float(self.steer_Kf1[ 0 ]) ]
fKf2 = [float(self.steer_Kf2[ 1 ]), float(self.steer_Kf2[ 0 ]) ]
self.steerKf1 = interp( cv_value, cv, fKf1 )
self.steerKf2 = interp( cv_value, cv, fKf2 )
xp = CP.lateralTuning.pid.kpBP
fp = [float(self.steerKf1), float(self.steerKf2) ]
self.steerKfV = interp( v_ego, xp, fp )
self.pid.gain( (CP.lateralTuning.pid.kpBP, self.steerKpV), (CP.lateralTuning.pid.kiBP, self.steerKiV), k_f=self.steerKfV )
def sR_tune( self, CP, v_ego, path_plan ):
kBP0 = 0
if self.pid_change_flag == 0:
pass
elif abs(path_plan.angleSteers) > self.BP0 or self.v_curvature < 200:
kBP0 = 1
self.pid_change_flag = 2
##
self.pid_BP0_time = 300
elif self.pid_BP0_time:
kBP0 = 1
self.pid_BP0_time -= 1
else:
kBP0 = 0
self.pid_change_flag = 3
self.steerKpV = [ float(self.steer_Kp1[ kBP0 ]), float(self.steer_Kp2[ kBP0 ]) ]
self.steerKiV = [ float(self.steer_Ki1[ kBP0 ]), float(self.steer_Ki2[ kBP0 ]) ]
xp = CP.lateralTuning.pid.kpBP
fp = [float(self.steer_Kf1[ kBP0 ]), float(self.steer_Kf2[ kBP0 ]) ]
self.steerKfV = interp( v_ego, xp, fp )
if self.pid_change_flag != self.pre_pid_change_flag:
self.pre_pid_change_flag = self.pid_change_flag
self.pid.gain( (CP.lateralTuning.pid.kpBP, self.steerKpV), (CP.lateralTuning.pid.kiBP, self.steerKiV) , k_f=self.steerKfV )
#self.pid = PIController((CP.lateralTuning.pid.kpBP, self.steerKpV),
# (CP.lateralTuning.pid.kiBP, self.steerKiV),
# k_f=self.steerKfV, pos_limit=1.0)
def live_tune(self, CP, path_plan, v_ego):
self.mpc_frame += 1
if self.mpc_frame % 600 == 0:
# live tuning through /data/openpilot/tune.py overrides interface.py settings
self.kegman = kegman_conf()
if self.kegman.conf['tuneGernby'] == "1":
self.steerKf = float(self.kegman.conf['Kf'])
self.BP0 = float(self.kegman.conf['sR_BP0'])
self.steer_Kp1 = [ float(self.kegman.conf['Kp']), float(self.kegman.conf['sR_Kp']) ]
self.steer_Ki1 = [ float(self.kegman.conf['Ki']), float(self.kegman.conf['sR_Ki']) ]
self.steer_Kf1 = [ float(self.kegman.conf['Kf']), float(self.kegman.conf['sR_Kf']) ]
self.steer_Kp2 = [ float(self.kegman.conf['Kp2']), float(self.kegman.conf['sR_Kp2']) ]
self.steer_Ki2 = [ float(self.kegman.conf['Ki2']), float(self.kegman.conf['sR_Ki2']) ]
self.steer_Kf2 = [ float(self.kegman.conf['Kf2']), float(self.kegman.conf['sR_Kf2']) ]
self.deadzone = float(self.kegman.conf['deadzone'])
self.mpc_frame = 0
if not self.pid_change_flag:
self.pid_change_flag = 1
self.linear2_tune( CP, v_ego )
#self.linear_tune( CP, v_ego )
#self.sR_tune( CP, v_ego, path_plan )
def update(self, active, v_ego, angle_steers, angle_steers_rate, eps_torque, steer_override, rate_limited, CP, path_plan):
self.angle_steers_des = path_plan.angleSteers
self.live_tune(CP, path_plan, v_ego)
pid_log = log.ControlsState.LateralPIDState.new_message()
pid_log.steerAngle = float(angle_steers)
pid_log.steerRate = float(angle_steers_rate)
if v_ego < 0.3 or not active:
output_steer = 0.0
pid_log.active = False
#self.angle_steers_des = 0.0
self.pid.reset()
#self.angle_steers_des = path_plan.angleSteers
else:
#self.angle_steers_des = path_plan.angleSteers
steers_max = get_steer_max(CP, v_ego)
self.pid.pos_limit = steers_max
self.pid.neg_limit = -steers_max
steer_feedforward = self.angle_steers_des # feedforward desired angle
if CP.steerControlType == car.CarParams.SteerControlType.torque:
# TODO: feedforward something based on path_plan.rateSteers
steer_feedforward -= path_plan.angleOffset # subtract the offset, since it does not contribute to resistive torque
steer_feedforward *= v_ego**2 # proportional to realigning tire momentum (~ lateral accel)
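# Worked example (illustrative numbers only): with angle_steers_des = 5 deg,
# angleOffset = 0.5 deg and v_ego = 20 m/s, the feedforward term is
# (5.0 - 0.5) * 20**2 = 1800 in the controller's mixed units, before the k_f scaling inside the PI controller.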
if abs(self.angle_steers_des) > self.BP0:
deadzone = 0
else:
deadzone = self.deadzone
check_saturation = (v_ego > 10) and not rate_limited and not steer_override
output_steer = self.pid.update(self.angle_steers_des, angle_steers, check_saturation=check_saturation, override=steer_override,
feedforward=steer_feedforward, speed=v_ego, deadzone=deadzone)
pid_log.active = True
pid_log.p = self.pid.p
pid_log.i = self.pid.i
pid_log.f = self.pid.f
pid_log.output = output_steer
pid_log.saturated = bool(self.pid.saturated)
return output_steer, float(self.angle_steers_des), pid_log
| self.v_curvature, self.model_sum = self.calc_va( sm, CS.vEgo ) | identifier_body |
manifest.go | // Copyright 2018 Google LLC All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package registry
import (
"bytes"
"encoding/json"
"fmt"
"io"
"log"
"net/http"
"sort"
"strconv"
"strings"
"sync"
v1 "github.com/google/go-containerregistry/pkg/v1"
"github.com/google/go-containerregistry/pkg/v1/types"
)
type catalog struct {
Repos []string `json:"repositories"`
}
type listTags struct {
Name string `json:"name"`
Tags []string `json:"tags"`
}
type manifest struct {
contentType string
blob []byte
}
type manifests struct {
// maps repo -> manifest tag/digest -> manifest
manifests map[string]map[string]manifest
lock sync.Mutex
log *log.Logger
}
func isManifest(req *http.Request) bool {
elems := strings.Split(req.URL.Path, "/")
elems = elems[1:]
if len(elems) < 4 {
return false
}
return elems[len(elems)-2] == "manifests"
}
func isTags(req *http.Request) bool {
elems := strings.Split(req.URL.Path, "/")
elems = elems[1:]
if len(elems) < 4 {
return false
}
return elems[len(elems)-2] == "tags"
}
func isCatalog(req *http.Request) bool {
elems := strings.Split(req.URL.Path, "/")
elems = elems[1:]
if len(elems) < 2 {
return false
}
return elems[len(elems)-1] == "_catalog"
}
// Returns whether this url should be handled by the referrers handler
func isReferrers(req *http.Request) bool {
elems := strings.Split(req.URL.Path, "/")
elems = elems[1:]
if len(elems) < 4 {
return false
}
return elems[len(elems)-2] == "referrers"
}
// https://github.com/opencontainers/distribution-spec/blob/master/spec.md#pulling-an-image-manifest
// https://github.com/opencontainers/distribution-spec/blob/master/spec.md#pushing-an-image
func (m *manifests) handle(resp http.ResponseWriter, req *http.Request) *regError {
elem := strings.Split(req.URL.Path, "/")
elem = elem[1:]
target := elem[len(elem)-1]
repo := strings.Join(elem[1:len(elem)-2], "/")
switch req.Method {
case http.MethodGet:
m.lock.Lock()
defer m.lock.Unlock()
c, ok := m.manifests[repo]
if !ok {
return ®Error{
Status: http.StatusNotFound,
Code: "NAME_UNKNOWN",
Message: "Unknown name",
}
}
m, ok := c[target]
if !ok {
return ®Error{
Status: http.StatusNotFound,
Code: "MANIFEST_UNKNOWN",
Message: "Unknown manifest",
}
}
h, _, _ := v1.SHA256(bytes.NewReader(m.blob))
resp.Header().Set("Docker-Content-Digest", h.String())
resp.Header().Set("Content-Type", m.contentType)
resp.Header().Set("Content-Length", fmt.Sprint(len(m.blob)))
resp.WriteHeader(http.StatusOK)
io.Copy(resp, bytes.NewReader(m.blob))
return nil
case http.MethodHead:
m.lock.Lock()
defer m.lock.Unlock()
if _, ok := m.manifests[repo]; !ok {
return ®Error{
Status: http.StatusNotFound,
Code: "NAME_UNKNOWN",
Message: "Unknown name",
}
}
m, ok := m.manifests[repo][target]
if !ok {
return ®Error{
Status: http.StatusNotFound,
Code: "MANIFEST_UNKNOWN",
Message: "Unknown manifest",
}
}
h, _, _ := v1.SHA256(bytes.NewReader(m.blob))
resp.Header().Set("Docker-Content-Digest", h.String())
resp.Header().Set("Content-Type", m.contentType)
resp.Header().Set("Content-Length", fmt.Sprint(len(m.blob)))
resp.WriteHeader(http.StatusOK)
return nil
case http.MethodPut:
m.lock.Lock()
defer m.lock.Unlock()
if _, ok := m.manifests[repo]; !ok {
m.manifests[repo] = map[string]manifest{}
}
b := &bytes.Buffer{}
io.Copy(b, req.Body)
h, _, _ := v1.SHA256(bytes.NewReader(b.Bytes()))
digest := h.String()
mf := manifest{
blob: b.Bytes(),
contentType: req.Header.Get("Content-Type"),
}
// If the manifest is a manifest list, check that the manifest
// list's constituent manifests are already uploaded.
// This isn't strictly required by the registry API, but some
// registries require this.
if types.MediaType(mf.contentType).IsIndex() {
im, err := v1.ParseIndexManifest(b)
if err != nil {
return ®Error{
Status: http.StatusBadRequest,
Code: "MANIFEST_INVALID",
Message: err.Error(),
}
}
for _, desc := range im.Manifests {
if !desc.MediaType.IsDistributable() {
continue
}
if desc.MediaType.IsIndex() || desc.MediaType.IsImage() {
if _, found := m.manifests[repo][desc.Digest.String()]; !found {
return ®Error{
Status: http.StatusNotFound,
Code: "MANIFEST_UNKNOWN",
Message: fmt.Sprintf("Sub-manifest %q not found", desc.Digest),
}
}
} else {
// TODO: Probably want to do an existence check for blobs.
m.log.Printf("TODO: Check blobs for %q", desc.Digest)
}
}
}
// Allow future references by target (tag) and immutable digest.
// See https://docs.docker.com/engine/reference/commandline/pull/#pull-an-image-by-digest-immutable-identifier.
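// For example (hypothetical repository and digest): after a successful
// PUT /v2/foo/manifests/latest, both GET /v2/foo/manifests/latest and
// GET /v2/foo/manifests/sha256:<digest-of-the-uploaded-bytes> return the same blob.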
m.manifests[repo][target] = mf
m.manifests[repo][digest] = mf
resp.Header().Set("Docker-Content-Digest", digest)
resp.WriteHeader(http.StatusCreated)
return nil
case http.MethodDelete:
m.lock.Lock()
defer m.lock.Unlock()
if _, ok := m.manifests[repo]; !ok {
return ®Error{
Status: http.StatusNotFound,
Code: "NAME_UNKNOWN",
Message: "Unknown name",
}
}
_, ok := m.manifests[repo][target]
if !ok {
return ®Error{
Status: http.StatusNotFound,
Code: "MANIFEST_UNKNOWN",
Message: "Unknown manifest",
}
}
delete(m.manifests[repo], target)
resp.WriteHeader(http.StatusAccepted)
return nil
default:
return ®Error{
Status: http.StatusBadRequest,
Code: "METHOD_UNKNOWN",
Message: "We don't understand your method + url",
}
}
}
func (m *manifests) handleTags(resp http.ResponseWriter, req *http.Request) *regError {
elem := strings.Split(req.URL.Path, "/")
elem = elem[1:]
repo := strings.Join(elem[1:len(elem)-2], "/")
if req.Method == "GET" {
m.lock.Lock()
defer m.lock.Unlock()
c, ok := m.manifests[repo]
if !ok {
return ®Error{
Status: http.StatusNotFound,
Code: "NAME_UNKNOWN",
Message: "Unknown name",
}
}
var tags []string
for tag := range c |
sort.Strings(tags)
// https://github.com/opencontainers/distribution-spec/blob/b505e9cc53ec499edbd9c1be32298388921bb705/detail.md#tags-paginated
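// Example (hypothetical tags): with stored tags [v1, v2, v3], a request for
// /v2/<repo>/tags/list?last=v1&n=1 falls through the two blocks below and
// returns {"name": "<repo>", "tags": ["v2"]}.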
// Offset using last query parameter.
if last := req.URL.Query().Get("last"); last != "" {
for i, t := range tags {
if t > last {
tags = tags[i:]
break
}
}
}
// Limit using n query parameter.
if ns := req.URL.Query().Get("n"); ns != "" {
if n, err := strconv.Atoi(ns); err != nil {
return ®Error{
Status: http.StatusBadRequest,
Code: "BAD_REQUEST",
Message: fmt.Sprintf("parsing n: %v", err),
}
} else if n < len(tags) {
tags = tags[:n]
}
}
tagsToList := listTags{
Name: repo,
Tags: tags,
}
msg, _ := json.Marshal(tagsToList)
resp.Header().Set("Content-Length", fmt.Sprint(len(msg)))
resp.WriteHeader(http.StatusOK)
io.Copy(resp, bytes.NewReader([]byte(msg)))
return nil
}
return ®Error{
Status: http.StatusBadRequest,
Code: "METHOD_UNKNOWN",
Message: "We don't understand your method + url",
}
}
func (m *manifests) handleCatalog(resp http.ResponseWriter, req *http.Request) *regError {
query := req.URL.Query()
nStr := query.Get("n")
n := 10000
if nStr != "" {
n, _ = strconv.Atoi(nStr)
}
if req.Method == "GET" {
m.lock.Lock()
defer m.lock.Unlock()
var repos []string
countRepos := 0
// TODO: implement pagination
for key := range m.manifests {
if countRepos >= n {
break
}
countRepos++
repos = append(repos, key)
}
repositoriesToList := catalog{
Repos: repos,
}
msg, _ := json.Marshal(repositoriesToList)
resp.Header().Set("Content-Length", fmt.Sprint(len(msg)))
resp.WriteHeader(http.StatusOK)
io.Copy(resp, bytes.NewReader([]byte(msg)))
return nil
}
return ®Error{
Status: http.StatusBadRequest,
Code: "METHOD_UNKNOWN",
Message: "We don't understand your method + url",
}
}
// TODO: implement handling of artifactType querystring
func (m *manifests) handleReferrers(resp http.ResponseWriter, req *http.Request) *regError {
// Ensure this is a GET request
if req.Method != "GET" {
return ®Error{
Status: http.StatusBadRequest,
Code: "METHOD_UNKNOWN",
Message: "We don't understand your method + url",
}
}
elem := strings.Split(req.URL.Path, "/")
elem = elem[1:]
target := elem[len(elem)-1]
repo := strings.Join(elem[1:len(elem)-2], "/")
// Validate that incoming target is a valid digest
if _, err := v1.NewHash(target); err != nil {
return ®Error{
Status: http.StatusBadRequest,
Code: "UNSUPPORTED",
Message: "Target must be a valid digest",
}
}
m.lock.Lock()
defer m.lock.Unlock()
digestToManifestMap, repoExists := m.manifests[repo]
if !repoExists {
return ®Error{
Status: http.StatusNotFound,
Code: "NAME_UNKNOWN",
Message: "Unknown name",
}
}
im := v1.IndexManifest{
SchemaVersion: 2,
MediaType: types.OCIImageIndex,
Manifests: []v1.Descriptor{},
}
for digest, manifest := range digestToManifestMap {
h, err := v1.NewHash(digest)
if err != nil {
continue
}
var refPointer struct {
Subject *v1.Descriptor `json:"subject"`
}
json.Unmarshal(manifest.blob, &refPointer)
if refPointer.Subject == nil {
continue
}
referenceDigest := refPointer.Subject.Digest
if referenceDigest.String() != target {
continue
}
// At this point, we know the current digest references the target
var imageAsArtifact struct {
Config struct {
MediaType string `json:"mediaType"`
} `json:"config"`
}
json.Unmarshal(manifest.blob, &imageAsArtifact)
im.Manifests = append(im.Manifests, v1.Descriptor{
MediaType: types.MediaType(manifest.contentType),
Size: int64(len(manifest.blob)),
Digest: h,
ArtifactType: imageAsArtifact.Config.MediaType,
})
}
msg, _ := json.Marshal(&im)
resp.Header().Set("Content-Length", fmt.Sprint(len(msg)))
resp.WriteHeader(http.StatusOK)
io.Copy(resp, bytes.NewReader([]byte(msg)))
return nil
}
| {
if !strings.Contains(tag, "sha256:") {
tags = append(tags, tag)
}
} | conditional_block |
manifest.go | // Copyright 2018 Google LLC All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package registry
import (
"bytes"
"encoding/json"
"fmt"
"io"
"log"
"net/http"
"sort"
"strconv"
"strings"
"sync"
v1 "github.com/google/go-containerregistry/pkg/v1"
"github.com/google/go-containerregistry/pkg/v1/types"
)
type catalog struct {
Repos []string `json:"repositories"`
}
type listTags struct {
Name string `json:"name"`
Tags []string `json:"tags"`
}
type manifest struct {
contentType string
blob []byte
}
type manifests struct {
// maps repo -> manifest tag/digest -> manifest
manifests map[string]map[string]manifest
lock sync.Mutex
log *log.Logger
}
func isManifest(req *http.Request) bool {
elems := strings.Split(req.URL.Path, "/")
elems = elems[1:]
if len(elems) < 4 {
return false
}
return elems[len(elems)-2] == "manifests"
}
func isTags(req *http.Request) bool {
elems := strings.Split(req.URL.Path, "/")
elems = elems[1:]
if len(elems) < 4 {
return false
}
return elems[len(elems)-2] == "tags"
}
func isCatalog(req *http.Request) bool {
elems := strings.Split(req.URL.Path, "/")
elems = elems[1:]
if len(elems) < 2 {
return false
}
return elems[len(elems)-1] == "_catalog"
}
// Returns whether this url should be handled by the referrers handler
func isReferrers(req *http.Request) bool {
elems := strings.Split(req.URL.Path, "/")
elems = elems[1:]
if len(elems) < 4 {
return false
}
return elems[len(elems)-2] == "referrers"
}
// https://github.com/opencontainers/distribution-spec/blob/master/spec.md#pulling-an-image-manifest
// https://github.com/opencontainers/distribution-spec/blob/master/spec.md#pushing-an-image
func (m *manifests) handle(resp http.ResponseWriter, req *http.Request) *regError {
elem := strings.Split(req.URL.Path, "/")
elem = elem[1:]
target := elem[len(elem)-1]
repo := strings.Join(elem[1:len(elem)-2], "/")
switch req.Method {
case http.MethodGet:
m.lock.Lock()
defer m.lock.Unlock()
c, ok := m.manifests[repo]
if !ok {
return ®Error{
Status: http.StatusNotFound,
Code: "NAME_UNKNOWN",
Message: "Unknown name",
}
}
m, ok := c[target]
if !ok {
return ®Error{
Status: http.StatusNotFound,
Code: "MANIFEST_UNKNOWN",
Message: "Unknown manifest",
}
}
h, _, _ := v1.SHA256(bytes.NewReader(m.blob))
resp.Header().Set("Docker-Content-Digest", h.String())
resp.Header().Set("Content-Type", m.contentType)
resp.Header().Set("Content-Length", fmt.Sprint(len(m.blob)))
resp.WriteHeader(http.StatusOK)
io.Copy(resp, bytes.NewReader(m.blob))
return nil
case http.MethodHead:
m.lock.Lock()
defer m.lock.Unlock()
if _, ok := m.manifests[repo]; !ok {
return ®Error{
Status: http.StatusNotFound,
Code: "NAME_UNKNOWN",
Message: "Unknown name",
}
}
m, ok := m.manifests[repo][target]
if !ok {
return ®Error{
Status: http.StatusNotFound,
Code: "MANIFEST_UNKNOWN",
Message: "Unknown manifest",
}
}
h, _, _ := v1.SHA256(bytes.NewReader(m.blob))
resp.Header().Set("Docker-Content-Digest", h.String())
resp.Header().Set("Content-Type", m.contentType)
resp.Header().Set("Content-Length", fmt.Sprint(len(m.blob)))
resp.WriteHeader(http.StatusOK)
return nil
case http.MethodPut:
m.lock.Lock()
defer m.lock.Unlock()
if _, ok := m.manifests[repo]; !ok {
m.manifests[repo] = map[string]manifest{}
}
b := &bytes.Buffer{}
io.Copy(b, req.Body)
h, _, _ := v1.SHA256(bytes.NewReader(b.Bytes()))
digest := h.String()
mf := manifest{
blob: b.Bytes(),
contentType: req.Header.Get("Content-Type"),
}
// If the manifest is a manifest list, check that the manifest
// list's constituent manifests are already uploaded.
// This isn't strictly required by the registry API, but some
// registries require this.
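// In practice this means a client pushing an image index is expected to PUT each
// referenced sub-manifest (by digest) before PUT-ing the index itself; otherwise
// the loop below rejects the index with MANIFEST_UNKNOWN.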
if types.MediaType(mf.contentType).IsIndex() {
im, err := v1.ParseIndexManifest(b)
if err != nil {
return ®Error{
Status: http.StatusBadRequest,
Code: "MANIFEST_INVALID",
Message: err.Error(),
}
}
for _, desc := range im.Manifests {
if !desc.MediaType.IsDistributable() {
continue
}
if desc.MediaType.IsIndex() || desc.MediaType.IsImage() {
if _, found := m.manifests[repo][desc.Digest.String()]; !found {
return ®Error{
Status: http.StatusNotFound,
Code: "MANIFEST_UNKNOWN",
Message: fmt.Sprintf("Sub-manifest %q not found", desc.Digest),
}
}
} else {
// TODO: Probably want to do an existence check for blobs.
m.log.Printf("TODO: Check blobs for %q", desc.Digest)
}
}
}
// Allow future references by target (tag) and immutable digest.
// See https://docs.docker.com/engine/reference/commandline/pull/#pull-an-image-by-digest-immutable-identifier.
m.manifests[repo][target] = mf
m.manifests[repo][digest] = mf
resp.Header().Set("Docker-Content-Digest", digest)
resp.WriteHeader(http.StatusCreated)
return nil
case http.MethodDelete:
m.lock.Lock()
defer m.lock.Unlock()
if _, ok := m.manifests[repo]; !ok {
return ®Error{
Status: http.StatusNotFound,
Code: "NAME_UNKNOWN",
Message: "Unknown name",
}
}
_, ok := m.manifests[repo][target]
if !ok {
return ®Error{
Status: http.StatusNotFound,
Code: "MANIFEST_UNKNOWN",
Message: "Unknown manifest",
}
}
delete(m.manifests[repo], target)
resp.WriteHeader(http.StatusAccepted)
return nil
default:
return ®Error{
Status: http.StatusBadRequest,
Code: "METHOD_UNKNOWN",
Message: "We don't understand your method + url",
}
}
}
func (m *manifests) handleTags(resp http.ResponseWriter, req *http.Request) *regError {
elem := strings.Split(req.URL.Path, "/")
elem = elem[1:]
repo := strings.Join(elem[1:len(elem)-2], "/")
if req.Method == "GET" {
m.lock.Lock()
defer m.lock.Unlock()
c, ok := m.manifests[repo]
if !ok {
return ®Error{
Status: http.StatusNotFound,
Code: "NAME_UNKNOWN",
Message: "Unknown name",
}
}
var tags []string
for tag := range c {
if !strings.Contains(tag, "sha256:") {
tags = append(tags, tag)
}
}
sort.Strings(tags)
// https://github.com/opencontainers/distribution-spec/blob/b505e9cc53ec499edbd9c1be32298388921bb705/detail.md#tags-paginated
// Offset using last query parameter.
if last := req.URL.Query().Get("last"); last != "" {
for i, t := range tags {
if t > last {
tags = tags[i:]
break
}
}
}
// Limit using n query parameter.
if ns := req.URL.Query().Get("n"); ns != "" {
if n, err := strconv.Atoi(ns); err != nil {
return ®Error{
Status: http.StatusBadRequest,
Code: "BAD_REQUEST",
Message: fmt.Sprintf("parsing n: %v", err),
}
} else if n < len(tags) {
tags = tags[:n]
}
}
tagsToList := listTags{
Name: repo,
Tags: tags,
}
msg, _ := json.Marshal(tagsToList)
resp.Header().Set("Content-Length", fmt.Sprint(len(msg)))
resp.WriteHeader(http.StatusOK)
io.Copy(resp, bytes.NewReader([]byte(msg)))
return nil
}
return ®Error{
Status: http.StatusBadRequest,
Code: "METHOD_UNKNOWN",
Message: "We don't understand your method + url",
}
}
func (m *manifests) handleCatalog(resp http.ResponseWriter, req *http.Request) *regError {
query := req.URL.Query()
nStr := query.Get("n")
n := 10000
if nStr != "" {
n, _ = strconv.Atoi(nStr)
}
if req.Method == "GET" {
m.lock.Lock()
defer m.lock.Unlock()
var repos []string
countRepos := 0
// TODO: implement pagination
for key := range m.manifests {
if countRepos >= n {
break
}
countRepos++
repos = append(repos, key)
}
repositoriesToList := catalog{
Repos: repos,
}
msg, _ := json.Marshal(repositoriesToList)
resp.Header().Set("Content-Length", fmt.Sprint(len(msg)))
resp.WriteHeader(http.StatusOK)
io.Copy(resp, bytes.NewReader([]byte(msg)))
return nil
}
return ®Error{
Status: http.StatusBadRequest,
Code: "METHOD_UNKNOWN",
Message: "We don't understand your method + url",
}
}
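// Illustrative client-side sketch (not part of the handler above): one way the
// _catalog endpoint could be exercised using only packages already imported here.
// The base URL and page size are assumptions made for the example.
func exampleListCatalog(base string) ([]string, error) {
resp, err := http.Get(base + "/v2/_catalog?n=100")
if err != nil {
return nil, err
}
defer resp.Body.Close()
var c catalog
// The handler above marshals a catalog{} value, so decode straight back into it.
if err := json.NewDecoder(resp.Body).Decode(&c); err != nil {
return nil, err
}
return c.Repos, nil
}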
// TODO: implement handling of artifactType querystring
func (m *manifests) | (resp http.ResponseWriter, req *http.Request) *regError {
// Ensure this is a GET request
if req.Method != "GET" {
return ®Error{
Status: http.StatusBadRequest,
Code: "METHOD_UNKNOWN",
Message: "We don't understand your method + url",
}
}
elem := strings.Split(req.URL.Path, "/")
elem = elem[1:]
target := elem[len(elem)-1]
repo := strings.Join(elem[1:len(elem)-2], "/")
// Validate that incoming target is a valid digest
if _, err := v1.NewHash(target); err != nil {
return ®Error{
Status: http.StatusBadRequest,
Code: "UNSUPPORTED",
Message: "Target must be a valid digest",
}
}
m.lock.Lock()
defer m.lock.Unlock()
digestToManifestMap, repoExists := m.manifests[repo]
if !repoExists {
return ®Error{
Status: http.StatusNotFound,
Code: "NAME_UNKNOWN",
Message: "Unknown name",
}
}
im := v1.IndexManifest{
SchemaVersion: 2,
MediaType: types.OCIImageIndex,
Manifests: []v1.Descriptor{},
}
for digest, manifest := range digestToManifestMap {
h, err := v1.NewHash(digest)
if err != nil {
continue
}
var refPointer struct {
Subject *v1.Descriptor `json:"subject"`
}
json.Unmarshal(manifest.blob, &refPointer)
if refPointer.Subject == nil {
continue
}
referenceDigest := refPointer.Subject.Digest
if referenceDigest.String() != target {
continue
}
// At this point, we know the current digest references the target
var imageAsArtifact struct {
Config struct {
MediaType string `json:"mediaType"`
} `json:"config"`
}
json.Unmarshal(manifest.blob, &imageAsArtifact)
im.Manifests = append(im.Manifests, v1.Descriptor{
MediaType: types.MediaType(manifest.contentType),
Size: int64(len(manifest.blob)),
Digest: h,
ArtifactType: imageAsArtifact.Config.MediaType,
})
}
msg, _ := json.Marshal(&im)
resp.Header().Set("Content-Length", fmt.Sprint(len(msg)))
resp.WriteHeader(http.StatusOK)
io.Copy(resp, bytes.NewReader([]byte(msg)))
return nil
}
| handleReferrers | identifier_name |
manifest.go | // Copyright 2018 Google LLC All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package registry
import (
"bytes"
"encoding/json"
"fmt"
"io"
"log"
"net/http"
"sort"
"strconv"
"strings"
"sync"
v1 "github.com/google/go-containerregistry/pkg/v1"
"github.com/google/go-containerregistry/pkg/v1/types"
)
type catalog struct {
Repos []string `json:"repositories"`
}
type listTags struct {
Name string `json:"name"`
Tags []string `json:"tags"`
}
type manifest struct {
contentType string
blob []byte
}
type manifests struct {
// maps repo -> manifest tag/digest -> manifest
manifests map[string]map[string]manifest
lock sync.Mutex
log *log.Logger
}
func isManifest(req *http.Request) bool {
elems := strings.Split(req.URL.Path, "/")
elems = elems[1:]
if len(elems) < 4 {
return false
}
return elems[len(elems)-2] == "manifests"
}
func isTags(req *http.Request) bool |
func isCatalog(req *http.Request) bool {
elems := strings.Split(req.URL.Path, "/")
elems = elems[1:]
if len(elems) < 2 {
return false
}
return elems[len(elems)-1] == "_catalog"
}
// Returns whether this url should be handled by the referrers handler
func isReferrers(req *http.Request) bool {
elems := strings.Split(req.URL.Path, "/")
elems = elems[1:]
if len(elems) < 4 {
return false
}
return elems[len(elems)-2] == "referrers"
}
// https://github.com/opencontainers/distribution-spec/blob/master/spec.md#pulling-an-image-manifest
// https://github.com/opencontainers/distribution-spec/blob/master/spec.md#pushing-an-image
func (m *manifests) handle(resp http.ResponseWriter, req *http.Request) *regError {
elem := strings.Split(req.URL.Path, "/")
elem = elem[1:]
target := elem[len(elem)-1]
repo := strings.Join(elem[1:len(elem)-2], "/")
switch req.Method {
case http.MethodGet:
m.lock.Lock()
defer m.lock.Unlock()
c, ok := m.manifests[repo]
if !ok {
return ®Error{
Status: http.StatusNotFound,
Code: "NAME_UNKNOWN",
Message: "Unknown name",
}
}
m, ok := c[target]
if !ok {
return ®Error{
Status: http.StatusNotFound,
Code: "MANIFEST_UNKNOWN",
Message: "Unknown manifest",
}
}
h, _, _ := v1.SHA256(bytes.NewReader(m.blob))
resp.Header().Set("Docker-Content-Digest", h.String())
resp.Header().Set("Content-Type", m.contentType)
resp.Header().Set("Content-Length", fmt.Sprint(len(m.blob)))
resp.WriteHeader(http.StatusOK)
io.Copy(resp, bytes.NewReader(m.blob))
return nil
case http.MethodHead:
m.lock.Lock()
defer m.lock.Unlock()
if _, ok := m.manifests[repo]; !ok {
return ®Error{
Status: http.StatusNotFound,
Code: "NAME_UNKNOWN",
Message: "Unknown name",
}
}
m, ok := m.manifests[repo][target]
if !ok {
return ®Error{
Status: http.StatusNotFound,
Code: "MANIFEST_UNKNOWN",
Message: "Unknown manifest",
}
}
h, _, _ := v1.SHA256(bytes.NewReader(m.blob))
resp.Header().Set("Docker-Content-Digest", h.String())
resp.Header().Set("Content-Type", m.contentType)
resp.Header().Set("Content-Length", fmt.Sprint(len(m.blob)))
resp.WriteHeader(http.StatusOK)
return nil
case http.MethodPut:
m.lock.Lock()
defer m.lock.Unlock()
if _, ok := m.manifests[repo]; !ok {
m.manifests[repo] = map[string]manifest{}
}
b := &bytes.Buffer{}
io.Copy(b, req.Body)
h, _, _ := v1.SHA256(bytes.NewReader(b.Bytes()))
digest := h.String()
mf := manifest{
blob: b.Bytes(),
contentType: req.Header.Get("Content-Type"),
}
// If the manifest is a manifest list, check that the manifest
// list's constituent manifests are already uploaded.
// This isn't strictly required by the registry API, but some
// registries require this.
if types.MediaType(mf.contentType).IsIndex() {
im, err := v1.ParseIndexManifest(b)
if err != nil {
return ®Error{
Status: http.StatusBadRequest,
Code: "MANIFEST_INVALID",
Message: err.Error(),
}
}
for _, desc := range im.Manifests {
if !desc.MediaType.IsDistributable() {
continue
}
if desc.MediaType.IsIndex() || desc.MediaType.IsImage() {
if _, found := m.manifests[repo][desc.Digest.String()]; !found {
return ®Error{
Status: http.StatusNotFound,
Code: "MANIFEST_UNKNOWN",
Message: fmt.Sprintf("Sub-manifest %q not found", desc.Digest),
}
}
} else {
// TODO: Probably want to do an existence check for blobs.
m.log.Printf("TODO: Check blobs for %q", desc.Digest)
}
}
}
// Allow future references by target (tag) and immutable digest.
// See https://docs.docker.com/engine/reference/commandline/pull/#pull-an-image-by-digest-immutable-identifier.
m.manifests[repo][target] = mf
m.manifests[repo][digest] = mf
resp.Header().Set("Docker-Content-Digest", digest)
resp.WriteHeader(http.StatusCreated)
return nil
case http.MethodDelete:
m.lock.Lock()
defer m.lock.Unlock()
if _, ok := m.manifests[repo]; !ok {
return ®Error{
Status: http.StatusNotFound,
Code: "NAME_UNKNOWN",
Message: "Unknown name",
}
}
_, ok := m.manifests[repo][target]
if !ok {
return ®Error{
Status: http.StatusNotFound,
Code: "MANIFEST_UNKNOWN",
Message: "Unknown manifest",
}
}
delete(m.manifests[repo], target)
resp.WriteHeader(http.StatusAccepted)
return nil
default:
return ®Error{
Status: http.StatusBadRequest,
Code: "METHOD_UNKNOWN",
Message: "We don't understand your method + url",
}
}
}
func (m *manifests) handleTags(resp http.ResponseWriter, req *http.Request) *regError {
elem := strings.Split(req.URL.Path, "/")
elem = elem[1:]
repo := strings.Join(elem[1:len(elem)-2], "/")
if req.Method == "GET" {
m.lock.Lock()
defer m.lock.Unlock()
c, ok := m.manifests[repo]
if !ok {
return ®Error{
Status: http.StatusNotFound,
Code: "NAME_UNKNOWN",
Message: "Unknown name",
}
}
var tags []string
for tag := range c {
if !strings.Contains(tag, "sha256:") {
tags = append(tags, tag)
}
}
sort.Strings(tags)
// https://github.com/opencontainers/distribution-spec/blob/b505e9cc53ec499edbd9c1be32298388921bb705/detail.md#tags-paginated
// Offset using last query parameter.
if last := req.URL.Query().Get("last"); last != "" {
for i, t := range tags {
if t > last {
tags = tags[i:]
break
}
}
}
// Limit using n query parameter.
if ns := req.URL.Query().Get("n"); ns != "" {
if n, err := strconv.Atoi(ns); err != nil {
return ®Error{
Status: http.StatusBadRequest,
Code: "BAD_REQUEST",
Message: fmt.Sprintf("parsing n: %v", err),
}
} else if n < len(tags) {
tags = tags[:n]
}
}
tagsToList := listTags{
Name: repo,
Tags: tags,
}
msg, _ := json.Marshal(tagsToList)
resp.Header().Set("Content-Length", fmt.Sprint(len(msg)))
resp.WriteHeader(http.StatusOK)
io.Copy(resp, bytes.NewReader([]byte(msg)))
return nil
}
return ®Error{
Status: http.StatusBadRequest,
Code: "METHOD_UNKNOWN",
Message: "We don't understand your method + url",
}
}
func (m *manifests) handleCatalog(resp http.ResponseWriter, req *http.Request) *regError {
query := req.URL.Query()
nStr := query.Get("n")
n := 10000
if nStr != "" {
n, _ = strconv.Atoi(nStr)
}
if req.Method == "GET" {
m.lock.Lock()
defer m.lock.Unlock()
var repos []string
countRepos := 0
// TODO: implement pagination
for key := range m.manifests {
if countRepos >= n {
break
}
countRepos++
repos = append(repos, key)
}
repositoriesToList := catalog{
Repos: repos,
}
msg, _ := json.Marshal(repositoriesToList)
resp.Header().Set("Content-Length", fmt.Sprint(len(msg)))
resp.WriteHeader(http.StatusOK)
io.Copy(resp, bytes.NewReader([]byte(msg)))
return nil
}
return ®Error{
Status: http.StatusBadRequest,
Code: "METHOD_UNKNOWN",
Message: "We don't understand your method + url",
}
}
// TODO: implement handling of artifactType querystring
func (m *manifests) handleReferrers(resp http.ResponseWriter, req *http.Request) *regError {
// Ensure this is a GET request
if req.Method != "GET" {
return ®Error{
Status: http.StatusBadRequest,
Code: "METHOD_UNKNOWN",
Message: "We don't understand your method + url",
}
}
elem := strings.Split(req.URL.Path, "/")
elem = elem[1:]
target := elem[len(elem)-1]
repo := strings.Join(elem[1:len(elem)-2], "/")
// Validate that incoming target is a valid digest
if _, err := v1.NewHash(target); err != nil {
return ®Error{
Status: http.StatusBadRequest,
Code: "UNSUPPORTED",
Message: "Target must be a valid digest",
}
}
m.lock.Lock()
defer m.lock.Unlock()
digestToManifestMap, repoExists := m.manifests[repo]
if !repoExists {
return ®Error{
Status: http.StatusNotFound,
Code: "NAME_UNKNOWN",
Message: "Unknown name",
}
}
im := v1.IndexManifest{
SchemaVersion: 2,
MediaType: types.OCIImageIndex,
Manifests: []v1.Descriptor{},
}
for digest, manifest := range digestToManifestMap {
h, err := v1.NewHash(digest)
if err != nil {
continue
}
var refPointer struct {
Subject *v1.Descriptor `json:"subject"`
}
json.Unmarshal(manifest.blob, &refPointer)
if refPointer.Subject == nil {
continue
}
referenceDigest := refPointer.Subject.Digest
if referenceDigest.String() != target {
continue
}
// At this point, we know the current digest references the target
var imageAsArtifact struct {
Config struct {
MediaType string `json:"mediaType"`
} `json:"config"`
}
json.Unmarshal(manifest.blob, &imageAsArtifact)
im.Manifests = append(im.Manifests, v1.Descriptor{
MediaType: types.MediaType(manifest.contentType),
Size: int64(len(manifest.blob)),
Digest: h,
ArtifactType: imageAsArtifact.Config.MediaType,
})
}
msg, _ := json.Marshal(&im)
resp.Header().Set("Content-Length", fmt.Sprint(len(msg)))
resp.WriteHeader(http.StatusOK)
io.Copy(resp, bytes.NewReader([]byte(msg)))
return nil
}
| {
elems := strings.Split(req.URL.Path, "/")
elems = elems[1:]
if len(elems) < 4 {
return false
}
return elems[len(elems)-2] == "tags"
} | identifier_body |
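The tag-listing handler above pages results with the last (offset) and n (limit) query parameters. The Go sketch below exercises that behaviour end to end; it assumes the in-memory registry shown here is the one exposed as registry.New() in github.com/google/go-containerregistry/pkg/registry, and the repository name "demo" and the tag names are made up for illustration.
package main

import (
	"bytes"
	"fmt"
	"io"
	"net/http"
	"net/http/httptest"

	"github.com/google/go-containerregistry/pkg/registry"
)

func main() {
	srv := httptest.NewServer(registry.New())
	defer srv.Close()
	// Store the same trivial OCI image manifest under three tags.
	manifest := []byte(`{"schemaVersion":2,"mediaType":"application/vnd.oci.image.manifest.v1+json","config":{},"layers":[]}`)
	for _, tag := range []string{"v1", "v2", "v3"} {
		req, err := http.NewRequest(http.MethodPut, srv.URL+"/v2/demo/manifests/"+tag, bytes.NewReader(manifest))
		if err != nil {
			panic(err)
		}
		req.Header.Set("Content-Type", "application/vnd.oci.image.manifest.v1+json")
		resp, err := http.DefaultClient.Do(req)
		if err != nil {
			panic(err)
		}
		resp.Body.Close()
	}
	// n limits the page size; last skips every tag up to and including that value.
	for _, u := range []string{
		"/v2/demo/tags/list?n=1",
		"/v2/demo/tags/list?n=1&last=v1",
		"/v2/demo/tags/list?n=1&last=v2",
	} {
		resp, err := http.Get(srv.URL + u)
		if err != nil {
			panic(err)
		}
		body, _ := io.ReadAll(resp.Body)
		resp.Body.Close()
		fmt.Printf("%s -> %s\n", u, body)
	}
}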
manifest.go | // Copyright 2018 Google LLC All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package registry
import (
"bytes"
"encoding/json"
"fmt"
"io"
"log"
"net/http"
"sort"
"strconv"
"strings"
"sync"
v1 "github.com/google/go-containerregistry/pkg/v1"
"github.com/google/go-containerregistry/pkg/v1/types"
)
type catalog struct {
Repos []string `json:"repositories"`
}
type listTags struct {
Name string `json:"name"`
Tags []string `json:"tags"`
}
type manifest struct {
contentType string
blob []byte
}
type manifests struct {
// maps repo -> manifest tag/digest -> manifest
manifests map[string]map[string]manifest
lock sync.Mutex
log *log.Logger
}
func isManifest(req *http.Request) bool {
elems := strings.Split(req.URL.Path, "/")
elems = elems[1:]
if len(elems) < 4 {
return false
}
return elems[len(elems)-2] == "manifests"
}
func isTags(req *http.Request) bool {
elems := strings.Split(req.URL.Path, "/")
elems = elems[1:]
if len(elems) < 4 {
return false
}
return elems[len(elems)-2] == "tags"
}
func isCatalog(req *http.Request) bool {
elems := strings.Split(req.URL.Path, "/")
elems = elems[1:]
if len(elems) < 2 {
return false
}
return elems[len(elems)-1] == "_catalog"
}
// Returns whether this url should be handled by the referrers handler
func isReferrers(req *http.Request) bool {
elems := strings.Split(req.URL.Path, "/")
elems = elems[1:]
if len(elems) < 4 {
return false
}
return elems[len(elems)-2] == "referrers"
}
// https://github.com/opencontainers/distribution-spec/blob/master/spec.md#pulling-an-image-manifest
// https://github.com/opencontainers/distribution-spec/blob/master/spec.md#pushing-an-image
func (m *manifests) handle(resp http.ResponseWriter, req *http.Request) *regError {
elem := strings.Split(req.URL.Path, "/")
elem = elem[1:]
target := elem[len(elem)-1]
repo := strings.Join(elem[1:len(elem)-2], "/")
switch req.Method {
case http.MethodGet:
m.lock.Lock()
defer m.lock.Unlock()
c, ok := m.manifests[repo]
if !ok {
return ®Error{
Status: http.StatusNotFound,
Code: "NAME_UNKNOWN",
Message: "Unknown name",
}
}
m, ok := c[target]
if !ok {
return ®Error{
Status: http.StatusNotFound,
Code: "MANIFEST_UNKNOWN",
Message: "Unknown manifest",
}
}
h, _, _ := v1.SHA256(bytes.NewReader(m.blob))
resp.Header().Set("Docker-Content-Digest", h.String())
resp.Header().Set("Content-Type", m.contentType)
resp.Header().Set("Content-Length", fmt.Sprint(len(m.blob)))
resp.WriteHeader(http.StatusOK)
io.Copy(resp, bytes.NewReader(m.blob))
return nil
case http.MethodHead:
m.lock.Lock()
defer m.lock.Unlock()
if _, ok := m.manifests[repo]; !ok {
return ®Error{
Status: http.StatusNotFound,
Code: "NAME_UNKNOWN",
Message: "Unknown name",
}
}
m, ok := m.manifests[repo][target]
if !ok {
return ®Error{
Status: http.StatusNotFound,
Code: "MANIFEST_UNKNOWN",
Message: "Unknown manifest",
}
}
h, _, _ := v1.SHA256(bytes.NewReader(m.blob))
resp.Header().Set("Docker-Content-Digest", h.String())
resp.Header().Set("Content-Type", m.contentType)
resp.Header().Set("Content-Length", fmt.Sprint(len(m.blob)))
resp.WriteHeader(http.StatusOK)
return nil
case http.MethodPut:
m.lock.Lock()
defer m.lock.Unlock()
if _, ok := m.manifests[repo]; !ok {
m.manifests[repo] = map[string]manifest{}
}
b := &bytes.Buffer{}
io.Copy(b, req.Body)
h, _, _ := v1.SHA256(bytes.NewReader(b.Bytes()))
digest := h.String()
mf := manifest{
blob: b.Bytes(),
contentType: req.Header.Get("Content-Type"),
}
// If the manifest is a manifest list, check that the manifest
// list's constituent manifests are already uploaded.
// This isn't strictly required by the registry API, but some
// registries require this.
if types.MediaType(mf.contentType).IsIndex() {
im, err := v1.ParseIndexManifest(b)
if err != nil {
return ®Error{
Status: http.StatusBadRequest,
Code: "MANIFEST_INVALID",
Message: err.Error(),
}
}
for _, desc := range im.Manifests {
if !desc.MediaType.IsDistributable() {
continue
}
if desc.MediaType.IsIndex() || desc.MediaType.IsImage() {
if _, found := m.manifests[repo][desc.Digest.String()]; !found {
return ®Error{
Status: http.StatusNotFound,
Code: "MANIFEST_UNKNOWN",
Message: fmt.Sprintf("Sub-manifest %q not found", desc.Digest),
}
}
} else {
// TODO: Probably want to do an existence check for blobs.
m.log.Printf("TODO: Check blobs for %q", desc.Digest)
}
}
}
// Allow future references by target (tag) and immutable digest.
// See https://docs.docker.com/engine/reference/commandline/pull/#pull-an-image-by-digest-immutable-identifier.
m.manifests[repo][target] = mf
m.manifests[repo][digest] = mf
resp.Header().Set("Docker-Content-Digest", digest)
resp.WriteHeader(http.StatusCreated)
return nil
case http.MethodDelete:
m.lock.Lock()
defer m.lock.Unlock()
if _, ok := m.manifests[repo]; !ok {
return ®Error{
Status: http.StatusNotFound,
Code: "NAME_UNKNOWN",
Message: "Unknown name",
}
}
_, ok := m.manifests[repo][target]
if !ok {
return ®Error{
Status: http.StatusNotFound,
Code: "MANIFEST_UNKNOWN",
Message: "Unknown manifest",
}
}
delete(m.manifests[repo], target)
resp.WriteHeader(http.StatusAccepted)
return nil
default:
return ®Error{
Status: http.StatusBadRequest,
Code: "METHOD_UNKNOWN",
Message: "We don't understand your method + url",
}
}
}
func (m *manifests) handleTags(resp http.ResponseWriter, req *http.Request) *regError {
elem := strings.Split(req.URL.Path, "/")
elem = elem[1:]
repo := strings.Join(elem[1:len(elem)-2], "/")
if req.Method == "GET" {
m.lock.Lock()
defer m.lock.Unlock()
c, ok := m.manifests[repo]
if !ok {
return ®Error{
Status: http.StatusNotFound,
Code: "NAME_UNKNOWN",
Message: "Unknown name",
}
}
var tags []string
for tag := range c {
if !strings.Contains(tag, "sha256:") {
tags = append(tags, tag)
}
}
sort.Strings(tags)
// https://github.com/opencontainers/distribution-spec/blob/b505e9cc53ec499edbd9c1be32298388921bb705/detail.md#tags-paginated
// Offset using last query parameter.
if last := req.URL.Query().Get("last"); last != "" {
for i, t := range tags {
if t > last {
tags = tags[i:]
break
}
}
}
// Limit using n query parameter.
if ns := req.URL.Query().Get("n"); ns != "" {
if n, err := strconv.Atoi(ns); err != nil {
return ®Error{
Status: http.StatusBadRequest,
Code: "BAD_REQUEST",
Message: fmt.Sprintf("parsing n: %v", err),
}
} else if n < len(tags) {
tags = tags[:n]
}
}
tagsToList := listTags{
Name: repo,
Tags: tags,
}
msg, _ := json.Marshal(tagsToList)
resp.Header().Set("Content-Length", fmt.Sprint(len(msg)))
resp.WriteHeader(http.StatusOK)
io.Copy(resp, bytes.NewReader([]byte(msg)))
return nil
}
return ®Error{
Status: http.StatusBadRequest,
Code: "METHOD_UNKNOWN",
Message: "We don't understand your method + url",
}
}
func (m *manifests) handleCatalog(resp http.ResponseWriter, req *http.Request) *regError {
query := req.URL.Query()
nStr := query.Get("n")
n := 10000
if nStr != "" {
n, _ = strconv.Atoi(nStr)
}
if req.Method == "GET" {
m.lock.Lock()
defer m.lock.Unlock()
var repos []string
countRepos := 0
// TODO: implement pagination
for key := range m.manifests {
if countRepos >= n {
break
}
countRepos++
repos = append(repos, key)
}
repositoriesToList := catalog{
Repos: repos,
}
msg, _ := json.Marshal(repositoriesToList)
resp.Header().Set("Content-Length", fmt.Sprint(len(msg)))
resp.WriteHeader(http.StatusOK)
io.Copy(resp, bytes.NewReader([]byte(msg)))
return nil
}
return ®Error{
Status: http.StatusBadRequest,
Code: "METHOD_UNKNOWN",
Message: "We don't understand your method + url",
}
}
// TODO: implement handling of artifactType querystring
func (m *manifests) handleReferrers(resp http.ResponseWriter, req *http.Request) *regError {
// Ensure this is a GET request
if req.Method != "GET" {
return ®Error{
Status: http.StatusBadRequest,
Code: "METHOD_UNKNOWN",
Message: "We don't understand your method + url",
}
}
elem := strings.Split(req.URL.Path, "/")
elem = elem[1:]
target := elem[len(elem)-1]
repo := strings.Join(elem[1:len(elem)-2], "/")
// Validate that incoming target is a valid digest
if _, err := v1.NewHash(target); err != nil {
return ®Error{
Status: http.StatusBadRequest,
Code: "UNSUPPORTED",
Message: "Target must be a valid digest",
}
}
m.lock.Lock()
defer m.lock.Unlock()
digestToManifestMap, repoExists := m.manifests[repo]
if !repoExists {
return ®Error{
Status: http.StatusNotFound,
Code: "NAME_UNKNOWN",
Message: "Unknown name",
}
}
im := v1.IndexManifest{
SchemaVersion: 2,
MediaType: types.OCIImageIndex,
Manifests: []v1.Descriptor{},
}
for digest, manifest := range digestToManifestMap {
h, err := v1.NewHash(digest)
if err != nil {
continue
}
var refPointer struct {
Subject *v1.Descriptor `json:"subject"`
}
json.Unmarshal(manifest.blob, &refPointer)
if refPointer.Subject == nil { | referenceDigest := refPointer.Subject.Digest
if referenceDigest.String() != target {
continue
}
// At this point, we know the current digest references the target
var imageAsArtifact struct {
Config struct {
MediaType string `json:"mediaType"`
} `json:"config"`
}
json.Unmarshal(manifest.blob, &imageAsArtifact)
im.Manifests = append(im.Manifests, v1.Descriptor{
MediaType: types.MediaType(manifest.contentType),
Size: int64(len(manifest.blob)),
Digest: h,
ArtifactType: imageAsArtifact.Config.MediaType,
})
}
msg, _ := json.Marshal(&im)
resp.Header().Set("Content-Length", fmt.Sprint(len(msg)))
resp.WriteHeader(http.StatusOK)
io.Copy(resp, bytes.NewReader([]byte(msg)))
return nil
} | continue
} | random_line_split |
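A small self-contained sketch of the digest bookkeeping the handlers above rely on: the stored blob is hashed with v1.SHA256 to produce the value returned in Docker-Content-Digest, and the referrers handler wraps each matching manifest in a v1.Descriptor. The sample manifest bytes are illustrative only.
package main

import (
	"bytes"
	"fmt"

	v1 "github.com/google/go-containerregistry/pkg/v1"
	"github.com/google/go-containerregistry/pkg/v1/types"
)

func main() {
	blob := []byte(`{"schemaVersion":2,"mediaType":"application/vnd.oci.image.manifest.v1+json","config":{},"layers":[]}`)
	// Same call the GET/HEAD/PUT handlers use to fill Docker-Content-Digest.
	h, n, err := v1.SHA256(bytes.NewReader(blob))
	if err != nil {
		panic(err)
	}
	fmt.Println("Docker-Content-Digest:", h.String(), "bytes hashed:", n)
	// The referrers handler emits one descriptor like this per manifest whose
	// subject points at the requested digest.
	desc := v1.Descriptor{
		MediaType: types.OCIManifestSchema1,
		Size:      int64(len(blob)),
		Digest:    h,
	}
	fmt.Printf("descriptor: %+v\n", desc)
}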
api-compose-object.go | /*
* MinIO Go Library for Amazon S3 Compatible Cloud Storage
* Copyright 2017, 2018 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package minio
import (
"context"
"fmt"
"io"
"net/http"
"net/url"
"strconv"
"strings"
"time"
"github.com/google/uuid"
"github.com/minio/minio-go/v7/pkg/encrypt"
"github.com/minio/minio-go/v7/pkg/s3utils"
)
// CopyDestOptions represents options specified by user for CopyObject/ComposeObject APIs
type CopyDestOptions struct {
Bucket string // points to destination bucket
Object string // points to destination object
// `Encryption` is the key info for server-side-encryption with customer
// provided key. If it is nil, no encryption is performed.
Encryption encrypt.ServerSide
// `userMeta` is the user-metadata key-value pairs to be set on the
// destination. The keys are automatically prefixed with `x-amz-meta-`
// if needed. If nil is passed, and if only a single source (of any
// size) is provided in the ComposeObject call, then metadata from the
// source is copied to the destination.
// if no user-metadata is provided, it is copied from source
// (when there is only one source object in the compose
// request)
UserMetadata map[string]string
// UserMetadata is only set on the destination if ReplaceMetadata is true;
// otherwise UserMetadata is ignored and src.UserMetadata is preserved.
// NOTE: if you set ReplaceMetadata to true and no metadata is present
// in UserMetadata, your destination object will not have any metadata
// set.
ReplaceMetadata bool
// `userTags` is the user defined object tags to be set on destination.
// This will be set only if the `replaceTags` field is set to true.
// Otherwise this field is ignored
UserTags map[string]string
ReplaceTags bool
// Specifies whether you want to apply a Legal Hold to the copied object.
LegalHold LegalHoldStatus
// Object Retention related fields
Mode RetentionMode
RetainUntilDate time.Time
Size int64 // Needs to be specified if progress bar is specified.
// Progress of the entire copy operation will be sent here.
Progress io.Reader
}
// Process custom-metadata to remove a `x-amz-meta-` prefix if
// present and validate that keys are distinct (after this
// prefix removal).
func filterCustomMeta(userMeta map[string]string) map[string]string {
m := make(map[string]string)
for k, v := range userMeta {
if strings.HasPrefix(strings.ToLower(k), "x-amz-meta-") {
k = k[len("x-amz-meta-"):]
}
if _, ok := m[k]; ok {
continue
}
m[k] = v
}
return m
}
// Marshal converts all the CopyDestOptions into their
// equivalent HTTP header representation
func (opts CopyDestOptions) Marshal(header http.Header) {
const replaceDirective = "REPLACE"
if opts.ReplaceTags {
header.Set(amzTaggingHeaderDirective, replaceDirective)
if tags := s3utils.TagEncode(opts.UserTags); tags != "" {
header.Set(amzTaggingHeader, tags)
}
}
if opts.LegalHold != LegalHoldStatus("") {
header.Set(amzLegalHoldHeader, opts.LegalHold.String())
}
if opts.Mode != RetentionMode("") && !opts.RetainUntilDate.IsZero() {
header.Set(amzLockMode, opts.Mode.String())
header.Set(amzLockRetainUntil, opts.RetainUntilDate.Format(time.RFC3339))
}
if opts.Encryption != nil {
opts.Encryption.Marshal(header)
}
if opts.ReplaceMetadata {
header.Set("x-amz-metadata-directive", replaceDirective)
for k, v := range filterCustomMeta(opts.UserMetadata) {
if isAmzHeader(k) || isStandardHeader(k) || isStorageClassHeader(k) {
header.Set(k, v)
} else {
header.Set("x-amz-meta-"+k, v)
}
}
}
}
// validate checks that the CopyDestOptions are well-formed and returns an error if not.
func (opts CopyDestOptions) validate() (err error) {
// Input validation.
if err = s3utils.CheckValidBucketName(opts.Bucket); err != nil {
return err
}
if err = s3utils.CheckValidObjectName(opts.Object); err != nil {
return err
}
if opts.Progress != nil && opts.Size < 0 {
return errInvalidArgument("For progress bar effective size needs to be specified")
}
return nil
}
// CopySrcOptions represents a source object to be copied, using
// server-side copying APIs.
type CopySrcOptions struct {
Bucket, Object string
VersionID string
MatchETag string
NoMatchETag string
MatchModifiedSince time.Time
MatchUnmodifiedSince time.Time
MatchRange bool
Start, End int64
Encryption encrypt.ServerSide
}
// Marshal converts all the CopySrcOptions into their
// equivalent HTTP header representation
func (opts CopySrcOptions) Marshal(header http.Header) {
// Set the source header
header.Set("x-amz-copy-source", s3utils.EncodePath(opts.Bucket+"/"+opts.Object))
if opts.VersionID != "" {
header.Set("x-amz-copy-source", s3utils.EncodePath(opts.Bucket+"/"+opts.Object)+"?versionId="+opts.VersionID)
}
if opts.MatchETag != "" {
header.Set("x-amz-copy-source-if-match", opts.MatchETag)
}
if opts.NoMatchETag != "" {
header.Set("x-amz-copy-source-if-none-match", opts.NoMatchETag)
}
if !opts.MatchModifiedSince.IsZero() {
header.Set("x-amz-copy-source-if-modified-since", opts.MatchModifiedSince.Format(http.TimeFormat))
}
if !opts.MatchUnmodifiedSince.IsZero() {
header.Set("x-amz-copy-source-if-unmodified-since", opts.MatchUnmodifiedSince.Format(http.TimeFormat))
}
if opts.Encryption != nil {
encrypt.SSECopy(opts.Encryption).Marshal(header)
}
}
func (opts CopySrcOptions) validate() (err error) {
// Input validation.
if err = s3utils.CheckValidBucketName(opts.Bucket); err != nil {
return err
}
if err = s3utils.CheckValidObjectName(opts.Object); err != nil {
return err
}
if opts.Start > opts.End || opts.Start < 0 {
return errInvalidArgument("start must be non-negative, and start must be at most end.")
}
return nil
}
// Low-level implementation of the CopyObject API; supports only up to 5GiB per copy.
func (c *Client) copyObjectDo(ctx context.Context, srcBucket, srcObject, destBucket, destObject string,
metadata map[string]string, srcOpts CopySrcOptions, dstOpts PutObjectOptions,
) (ObjectInfo, error) {
// Build headers.
headers := make(http.Header)
// Set all the metadata headers.
for k, v := range metadata {
headers.Set(k, v)
}
if !dstOpts.Internal.ReplicationStatus.Empty() {
headers.Set(amzBucketReplicationStatus, string(dstOpts.Internal.ReplicationStatus))
}
if !dstOpts.Internal.SourceMTime.IsZero() {
headers.Set(minIOBucketSourceMTime, dstOpts.Internal.SourceMTime.Format(time.RFC3339Nano))
} | }
if dstOpts.Internal.ReplicationValidityCheck {
headers.Set(minIOBucketReplicationCheck, "true")
}
if !dstOpts.Internal.LegalholdTimestamp.IsZero() {
headers.Set(minIOBucketReplicationObjectLegalHoldTimestamp, dstOpts.Internal.LegalholdTimestamp.Format(time.RFC3339Nano))
}
if !dstOpts.Internal.RetentionTimestamp.IsZero() {
headers.Set(minIOBucketReplicationObjectRetentionTimestamp, dstOpts.Internal.RetentionTimestamp.Format(time.RFC3339Nano))
}
if !dstOpts.Internal.TaggingTimestamp.IsZero() {
headers.Set(minIOBucketReplicationTaggingTimestamp, dstOpts.Internal.TaggingTimestamp.Format(time.RFC3339Nano))
}
if len(dstOpts.UserTags) != 0 {
headers.Set(amzTaggingHeader, s3utils.TagEncode(dstOpts.UserTags))
}
reqMetadata := requestMetadata{
bucketName: destBucket,
objectName: destObject,
customHeader: headers,
}
if dstOpts.Internal.SourceVersionID != "" {
if dstOpts.Internal.SourceVersionID != nullVersionID {
if _, err := uuid.Parse(dstOpts.Internal.SourceVersionID); err != nil {
return ObjectInfo{}, errInvalidArgument(err.Error())
}
}
urlValues := make(url.Values)
urlValues.Set("versionId", dstOpts.Internal.SourceVersionID)
reqMetadata.queryValues = urlValues
}
// Set the source header
headers.Set("x-amz-copy-source", s3utils.EncodePath(srcBucket+"/"+srcObject))
if srcOpts.VersionID != "" {
headers.Set("x-amz-copy-source", s3utils.EncodePath(srcBucket+"/"+srcObject)+"?versionId="+srcOpts.VersionID)
}
// Send upload-part-copy request
resp, err := c.executeMethod(ctx, http.MethodPut, reqMetadata)
defer closeResponse(resp)
if err != nil {
return ObjectInfo{}, err
}
// Check if we got an error response.
if resp.StatusCode != http.StatusOK {
return ObjectInfo{}, httpRespToErrorResponse(resp, srcBucket, srcObject)
}
cpObjRes := copyObjectResult{}
err = xmlDecoder(resp.Body, &cpObjRes)
if err != nil {
return ObjectInfo{}, err
}
objInfo := ObjectInfo{
Key: destObject,
ETag: strings.Trim(cpObjRes.ETag, "\""),
LastModified: cpObjRes.LastModified,
}
return objInfo, nil
}
func (c *Client) copyObjectPartDo(ctx context.Context, srcBucket, srcObject, destBucket, destObject, uploadID string,
partID int, startOffset, length int64, metadata map[string]string,
) (p CompletePart, err error) {
headers := make(http.Header)
// Set source
headers.Set("x-amz-copy-source", s3utils.EncodePath(srcBucket+"/"+srcObject))
if startOffset < 0 {
return p, errInvalidArgument("startOffset must be non-negative")
}
if length >= 0 {
headers.Set("x-amz-copy-source-range", fmt.Sprintf("bytes=%d-%d", startOffset, startOffset+length-1))
}
for k, v := range metadata {
headers.Set(k, v)
}
queryValues := make(url.Values)
queryValues.Set("partNumber", strconv.Itoa(partID))
queryValues.Set("uploadId", uploadID)
resp, err := c.executeMethod(ctx, http.MethodPut, requestMetadata{
bucketName: destBucket,
objectName: destObject,
customHeader: headers,
queryValues: queryValues,
})
defer closeResponse(resp)
if err != nil {
return
}
// Check if we got an error response.
if resp.StatusCode != http.StatusOK {
return p, httpRespToErrorResponse(resp, destBucket, destObject)
}
// Decode copy-part response on success.
cpObjRes := copyObjectResult{}
err = xmlDecoder(resp.Body, &cpObjRes)
if err != nil {
return p, err
}
p.PartNumber, p.ETag = partID, cpObjRes.ETag
return p, nil
}
// uploadPartCopy - helper function to create a part in a multipart
// upload via an upload-part-copy request
// https://docs.aws.amazon.com/AmazonS3/latest/API/mpUploadUploadPartCopy.html
func (c *Client) uploadPartCopy(ctx context.Context, bucket, object, uploadID string, partNumber int,
headers http.Header,
) (p CompletePart, err error) {
// Build query parameters
urlValues := make(url.Values)
urlValues.Set("partNumber", strconv.Itoa(partNumber))
urlValues.Set("uploadId", uploadID)
// Send upload-part-copy request
resp, err := c.executeMethod(ctx, http.MethodPut, requestMetadata{
bucketName: bucket,
objectName: object,
customHeader: headers,
queryValues: urlValues,
})
defer closeResponse(resp)
if err != nil {
return p, err
}
// Check if we got an error response.
if resp.StatusCode != http.StatusOK {
return p, httpRespToErrorResponse(resp, bucket, object)
}
// Decode copy-part response on success.
cpObjRes := copyObjectResult{}
err = xmlDecoder(resp.Body, &cpObjRes)
if err != nil {
return p, err
}
p.PartNumber, p.ETag = partNumber, cpObjRes.ETag
return p, nil
}
// ComposeObject - creates an object using server-side copying
// of existing objects. It takes a list of source objects (with optional offsets)
// and concatenates them into a new object using only server-side copying
// operations. Optionally takes progress reader hook for applications to
// look at current progress.
func (c *Client) ComposeObject(ctx context.Context, dst CopyDestOptions, srcs ...CopySrcOptions) (UploadInfo, error) {
if len(srcs) < 1 || len(srcs) > maxPartsCount {
return UploadInfo{}, errInvalidArgument("There must be as least one and up to 10000 source objects.")
}
for _, src := range srcs {
if err := src.validate(); err != nil {
return UploadInfo{}, err
}
}
if err := dst.validate(); err != nil {
return UploadInfo{}, err
}
srcObjectInfos := make([]ObjectInfo, len(srcs))
srcObjectSizes := make([]int64, len(srcs))
var totalSize, totalParts int64
var err error
for i, src := range srcs {
opts := StatObjectOptions{ServerSideEncryption: encrypt.SSE(src.Encryption), VersionID: src.VersionID}
srcObjectInfos[i], err = c.StatObject(context.Background(), src.Bucket, src.Object, opts)
if err != nil {
return UploadInfo{}, err
}
srcCopySize := srcObjectInfos[i].Size
// Check if a segment is specified, and if so, is the
// segment within object bounds?
if src.MatchRange {
// Since range is specified,
// 0 <= src.start <= src.end
// so only invalid case to check is:
if src.End >= srcCopySize || src.Start < 0 {
return UploadInfo{}, errInvalidArgument(
fmt.Sprintf("CopySrcOptions %d has invalid segment-to-copy [%d, %d] (size is %d)",
i, src.Start, src.End, srcCopySize))
}
srcCopySize = src.End - src.Start + 1
}
// Only the last source may be less than `absMinPartSize`
if srcCopySize < absMinPartSize && i < len(srcs)-1 {
return UploadInfo{}, errInvalidArgument(
fmt.Sprintf("CopySrcOptions %d is too small (%d) and it is not the last part", i, srcCopySize))
}
// Is data to copy too large?
totalSize += srcCopySize
if totalSize > maxMultipartPutObjectSize {
return UploadInfo{}, errInvalidArgument(fmt.Sprintf("Cannot compose an object of size %d (> 5TiB)", totalSize))
}
// record source size
srcObjectSizes[i] = srcCopySize
// calculate parts needed for current source
totalParts += partsRequired(srcCopySize)
// Do we need more parts than we are allowed?
if totalParts > maxPartsCount {
return UploadInfo{}, errInvalidArgument(fmt.Sprintf(
"Your proposed compose object requires more than %d parts", maxPartsCount))
}
}
// Single source object case (i.e. when only one source is
// involved, it is being copied wholly and at most 5GiB in
// size, empty files are also supported).
if (totalParts == 1 && srcs[0].Start == -1 && totalSize <= maxPartSize) || (totalSize == 0) {
return c.CopyObject(ctx, dst, srcs[0])
}
// Now, handle multipart-copy cases.
// 1. Ensure that the object has not been changed while
// we are copying data.
for i := range srcs {
srcs[i].MatchETag = srcObjectInfos[i].ETag
}
// 2. Initiate a new multipart upload.
// Set user-metadata on the destination object. If no
// user-metadata is specified, and there is only one source,
// (only) then metadata from source is copied.
var userMeta map[string]string
if dst.ReplaceMetadata {
userMeta = dst.UserMetadata
} else {
userMeta = srcObjectInfos[0].UserMetadata
}
var userTags map[string]string
if dst.ReplaceTags {
userTags = dst.UserTags
} else {
userTags = srcObjectInfos[0].UserTags
}
uploadID, err := c.newUploadID(ctx, dst.Bucket, dst.Object, PutObjectOptions{
ServerSideEncryption: dst.Encryption,
UserMetadata: userMeta,
UserTags: userTags,
Mode: dst.Mode,
RetainUntilDate: dst.RetainUntilDate,
LegalHold: dst.LegalHold,
})
if err != nil {
return UploadInfo{}, err
}
// 3. Perform copy part uploads
objParts := []CompletePart{}
partIndex := 1
for i, src := range srcs {
h := make(http.Header)
src.Marshal(h)
if dst.Encryption != nil && dst.Encryption.Type() == encrypt.SSEC {
dst.Encryption.Marshal(h)
}
// calculate start/end indices of parts after
// splitting.
startIdx, endIdx := calculateEvenSplits(srcObjectSizes[i], src)
for j, start := range startIdx {
end := endIdx[j]
// Add (or reset) source range header for
// upload part copy request.
h.Set("x-amz-copy-source-range",
fmt.Sprintf("bytes=%d-%d", start, end))
// make upload-part-copy request
complPart, err := c.uploadPartCopy(ctx, dst.Bucket,
dst.Object, uploadID, partIndex, h)
if err != nil {
return UploadInfo{}, err
}
if dst.Progress != nil {
io.CopyN(io.Discard, dst.Progress, end-start+1)
}
objParts = append(objParts, complPart)
partIndex++
}
}
// 4. Make final complete-multipart request.
uploadInfo, err := c.completeMultipartUpload(ctx, dst.Bucket, dst.Object, uploadID,
completeMultipartUpload{Parts: objParts}, PutObjectOptions{ServerSideEncryption: dst.Encryption})
if err != nil {
return UploadInfo{}, err
}
uploadInfo.Size = totalSize
return uploadInfo, nil
}
// partsRequired returns the number of parts needed to copy size bytes, where each
// part is at most maxMultipartPutObjectSize / (maxPartsCount - 1) bytes.
func partsRequired(size int64) int64 {
maxPartSize := maxMultipartPutObjectSize / (maxPartsCount - 1)
r := size / int64(maxPartSize)
if size%int64(maxPartSize) > 0 {
r++
}
return r
}
// calculateEvenSplits - computes splits for a source and returns
// start and end index slices. Splits happen evenly to be sure that no
// part is less than 5MiB, as that could fail the multipart request if
// it is not the last part.
func calculateEvenSplits(size int64, src CopySrcOptions) (startIndex, endIndex []int64) {
if size == 0 {
return
}
reqParts := partsRequired(size)
startIndex = make([]int64, reqParts)
endIndex = make([]int64, reqParts)
// Compute number of required parts `k`, as:
//
// k = ceiling(size / copyPartSize)
//
// Now, distribute the `size` bytes in the source into
// k parts as evenly as possible:
//
// r parts sized (q+1) bytes, and
// (k - r) parts sized q bytes, where
//
// size = q * k + r (by simple division of size by k,
// so that 0 <= r < k)
//
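// For example (illustrative numbers only): size = 10 and k = 3 give
// q = 3 and r = 1, i.e. parts of 4, 3 and 3 bytes.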
start := src.Start
if start == -1 {
start = 0
}
quot, rem := size/reqParts, size%reqParts
nextStart := start
for j := int64(0); j < reqParts; j++ {
curPartSize := quot
if j < rem {
curPartSize++
}
cStart := nextStart
cEnd := cStart + curPartSize - 1
nextStart = cEnd + 1
startIndex[j], endIndex[j] = cStart, cEnd
}
return
} | if dstOpts.Internal.SourceETag != "" {
headers.Set(minIOBucketSourceETag, dstOpts.Internal.SourceETag)
}
if dstOpts.Internal.ReplicationRequest {
headers.Set(minIOBucketReplicationRequest, "true") | random_line_split |
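To see what the Marshal helpers above actually put on the wire, the sketch below fills an http.Header from a CopySrcOptions and a CopyDestOptions and prints it. It assumes the file is consumed as part of the public minio-go/v7 package (where both option types and their Marshal methods are exported); bucket, object, ETag and metadata values are placeholders.
package main

import (
	"fmt"
	"net/http"

	"github.com/minio/minio-go/v7"
)

func main() {
	src := minio.CopySrcOptions{
		Bucket:    "src-bucket",     // placeholder
		Object:    "path/to/object", // placeholder
		MatchETag: "d41d8cd98f00b204e9800998ecf8427e",
	}
	dst := minio.CopyDestOptions{
		Bucket:          "dst-bucket",
		Object:          "copied/object",
		ReplaceMetadata: true,
		UserMetadata:    map[string]string{"x-amz-meta-owner": "team-a"},
	}
	h := make(http.Header)
	src.Marshal(h) // x-amz-copy-source plus the if-match condition
	dst.Marshal(h) // x-amz-metadata-directive plus x-amz-meta-* keys
	for k, v := range h {
		fmt.Println(k, "=", v)
	}
}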
api-compose-object.go | /*
* MinIO Go Library for Amazon S3 Compatible Cloud Storage
* Copyright 2017, 2018 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package minio
import (
"context"
"fmt"
"io"
"net/http"
"net/url"
"strconv"
"strings"
"time"
"github.com/google/uuid"
"github.com/minio/minio-go/v7/pkg/encrypt"
"github.com/minio/minio-go/v7/pkg/s3utils"
)
// CopyDestOptions represents options specified by user for CopyObject/ComposeObject APIs
type CopyDestOptions struct {
Bucket string // points to destination bucket
Object string // points to destination object
// `Encryption` is the key info for server-side-encryption with customer
// provided key. If it is nil, no encryption is performed.
Encryption encrypt.ServerSide
// `userMeta` is the user-metadata key-value pairs to be set on the
// destination. The keys are automatically prefixed with `x-amz-meta-`
// if needed. If nil is passed, and if only a single source (of any
// size) is provided in the ComposeObject call, then metadata from the
// source is copied to the destination.
// if no user-metadata is provided, it is copied from source
// (when there is only one source object in the compose
// request)
UserMetadata map[string]string
// UserMetadata is only set on the destination if ReplaceMetadata is true;
// otherwise UserMetadata is ignored and src.UserMetadata is preserved.
// NOTE: if you set ReplaceMetadata to true and no metadata is present
// in UserMetadata, your destination object will not have any metadata
// set.
ReplaceMetadata bool
// `userTags` is the user defined object tags to be set on destination.
// This will be set only if the `replaceTags` field is set to true.
// Otherwise this field is ignored
UserTags map[string]string
ReplaceTags bool
// Specifies whether you want to apply a Legal Hold to the copied object.
LegalHold LegalHoldStatus
// Object Retention related fields
Mode RetentionMode
RetainUntilDate time.Time
Size int64 // Needs to be specified if progress bar is specified.
// Progress of the entire copy operation will be sent here.
Progress io.Reader
}
// Process custom-metadata to remove a `x-amz-meta-` prefix if
// present and validate that keys are distinct (after this
// prefix removal).
func filterCustomMeta(userMeta map[string]string) map[string]string {
m := make(map[string]string)
for k, v := range userMeta {
if strings.HasPrefix(strings.ToLower(k), "x-amz-meta-") {
k = k[len("x-amz-meta-"):]
}
if _, ok := m[k]; ok {
continue
}
m[k] = v
}
return m
}
// Marshal converts all the CopyDestOptions into their
// equivalent HTTP header representation
func (opts CopyDestOptions) Marshal(header http.Header) {
const replaceDirective = "REPLACE"
if opts.ReplaceTags {
header.Set(amzTaggingHeaderDirective, replaceDirective)
if tags := s3utils.TagEncode(opts.UserTags); tags != "" {
header.Set(amzTaggingHeader, tags)
}
}
if opts.LegalHold != LegalHoldStatus("") {
header.Set(amzLegalHoldHeader, opts.LegalHold.String())
}
if opts.Mode != RetentionMode("") && !opts.RetainUntilDate.IsZero() {
header.Set(amzLockMode, opts.Mode.String())
header.Set(amzLockRetainUntil, opts.RetainUntilDate.Format(time.RFC3339))
}
if opts.Encryption != nil {
opts.Encryption.Marshal(header)
}
if opts.ReplaceMetadata {
header.Set("x-amz-metadata-directive", replaceDirective)
for k, v := range filterCustomMeta(opts.UserMetadata) |
}
}
// validate checks that the CopyDestOptions are well-formed and returns an error if not.
func (opts CopyDestOptions) validate() (err error) {
// Input validation.
if err = s3utils.CheckValidBucketName(opts.Bucket); err != nil {
return err
}
if err = s3utils.CheckValidObjectName(opts.Object); err != nil {
return err
}
if opts.Progress != nil && opts.Size < 0 {
return errInvalidArgument("For progress bar effective size needs to be specified")
}
return nil
}
// CopySrcOptions represents a source object to be copied, using
// server-side copying APIs.
type CopySrcOptions struct {
Bucket, Object string
VersionID string
MatchETag string
NoMatchETag string
MatchModifiedSince time.Time
MatchUnmodifiedSince time.Time
MatchRange bool
Start, End int64
Encryption encrypt.ServerSide
}
// Marshal converts all the CopySrcOptions into their
// equivalent HTTP header representation
func (opts CopySrcOptions) Marshal(header http.Header) {
// Set the source header
header.Set("x-amz-copy-source", s3utils.EncodePath(opts.Bucket+"/"+opts.Object))
if opts.VersionID != "" {
header.Set("x-amz-copy-source", s3utils.EncodePath(opts.Bucket+"/"+opts.Object)+"?versionId="+opts.VersionID)
}
if opts.MatchETag != "" {
header.Set("x-amz-copy-source-if-match", opts.MatchETag)
}
if opts.NoMatchETag != "" {
header.Set("x-amz-copy-source-if-none-match", opts.NoMatchETag)
}
if !opts.MatchModifiedSince.IsZero() {
header.Set("x-amz-copy-source-if-modified-since", opts.MatchModifiedSince.Format(http.TimeFormat))
}
if !opts.MatchUnmodifiedSince.IsZero() {
header.Set("x-amz-copy-source-if-unmodified-since", opts.MatchUnmodifiedSince.Format(http.TimeFormat))
}
if opts.Encryption != nil {
encrypt.SSECopy(opts.Encryption).Marshal(header)
}
}
func (opts CopySrcOptions) validate() (err error) {
// Input validation.
if err = s3utils.CheckValidBucketName(opts.Bucket); err != nil {
return err
}
if err = s3utils.CheckValidObjectName(opts.Object); err != nil {
return err
}
if opts.Start > opts.End || opts.Start < 0 {
return errInvalidArgument("start must be non-negative, and start must be at most end.")
}
return nil
}
// Low-level implementation of the CopyObject API; supports only up to 5GiB per copy.
func (c *Client) copyObjectDo(ctx context.Context, srcBucket, srcObject, destBucket, destObject string,
metadata map[string]string, srcOpts CopySrcOptions, dstOpts PutObjectOptions,
) (ObjectInfo, error) {
// Build headers.
headers := make(http.Header)
// Set all the metadata headers.
for k, v := range metadata {
headers.Set(k, v)
}
if !dstOpts.Internal.ReplicationStatus.Empty() {
headers.Set(amzBucketReplicationStatus, string(dstOpts.Internal.ReplicationStatus))
}
if !dstOpts.Internal.SourceMTime.IsZero() {
headers.Set(minIOBucketSourceMTime, dstOpts.Internal.SourceMTime.Format(time.RFC3339Nano))
}
if dstOpts.Internal.SourceETag != "" {
headers.Set(minIOBucketSourceETag, dstOpts.Internal.SourceETag)
}
if dstOpts.Internal.ReplicationRequest {
headers.Set(minIOBucketReplicationRequest, "true")
}
if dstOpts.Internal.ReplicationValidityCheck {
headers.Set(minIOBucketReplicationCheck, "true")
}
if !dstOpts.Internal.LegalholdTimestamp.IsZero() {
headers.Set(minIOBucketReplicationObjectLegalHoldTimestamp, dstOpts.Internal.LegalholdTimestamp.Format(time.RFC3339Nano))
}
if !dstOpts.Internal.RetentionTimestamp.IsZero() {
headers.Set(minIOBucketReplicationObjectRetentionTimestamp, dstOpts.Internal.RetentionTimestamp.Format(time.RFC3339Nano))
}
if !dstOpts.Internal.TaggingTimestamp.IsZero() {
headers.Set(minIOBucketReplicationTaggingTimestamp, dstOpts.Internal.TaggingTimestamp.Format(time.RFC3339Nano))
}
if len(dstOpts.UserTags) != 0 {
headers.Set(amzTaggingHeader, s3utils.TagEncode(dstOpts.UserTags))
}
reqMetadata := requestMetadata{
bucketName: destBucket,
objectName: destObject,
customHeader: headers,
}
if dstOpts.Internal.SourceVersionID != "" {
if dstOpts.Internal.SourceVersionID != nullVersionID {
if _, err := uuid.Parse(dstOpts.Internal.SourceVersionID); err != nil {
return ObjectInfo{}, errInvalidArgument(err.Error())
}
}
urlValues := make(url.Values)
urlValues.Set("versionId", dstOpts.Internal.SourceVersionID)
reqMetadata.queryValues = urlValues
}
// Set the source header
headers.Set("x-amz-copy-source", s3utils.EncodePath(srcBucket+"/"+srcObject))
if srcOpts.VersionID != "" {
headers.Set("x-amz-copy-source", s3utils.EncodePath(srcBucket+"/"+srcObject)+"?versionId="+srcOpts.VersionID)
}
// Send upload-part-copy request
resp, err := c.executeMethod(ctx, http.MethodPut, reqMetadata)
defer closeResponse(resp)
if err != nil {
return ObjectInfo{}, err
}
// Check if we got an error response.
if resp.StatusCode != http.StatusOK {
return ObjectInfo{}, httpRespToErrorResponse(resp, srcBucket, srcObject)
}
cpObjRes := copyObjectResult{}
err = xmlDecoder(resp.Body, &cpObjRes)
if err != nil {
return ObjectInfo{}, err
}
objInfo := ObjectInfo{
Key: destObject,
ETag: strings.Trim(cpObjRes.ETag, "\""),
LastModified: cpObjRes.LastModified,
}
return objInfo, nil
}
func (c *Client) copyObjectPartDo(ctx context.Context, srcBucket, srcObject, destBucket, destObject, uploadID string,
partID int, startOffset, length int64, metadata map[string]string,
) (p CompletePart, err error) {
headers := make(http.Header)
// Set source
headers.Set("x-amz-copy-source", s3utils.EncodePath(srcBucket+"/"+srcObject))
if startOffset < 0 {
return p, errInvalidArgument("startOffset must be non-negative")
}
if length >= 0 {
headers.Set("x-amz-copy-source-range", fmt.Sprintf("bytes=%d-%d", startOffset, startOffset+length-1))
}
for k, v := range metadata {
headers.Set(k, v)
}
queryValues := make(url.Values)
queryValues.Set("partNumber", strconv.Itoa(partID))
queryValues.Set("uploadId", uploadID)
resp, err := c.executeMethod(ctx, http.MethodPut, requestMetadata{
bucketName: destBucket,
objectName: destObject,
customHeader: headers,
queryValues: queryValues,
})
defer closeResponse(resp)
if err != nil {
return
}
// Check if we got an error response.
if resp.StatusCode != http.StatusOK {
return p, httpRespToErrorResponse(resp, destBucket, destObject)
}
// Decode copy-part response on success.
cpObjRes := copyObjectResult{}
err = xmlDecoder(resp.Body, &cpObjRes)
if err != nil {
return p, err
}
p.PartNumber, p.ETag = partID, cpObjRes.ETag
return p, nil
}
// uploadPartCopy - helper function to create a part in a multipart
// upload via an upload-part-copy request
// https://docs.aws.amazon.com/AmazonS3/latest/API/mpUploadUploadPartCopy.html
func (c *Client) uploadPartCopy(ctx context.Context, bucket, object, uploadID string, partNumber int,
headers http.Header,
) (p CompletePart, err error) {
// Build query parameters
urlValues := make(url.Values)
urlValues.Set("partNumber", strconv.Itoa(partNumber))
urlValues.Set("uploadId", uploadID)
// Send upload-part-copy request
resp, err := c.executeMethod(ctx, http.MethodPut, requestMetadata{
bucketName: bucket,
objectName: object,
customHeader: headers,
queryValues: urlValues,
})
defer closeResponse(resp)
if err != nil {
return p, err
}
// Check if we got an error response.
if resp.StatusCode != http.StatusOK {
return p, httpRespToErrorResponse(resp, bucket, object)
}
// Decode copy-part response on success.
cpObjRes := copyObjectResult{}
err = xmlDecoder(resp.Body, &cpObjRes)
if err != nil {
return p, err
}
p.PartNumber, p.ETag = partNumber, cpObjRes.ETag
return p, nil
}
// ComposeObject - creates an object using server-side copying
// of existing objects. It takes a list of source objects (with optional offsets)
// and concatenates them into a new object using only server-side copying
// operations. Optionally takes progress reader hook for applications to
// look at current progress.
func (c *Client) ComposeObject(ctx context.Context, dst CopyDestOptions, srcs ...CopySrcOptions) (UploadInfo, error) {
if len(srcs) < 1 || len(srcs) > maxPartsCount {
return UploadInfo{}, errInvalidArgument("There must be as least one and up to 10000 source objects.")
}
for _, src := range srcs {
if err := src.validate(); err != nil {
return UploadInfo{}, err
}
}
if err := dst.validate(); err != nil {
return UploadInfo{}, err
}
srcObjectInfos := make([]ObjectInfo, len(srcs))
srcObjectSizes := make([]int64, len(srcs))
var totalSize, totalParts int64
var err error
for i, src := range srcs {
opts := StatObjectOptions{ServerSideEncryption: encrypt.SSE(src.Encryption), VersionID: src.VersionID}
srcObjectInfos[i], err = c.StatObject(context.Background(), src.Bucket, src.Object, opts)
if err != nil {
return UploadInfo{}, err
}
srcCopySize := srcObjectInfos[i].Size
// Check if a segment is specified, and if so, is the
// segment within object bounds?
if src.MatchRange {
// Since range is specified,
// 0 <= src.start <= src.end
// so only invalid case to check is:
if src.End >= srcCopySize || src.Start < 0 {
return UploadInfo{}, errInvalidArgument(
fmt.Sprintf("CopySrcOptions %d has invalid segment-to-copy [%d, %d] (size is %d)",
i, src.Start, src.End, srcCopySize))
}
srcCopySize = src.End - src.Start + 1
}
// Only the last source may be less than `absMinPartSize`
if srcCopySize < absMinPartSize && i < len(srcs)-1 {
return UploadInfo{}, errInvalidArgument(
fmt.Sprintf("CopySrcOptions %d is too small (%d) and it is not the last part", i, srcCopySize))
}
// Is data to copy too large?
totalSize += srcCopySize
if totalSize > maxMultipartPutObjectSize {
return UploadInfo{}, errInvalidArgument(fmt.Sprintf("Cannot compose an object of size %d (> 5TiB)", totalSize))
}
// record source size
srcObjectSizes[i] = srcCopySize
// calculate parts needed for current source
totalParts += partsRequired(srcCopySize)
// Do we need more parts than we are allowed?
if totalParts > maxPartsCount {
return UploadInfo{}, errInvalidArgument(fmt.Sprintf(
"Your proposed compose object requires more than %d parts", maxPartsCount))
}
}
// Single source object case (i.e. when only one source is
// involved, it is being copied wholly and at most 5GiB in
// size, empty files are also supported).
if (totalParts == 1 && srcs[0].Start == -1 && totalSize <= maxPartSize) || (totalSize == 0) {
return c.CopyObject(ctx, dst, srcs[0])
}
// Now, handle multipart-copy cases.
// 1. Ensure that the object has not been changed while
// we are copying data.
for i := range srcs {
srcs[i].MatchETag = srcObjectInfos[i].ETag
}
// 2. Initiate a new multipart upload.
// Set user-metadata on the destination object. If no
// user-metadata is specified, and there is only one source,
// (only) then metadata from source is copied.
var userMeta map[string]string
if dst.ReplaceMetadata {
userMeta = dst.UserMetadata
} else {
userMeta = srcObjectInfos[0].UserMetadata
}
var userTags map[string]string
if dst.ReplaceTags {
userTags = dst.UserTags
} else {
userTags = srcObjectInfos[0].UserTags
}
uploadID, err := c.newUploadID(ctx, dst.Bucket, dst.Object, PutObjectOptions{
ServerSideEncryption: dst.Encryption,
UserMetadata: userMeta,
UserTags: userTags,
Mode: dst.Mode,
RetainUntilDate: dst.RetainUntilDate,
LegalHold: dst.LegalHold,
})
if err != nil {
return UploadInfo{}, err
}
// 3. Perform copy part uploads
objParts := []CompletePart{}
partIndex := 1
for i, src := range srcs {
h := make(http.Header)
src.Marshal(h)
if dst.Encryption != nil && dst.Encryption.Type() == encrypt.SSEC {
dst.Encryption.Marshal(h)
}
// calculate start/end indices of parts after
// splitting.
startIdx, endIdx := calculateEvenSplits(srcObjectSizes[i], src)
for j, start := range startIdx {
end := endIdx[j]
// Add (or reset) source range header for
// upload part copy request.
h.Set("x-amz-copy-source-range",
fmt.Sprintf("bytes=%d-%d", start, end))
// make upload-part-copy request
complPart, err := c.uploadPartCopy(ctx, dst.Bucket,
dst.Object, uploadID, partIndex, h)
if err != nil {
return UploadInfo{}, err
}
if dst.Progress != nil {
io.CopyN(io.Discard, dst.Progress, end-start+1)
}
objParts = append(objParts, complPart)
partIndex++
}
}
// 4. Make final complete-multipart request.
uploadInfo, err := c.completeMultipartUpload(ctx, dst.Bucket, dst.Object, uploadID,
completeMultipartUpload{Parts: objParts}, PutObjectOptions{ServerSideEncryption: dst.Encryption})
if err != nil {
return UploadInfo{}, err
}
uploadInfo.Size = totalSize
return uploadInfo, nil
}
// partsRequired returns the number of parts needed to copy size bytes, where each
// part is at most maxMultipartPutObjectSize / (maxPartsCount - 1) bytes.
func partsRequired(size int64) int64 {
maxPartSize := maxMultipartPutObjectSize / (maxPartsCount - 1)
r := size / int64(maxPartSize)
if size%int64(maxPartSize) > 0 {
r++
}
return r
}
// calculateEvenSplits - computes splits for a source and returns
// start and end index slices. Splits happen evenly to be sure that no
// part is less than 5MiB, as that could fail the multipart request if
// it is not the last part.
func calculateEvenSplits(size int64, src CopySrcOptions) (startIndex, endIndex []int64) {
if size == 0 {
return
}
reqParts := partsRequired(size)
startIndex = make([]int64, reqParts)
endIndex = make([]int64, reqParts)
// Compute number of required parts `k`, as:
//
// k = ceiling(size / copyPartSize)
//
// Now, distribute the `size` bytes in the source into
// k parts as evenly as possible:
//
// r parts sized (q+1) bytes, and
// (k - r) parts sized q bytes, where
//
// size = q * k + r (by simple division of size by k,
// so that 0 <= r < k)
//
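// For example (illustrative numbers only): size = 10 and k = 3 give
// q = 3 and r = 1, i.e. parts of 4, 3 and 3 bytes.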
start := src.Start
if start == -1 {
start = 0
}
quot, rem := size/reqParts, size%reqParts
nextStart := start
for j := int64(0); j < reqParts; j++ {
curPartSize := quot
if j < rem {
curPartSize++
}
cStart := nextStart
cEnd := cStart + curPartSize - 1
nextStart = cEnd + 1
startIndex[j], endIndex[j] = cStart, cEnd
}
return
}
| {
if isAmzHeader(k) || isStandardHeader(k) || isStorageClassHeader(k) {
header.Set(k, v)
} else {
header.Set("x-amz-meta-"+k, v)
}
} | conditional_block |
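The split arithmetic in partsRequired and calculateEvenSplits above is easiest to see with concrete numbers. Because those helpers are unexported, the sketch below reproduces the same arithmetic standalone; the 5 TiB and 10000-part constants mirror the limits referenced in the file and are stated here as assumptions.
package main

import "fmt"

const (
	maxMultipartPutObjectSize = int64(5) * 1024 * 1024 * 1024 * 1024 // 5 TiB, mirroring the limit referenced above
	maxPartsCount             = 10000                                // maximum parts in a multipart upload
)

// partsRequired mirrors the helper above: the number of parts needed when each
// part is at most maxMultipartPutObjectSize / (maxPartsCount - 1) bytes.
func partsRequired(size int64) int64 {
	maxPartSize := maxMultipartPutObjectSize / (maxPartsCount - 1)
	r := size / maxPartSize
	if size%maxPartSize > 0 {
		r++
	}
	return r
}

// evenSplits distributes size bytes over k parts: rem parts of quot+1 bytes
// followed by k-rem parts of quot bytes, as calculateEvenSplits does for a
// source whose Start is 0.
func evenSplits(size int64) (starts, ends []int64) {
	k := partsRequired(size)
	quot, rem := size/k, size%k
	next := int64(0)
	for j := int64(0); j < k; j++ {
		cur := quot
		if j < rem {
			cur++
		}
		starts = append(starts, next)
		ends = append(ends, next+cur-1)
		next += cur
	}
	return
}

func main() {
	size := int64(11) * 1024 * 1024 * 1024 // an 11 GiB source, chosen for illustration
	starts, ends := evenSplits(size)
	fmt.Printf("%d bytes -> %d upload-part-copy requests\n", size, len(starts))
	for j := range starts {
		fmt.Printf("part %2d: bytes=%d-%d (%d bytes)\n", j+1, starts[j], ends[j], ends[j]-starts[j]+1)
	}
}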
api-compose-object.go | /*
* MinIO Go Library for Amazon S3 Compatible Cloud Storage
* Copyright 2017, 2018 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package minio
import (
"context"
"fmt"
"io"
"net/http"
"net/url"
"strconv"
"strings"
"time"
"github.com/google/uuid"
"github.com/minio/minio-go/v7/pkg/encrypt"
"github.com/minio/minio-go/v7/pkg/s3utils"
)
// CopyDestOptions represents options specified by user for CopyObject/ComposeObject APIs
type CopyDestOptions struct {
Bucket string // points to destination bucket
Object string // points to destination object
// `Encryption` is the key info for server-side-encryption with customer
// provided key. If it is nil, no encryption is performed.
Encryption encrypt.ServerSide
// `userMeta` is the user-metadata key-value pairs to be set on the
// destination. The keys are automatically prefixed with `x-amz-meta-`
// if needed. If nil is passed, and if only a single source (of any
// size) is provided in the ComposeObject call, then metadata from the
// source is copied to the destination.
// if no user-metadata is provided, it is copied from source
// (when there is only one source object in the compose
// request)
UserMetadata map[string]string
// UserMetadata is only set on the destination if ReplaceMetadata is true;
// otherwise UserMetadata is ignored and src.UserMetadata is preserved.
// NOTE: if you set ReplaceMetadata to true and no metadata is present
// in UserMetadata, your destination object will not have any metadata
// set.
ReplaceMetadata bool
// `userTags` is the user defined object tags to be set on destination.
// This will be set only if the `replaceTags` field is set to true.
// Otherwise this field is ignored
UserTags map[string]string
ReplaceTags bool
// Specifies whether you want to apply a Legal Hold to the copied object.
LegalHold LegalHoldStatus
// Object Retention related fields
Mode RetentionMode
RetainUntilDate time.Time
Size int64 // Needs to be specified if progress bar is specified.
// Progress of the entire copy operation will be sent here.
Progress io.Reader
}
// Process custom-metadata to remove a `x-amz-meta-` prefix if
// present and validate that keys are distinct (after this
// prefix removal).
func filterCustomMeta(userMeta map[string]string) map[string]string {
m := make(map[string]string)
for k, v := range userMeta {
if strings.HasPrefix(strings.ToLower(k), "x-amz-meta-") {
k = k[len("x-amz-meta-"):]
}
if _, ok := m[k]; ok {
continue
}
m[k] = v
}
return m
}
// Marshal converts all the CopyDestOptions into their
// equivalent HTTP header representation
func (opts CopyDestOptions) Marshal(header http.Header) {
const replaceDirective = "REPLACE"
if opts.ReplaceTags {
header.Set(amzTaggingHeaderDirective, replaceDirective)
if tags := s3utils.TagEncode(opts.UserTags); tags != "" {
header.Set(amzTaggingHeader, tags)
}
}
if opts.LegalHold != LegalHoldStatus("") {
header.Set(amzLegalHoldHeader, opts.LegalHold.String())
}
if opts.Mode != RetentionMode("") && !opts.RetainUntilDate.IsZero() {
header.Set(amzLockMode, opts.Mode.String())
header.Set(amzLockRetainUntil, opts.RetainUntilDate.Format(time.RFC3339))
}
if opts.Encryption != nil {
opts.Encryption.Marshal(header)
}
if opts.ReplaceMetadata {
header.Set("x-amz-metadata-directive", replaceDirective)
for k, v := range filterCustomMeta(opts.UserMetadata) {
if isAmzHeader(k) || isStandardHeader(k) || isStorageClassHeader(k) {
header.Set(k, v)
} else {
header.Set("x-amz-meta-"+k, v)
}
}
}
}
// validate checks that the CopyDestOptions are well-formed and returns an error if not.
func (opts CopyDestOptions) validate() (err error) {
// Input validation.
if err = s3utils.CheckValidBucketName(opts.Bucket); err != nil {
return err
}
if err = s3utils.CheckValidObjectName(opts.Object); err != nil {
return err
}
if opts.Progress != nil && opts.Size < 0 {
return errInvalidArgument("For progress bar effective size needs to be specified")
}
return nil
}
// CopySrcOptions represents a source object to be copied, using
// server-side copying APIs.
type CopySrcOptions struct {
Bucket, Object string
VersionID string
MatchETag string
NoMatchETag string
MatchModifiedSince time.Time
MatchUnmodifiedSince time.Time
MatchRange bool
Start, End int64
Encryption encrypt.ServerSide
}
// Marshal converts all the CopySrcOptions into their
// equivalent HTTP header representation
func (opts CopySrcOptions) Marshal(header http.Header) {
// Set the source header
header.Set("x-amz-copy-source", s3utils.EncodePath(opts.Bucket+"/"+opts.Object))
if opts.VersionID != "" {
header.Set("x-amz-copy-source", s3utils.EncodePath(opts.Bucket+"/"+opts.Object)+"?versionId="+opts.VersionID)
}
if opts.MatchETag != "" {
header.Set("x-amz-copy-source-if-match", opts.MatchETag)
}
if opts.NoMatchETag != "" {
header.Set("x-amz-copy-source-if-none-match", opts.NoMatchETag)
}
if !opts.MatchModifiedSince.IsZero() {
header.Set("x-amz-copy-source-if-modified-since", opts.MatchModifiedSince.Format(http.TimeFormat))
}
if !opts.MatchUnmodifiedSince.IsZero() {
header.Set("x-amz-copy-source-if-unmodified-since", opts.MatchUnmodifiedSince.Format(http.TimeFormat))
}
if opts.Encryption != nil {
encrypt.SSECopy(opts.Encryption).Marshal(header)
}
}
func (opts CopySrcOptions) validate() (err error) {
// Input validation.
if err = s3utils.CheckValidBucketName(opts.Bucket); err != nil {
return err
}
if err = s3utils.CheckValidObjectName(opts.Object); err != nil {
return err
}
if opts.Start > opts.End || opts.Start < 0 {
return errInvalidArgument("start must be non-negative, and start must be at most end.")
}
return nil
}
// Low-level implementation of the CopyObject API; supports only up to 5GiB per copy.
func (c *Client) copyObjectDo(ctx context.Context, srcBucket, srcObject, destBucket, destObject string,
metadata map[string]string, srcOpts CopySrcOptions, dstOpts PutObjectOptions,
) (ObjectInfo, error) {
// Build headers.
headers := make(http.Header)
// Set all the metadata headers.
for k, v := range metadata {
headers.Set(k, v)
}
if !dstOpts.Internal.ReplicationStatus.Empty() {
headers.Set(amzBucketReplicationStatus, string(dstOpts.Internal.ReplicationStatus))
}
if !dstOpts.Internal.SourceMTime.IsZero() {
headers.Set(minIOBucketSourceMTime, dstOpts.Internal.SourceMTime.Format(time.RFC3339Nano))
}
if dstOpts.Internal.SourceETag != "" {
headers.Set(minIOBucketSourceETag, dstOpts.Internal.SourceETag)
}
if dstOpts.Internal.ReplicationRequest {
headers.Set(minIOBucketReplicationRequest, "true")
}
if dstOpts.Internal.ReplicationValidityCheck {
headers.Set(minIOBucketReplicationCheck, "true")
}
if !dstOpts.Internal.LegalholdTimestamp.IsZero() {
headers.Set(minIOBucketReplicationObjectLegalHoldTimestamp, dstOpts.Internal.LegalholdTimestamp.Format(time.RFC3339Nano))
}
if !dstOpts.Internal.RetentionTimestamp.IsZero() {
headers.Set(minIOBucketReplicationObjectRetentionTimestamp, dstOpts.Internal.RetentionTimestamp.Format(time.RFC3339Nano))
}
if !dstOpts.Internal.TaggingTimestamp.IsZero() {
headers.Set(minIOBucketReplicationTaggingTimestamp, dstOpts.Internal.TaggingTimestamp.Format(time.RFC3339Nano))
}
if len(dstOpts.UserTags) != 0 {
headers.Set(amzTaggingHeader, s3utils.TagEncode(dstOpts.UserTags))
}
reqMetadata := requestMetadata{
bucketName: destBucket,
objectName: destObject,
customHeader: headers,
}
if dstOpts.Internal.SourceVersionID != "" {
if dstOpts.Internal.SourceVersionID != nullVersionID {
if _, err := uuid.Parse(dstOpts.Internal.SourceVersionID); err != nil {
return ObjectInfo{}, errInvalidArgument(err.Error())
}
}
urlValues := make(url.Values)
urlValues.Set("versionId", dstOpts.Internal.SourceVersionID)
reqMetadata.queryValues = urlValues
}
// Set the source header
headers.Set("x-amz-copy-source", s3utils.EncodePath(srcBucket+"/"+srcObject))
if srcOpts.VersionID != "" {
headers.Set("x-amz-copy-source", s3utils.EncodePath(srcBucket+"/"+srcObject)+"?versionId="+srcOpts.VersionID)
}
// Send upload-part-copy request
resp, err := c.executeMethod(ctx, http.MethodPut, reqMetadata)
defer closeResponse(resp)
if err != nil {
return ObjectInfo{}, err
}
// Check if we got an error response.
if resp.StatusCode != http.StatusOK {
return ObjectInfo{}, httpRespToErrorResponse(resp, srcBucket, srcObject)
}
cpObjRes := copyObjectResult{}
err = xmlDecoder(resp.Body, &cpObjRes)
if err != nil {
return ObjectInfo{}, err
}
objInfo := ObjectInfo{
Key: destObject,
ETag: strings.Trim(cpObjRes.ETag, "\""),
LastModified: cpObjRes.LastModified,
}
return objInfo, nil
}
func (c *Client) copyObjectPartDo(ctx context.Context, srcBucket, srcObject, destBucket, destObject, uploadID string,
partID int, startOffset, length int64, metadata map[string]string,
) (p CompletePart, err error) {
headers := make(http.Header)
// Set source
headers.Set("x-amz-copy-source", s3utils.EncodePath(srcBucket+"/"+srcObject))
if startOffset < 0 {
return p, errInvalidArgument("startOffset must be non-negative")
}
if length >= 0 {
headers.Set("x-amz-copy-source-range", fmt.Sprintf("bytes=%d-%d", startOffset, startOffset+length-1))
}
for k, v := range metadata {
headers.Set(k, v)
}
queryValues := make(url.Values)
queryValues.Set("partNumber", strconv.Itoa(partID))
queryValues.Set("uploadId", uploadID)
resp, err := c.executeMethod(ctx, http.MethodPut, requestMetadata{
bucketName: destBucket,
objectName: destObject,
customHeader: headers,
queryValues: queryValues,
})
defer closeResponse(resp)
if err != nil {
return
}
// Check if we got an error response.
if resp.StatusCode != http.StatusOK {
return p, httpRespToErrorResponse(resp, destBucket, destObject)
}
// Decode copy-part response on success.
cpObjRes := copyObjectResult{}
err = xmlDecoder(resp.Body, &cpObjRes)
if err != nil {
return p, err
}
p.PartNumber, p.ETag = partID, cpObjRes.ETag
return p, nil
}
// uploadPartCopy - helper function to create a part in a multipart
// upload via an upload-part-copy request
// https://docs.aws.amazon.com/AmazonS3/latest/API/mpUploadUploadPartCopy.html
func (c *Client) uploadPartCopy(ctx context.Context, bucket, object, uploadID string, partNumber int,
headers http.Header,
) (p CompletePart, err error) |
// ComposeObject - creates an object using server-side copying
// of existing objects. It takes a list of source objects (with optional offsets)
// and concatenates them into a new object using only server-side copying
// operations. Optionally takes progress reader hook for applications to
// look at current progress.
func (c *Client) ComposeObject(ctx context.Context, dst CopyDestOptions, srcs ...CopySrcOptions) (UploadInfo, error) {
if len(srcs) < 1 || len(srcs) > maxPartsCount {
return UploadInfo{}, errInvalidArgument("There must be at least one and up to 10000 source objects.")
}
for _, src := range srcs {
if err := src.validate(); err != nil {
return UploadInfo{}, err
}
}
if err := dst.validate(); err != nil {
return UploadInfo{}, err
}
srcObjectInfos := make([]ObjectInfo, len(srcs))
srcObjectSizes := make([]int64, len(srcs))
var totalSize, totalParts int64
var err error
for i, src := range srcs {
opts := StatObjectOptions{ServerSideEncryption: encrypt.SSE(src.Encryption), VersionID: src.VersionID}
srcObjectInfos[i], err = c.StatObject(context.Background(), src.Bucket, src.Object, opts)
if err != nil {
return UploadInfo{}, err
}
srcCopySize := srcObjectInfos[i].Size
// Check if a segment is specified, and if so, is the
// segment within object bounds?
if src.MatchRange {
// Since range is specified,
// 0 <= src.start <= src.end
// so only invalid case to check is:
if src.End >= srcCopySize || src.Start < 0 {
return UploadInfo{}, errInvalidArgument(
fmt.Sprintf("CopySrcOptions %d has invalid segment-to-copy [%d, %d] (size is %d)",
i, src.Start, src.End, srcCopySize))
}
srcCopySize = src.End - src.Start + 1
}
// Only the last source may be less than `absMinPartSize`
if srcCopySize < absMinPartSize && i < len(srcs)-1 {
return UploadInfo{}, errInvalidArgument(
fmt.Sprintf("CopySrcOptions %d is too small (%d) and it is not the last part", i, srcCopySize))
}
// Is data to copy too large?
totalSize += srcCopySize
if totalSize > maxMultipartPutObjectSize {
return UploadInfo{}, errInvalidArgument(fmt.Sprintf("Cannot compose an object of size %d (> 5TiB)", totalSize))
}
// record source size
srcObjectSizes[i] = srcCopySize
// calculate parts needed for current source
totalParts += partsRequired(srcCopySize)
// Do we need more parts than we are allowed?
if totalParts > maxPartsCount {
return UploadInfo{}, errInvalidArgument(fmt.Sprintf(
"Your proposed compose object requires more than %d parts", maxPartsCount))
}
}
// Single source object case (i.e. when only one source is
// involved, it is being copied wholly and at most 5GiB in
// size; empty files are also supported).
if (totalParts == 1 && srcs[0].Start == -1 && totalSize <= maxPartSize) || (totalSize == 0) {
return c.CopyObject(ctx, dst, srcs[0])
}
// Now, handle multipart-copy cases.
// 1. Ensure that the object has not been changed while
// we are copying data.
for i := range srcs {
srcs[i].MatchETag = srcObjectInfos[i].ETag
}
// 2. Initiate a new multipart upload.
// Set user-metadata on the destination object. If no
// user-metadata is specified and there is only one source,
// then (and only then) metadata from the source is copied.
var userMeta map[string]string
if dst.ReplaceMetadata {
userMeta = dst.UserMetadata
} else {
userMeta = srcObjectInfos[0].UserMetadata
}
var userTags map[string]string
if dst.ReplaceTags {
userTags = dst.UserTags
} else {
userTags = srcObjectInfos[0].UserTags
}
uploadID, err := c.newUploadID(ctx, dst.Bucket, dst.Object, PutObjectOptions{
ServerSideEncryption: dst.Encryption,
UserMetadata: userMeta,
UserTags: userTags,
Mode: dst.Mode,
RetainUntilDate: dst.RetainUntilDate,
LegalHold: dst.LegalHold,
})
if err != nil {
return UploadInfo{}, err
}
// 3. Perform copy part uploads
objParts := []CompletePart{}
partIndex := 1
for i, src := range srcs {
h := make(http.Header)
src.Marshal(h)
if dst.Encryption != nil && dst.Encryption.Type() == encrypt.SSEC {
dst.Encryption.Marshal(h)
}
// calculate start/end indices of parts after
// splitting.
startIdx, endIdx := calculateEvenSplits(srcObjectSizes[i], src)
for j, start := range startIdx {
end := endIdx[j]
// Add (or reset) source range header for
// upload part copy request.
h.Set("x-amz-copy-source-range",
fmt.Sprintf("bytes=%d-%d", start, end))
// make upload-part-copy request
complPart, err := c.uploadPartCopy(ctx, dst.Bucket,
dst.Object, uploadID, partIndex, h)
if err != nil {
return UploadInfo{}, err
}
if dst.Progress != nil {
io.CopyN(io.Discard, dst.Progress, end-start+1)
}
objParts = append(objParts, complPart)
partIndex++
}
}
// 4. Make final complete-multipart request.
uploadInfo, err := c.completeMultipartUpload(ctx, dst.Bucket, dst.Object, uploadID,
completeMultipartUpload{Parts: objParts}, PutObjectOptions{ServerSideEncryption: dst.Encryption})
if err != nil {
return UploadInfo{}, err
}
uploadInfo.Size = totalSize
return uploadInfo, nil
}
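// Example usage (illustrative sketch only; the bucket/object names and the
// exampleCompose helper are hypothetical and not part of this package's API):
func exampleCompose(ctx context.Context, client *Client) (UploadInfo, error) {
src1 := CopySrcOptions{Bucket: "source-bucket", Object: "part-1"}
src2 := CopySrcOptions{Bucket: "source-bucket", Object: "part-2"}
dst := CopyDestOptions{Bucket: "dest-bucket", Object: "combined"}
// All copying happens server-side; object data never flows through the client.
return client.ComposeObject(ctx, dst, src1, src2)
}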
// partsRequired returns the number of parts needed to copy size bytes,
// using a part size of maxMultipartPutObjectSize / (maxPartsCount - 1).
func partsRequired(size int64) int64 {
maxPartSize := maxMultipartPutObjectSize / (maxPartsCount - 1)
r := size / int64(maxPartSize)
if size%int64(maxPartSize) > 0 {
r++
}
return r
}
// calculateEvenSplits - computes splits for a source and returns
// start and end index slices. Splits happen evenly to be sure that no
// part is less than 5MiB, as that could fail the multipart request if
// it is not the last part.
func calculateEvenSplits(size int64, src CopySrcOptions) (startIndex, endIndex []int64) {
if size == 0 {
return
}
reqParts := partsRequired(size)
startIndex = make([]int64, reqParts)
endIndex = make([]int64, reqParts)
// Compute number of required parts `k`, as:
//
// k = ceiling(size / copyPartSize)
//
// Now, distribute the `size` bytes in the source into
// k parts as evenly as possible:
//
// r parts sized (q+1) bytes, and
// (k - r) parts sized q bytes, where
//
// size = q * k + r (by simple division of size by k,
// so that 0 <= r < k)
//
start := src.Start
if start == -1 {
start = 0
}
quot, rem := size/reqParts, size%reqParts
nextStart := start
for j := int64(0); j < reqParts; j++ {
curPartSize := quot
if j < rem {
curPartSize++
}
cStart := nextStart
cEnd := cStart + curPartSize - 1
nextStart = cEnd + 1
startIndex[j], endIndex[j] = cStart, cEnd
}
return
}
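// Worked example (illustrative numbers only, not from the original source):
// distributing size = 10 bytes over reqParts = 3 gives quot = 3 and rem = 1,
// so the loop above yields one part of 4 bytes followed by two parts of 3
// bytes, covering ranges [0,3], [4,6] and [7,9] when the copy starts at
// offset 0. In practice reqParts comes from partsRequired, so each part is
// close to maxMultipartPutObjectSize / (maxPartsCount - 1) bytes.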
| {
// Build query parameters
urlValues := make(url.Values)
urlValues.Set("partNumber", strconv.Itoa(partNumber))
urlValues.Set("uploadId", uploadID)
// Send upload-part-copy request
resp, err := c.executeMethod(ctx, http.MethodPut, requestMetadata{
bucketName: bucket,
objectName: object,
customHeader: headers,
queryValues: urlValues,
})
defer closeResponse(resp)
if err != nil {
return p, err
}
// Check if we got an error response.
if resp.StatusCode != http.StatusOK {
return p, httpRespToErrorResponse(resp, bucket, object)
}
// Decode copy-part response on success.
cpObjRes := copyObjectResult{}
err = xmlDecoder(resp.Body, &cpObjRes)
if err != nil {
return p, err
}
p.PartNumber, p.ETag = partNumber, cpObjRes.ETag
return p, nil
} | identifier_body |
api-compose-object.go | /*
* MinIO Go Library for Amazon S3 Compatible Cloud Storage
* Copyright 2017, 2018 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package minio
import (
"context"
"fmt"
"io"
"net/http"
"net/url"
"strconv"
"strings"
"time"
"github.com/google/uuid"
"github.com/minio/minio-go/v7/pkg/encrypt"
"github.com/minio/minio-go/v7/pkg/s3utils"
)
// CopyDestOptions represents options specified by user for CopyObject/ComposeObject APIs
type CopyDestOptions struct {
Bucket string // points to destination bucket
Object string // points to destination object
// `Encryption` is the key info for server-side-encryption with customer
// provided key. If it is nil, no encryption is performed.
Encryption encrypt.ServerSide
// `UserMetadata` is the user-metadata key-value pairs to be set on the
// destination. The keys are automatically prefixed with `x-amz-meta-`
// if needed. If nil is passed, and if only a single source (of any
// size) is provided in the ComposeObject call, then metadata from the
// source is copied to the destination.
UserMetadata map[string]string
// UserMetadata is only applied to the destination if ReplaceMetadata is
// true; otherwise UserMetadata is ignored and the source object's
// metadata is preserved.
// NOTE: if you set this to true and no metadata is present in
// UserMetadata, your destination object will not have any metadata set.
ReplaceMetadata bool
// `UserTags` is the user-defined object tags to be set on the destination.
// This will be set only if the `ReplaceTags` field is set to true.
// Otherwise this field is ignored.
UserTags map[string]string
ReplaceTags bool
// Specifies whether you want to apply a Legal Hold to the copied object.
LegalHold LegalHoldStatus
// Object Retention related fields
Mode RetentionMode
RetainUntilDate time.Time
Size int64 // Needs to be specified if progress bar is specified.
// Progress of the entire copy operation will be sent here.
Progress io.Reader
}
// Process custom-metadata to remove a `x-amz-meta-` prefix if
// present and drop duplicate keys (after this prefix removal),
// keeping the first occurrence.
func filterCustomMeta(userMeta map[string]string) map[string]string {
m := make(map[string]string)
for k, v := range userMeta {
if strings.HasPrefix(strings.ToLower(k), "x-amz-meta-") {
k = k[len("x-amz-meta-"):]
}
if _, ok := m[k]; ok {
continue
}
m[k] = v
}
return m
}
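// For example (illustrative values only): an input of
// {"x-amz-meta-owner": "alice", "X-Amz-Meta-Project": "demo"} becomes
// {"owner": "alice", "Project": "demo"}; when two keys collide after the
// prefix is stripped, the first one encountered during iteration wins.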
// Marshal converts all the CopyDestOptions into their
// equivalent HTTP header representation
func (opts CopyDestOptions) | (header http.Header) {
const replaceDirective = "REPLACE"
if opts.ReplaceTags {
header.Set(amzTaggingHeaderDirective, replaceDirective)
if tags := s3utils.TagEncode(opts.UserTags); tags != "" {
header.Set(amzTaggingHeader, tags)
}
}
if opts.LegalHold != LegalHoldStatus("") {
header.Set(amzLegalHoldHeader, opts.LegalHold.String())
}
if opts.Mode != RetentionMode("") && !opts.RetainUntilDate.IsZero() {
header.Set(amzLockMode, opts.Mode.String())
header.Set(amzLockRetainUntil, opts.RetainUntilDate.Format(time.RFC3339))
}
if opts.Encryption != nil {
opts.Encryption.Marshal(header)
}
if opts.ReplaceMetadata {
header.Set("x-amz-metadata-directive", replaceDirective)
for k, v := range filterCustomMeta(opts.UserMetadata) {
if isAmzHeader(k) || isStandardHeader(k) || isStorageClassHeader(k) {
header.Set(k, v)
} else {
header.Set("x-amz-meta-"+k, v)
}
}
}
}
// validate checks that the CopyDestOptions fields are well-formed.
func (opts CopyDestOptions) validate() (err error) {
// Input validation.
if err = s3utils.CheckValidBucketName(opts.Bucket); err != nil {
return err
}
if err = s3utils.CheckValidObjectName(opts.Object); err != nil {
return err
}
if opts.Progress != nil && opts.Size < 0 {
return errInvalidArgument("For progress bar effective size needs to be specified")
}
return nil
}
// CopySrcOptions represents a source object to be copied, using
// server-side copying APIs.
type CopySrcOptions struct {
Bucket, Object string
VersionID string
MatchETag string
NoMatchETag string
MatchModifiedSince time.Time
MatchUnmodifiedSince time.Time
MatchRange bool
Start, End int64
Encryption encrypt.ServerSide
}
// Marshal converts all the CopySrcOptions into their
// equivalent HTTP header representation
func (opts CopySrcOptions) Marshal(header http.Header) {
// Set the source header
header.Set("x-amz-copy-source", s3utils.EncodePath(opts.Bucket+"/"+opts.Object))
if opts.VersionID != "" {
header.Set("x-amz-copy-source", s3utils.EncodePath(opts.Bucket+"/"+opts.Object)+"?versionId="+opts.VersionID)
}
if opts.MatchETag != "" {
header.Set("x-amz-copy-source-if-match", opts.MatchETag)
}
if opts.NoMatchETag != "" {
header.Set("x-amz-copy-source-if-none-match", opts.NoMatchETag)
}
if !opts.MatchModifiedSince.IsZero() {
header.Set("x-amz-copy-source-if-modified-since", opts.MatchModifiedSince.Format(http.TimeFormat))
}
if !opts.MatchUnmodifiedSince.IsZero() {
header.Set("x-amz-copy-source-if-unmodified-since", opts.MatchUnmodifiedSince.Format(http.TimeFormat))
}
if opts.Encryption != nil {
encrypt.SSECopy(opts.Encryption).Marshal(header)
}
}
func (opts CopySrcOptions) validate() (err error) {
// Input validation.
if err = s3utils.CheckValidBucketName(opts.Bucket); err != nil {
return err
}
if err = s3utils.CheckValidObjectName(opts.Object); err != nil {
return err
}
if opts.Start > opts.End || opts.Start < 0 {
return errInvalidArgument("start must be non-negative, and start must be at most end.")
}
return nil
}
// Low level implementation of CopyObject API, supports only up to 5GiB worth of copy.
func (c *Client) copyObjectDo(ctx context.Context, srcBucket, srcObject, destBucket, destObject string,
metadata map[string]string, srcOpts CopySrcOptions, dstOpts PutObjectOptions,
) (ObjectInfo, error) {
// Build headers.
headers := make(http.Header)
// Set all the metadata headers.
for k, v := range metadata {
headers.Set(k, v)
}
if !dstOpts.Internal.ReplicationStatus.Empty() {
headers.Set(amzBucketReplicationStatus, string(dstOpts.Internal.ReplicationStatus))
}
if !dstOpts.Internal.SourceMTime.IsZero() {
headers.Set(minIOBucketSourceMTime, dstOpts.Internal.SourceMTime.Format(time.RFC3339Nano))
}
if dstOpts.Internal.SourceETag != "" {
headers.Set(minIOBucketSourceETag, dstOpts.Internal.SourceETag)
}
if dstOpts.Internal.ReplicationRequest {
headers.Set(minIOBucketReplicationRequest, "true")
}
if dstOpts.Internal.ReplicationValidityCheck {
headers.Set(minIOBucketReplicationCheck, "true")
}
if !dstOpts.Internal.LegalholdTimestamp.IsZero() {
headers.Set(minIOBucketReplicationObjectLegalHoldTimestamp, dstOpts.Internal.LegalholdTimestamp.Format(time.RFC3339Nano))
}
if !dstOpts.Internal.RetentionTimestamp.IsZero() {
headers.Set(minIOBucketReplicationObjectRetentionTimestamp, dstOpts.Internal.RetentionTimestamp.Format(time.RFC3339Nano))
}
if !dstOpts.Internal.TaggingTimestamp.IsZero() {
headers.Set(minIOBucketReplicationTaggingTimestamp, dstOpts.Internal.TaggingTimestamp.Format(time.RFC3339Nano))
}
if len(dstOpts.UserTags) != 0 {
headers.Set(amzTaggingHeader, s3utils.TagEncode(dstOpts.UserTags))
}
reqMetadata := requestMetadata{
bucketName: destBucket,
objectName: destObject,
customHeader: headers,
}
if dstOpts.Internal.SourceVersionID != "" {
if dstOpts.Internal.SourceVersionID != nullVersionID {
if _, err := uuid.Parse(dstOpts.Internal.SourceVersionID); err != nil {
return ObjectInfo{}, errInvalidArgument(err.Error())
}
}
urlValues := make(url.Values)
urlValues.Set("versionId", dstOpts.Internal.SourceVersionID)
reqMetadata.queryValues = urlValues
}
// Set the source header
headers.Set("x-amz-copy-source", s3utils.EncodePath(srcBucket+"/"+srcObject))
if srcOpts.VersionID != "" {
headers.Set("x-amz-copy-source", s3utils.EncodePath(srcBucket+"/"+srcObject)+"?versionId="+srcOpts.VersionID)
}
// Send copy-object request
resp, err := c.executeMethod(ctx, http.MethodPut, reqMetadata)
defer closeResponse(resp)
if err != nil {
return ObjectInfo{}, err
}
// Check if we got an error response.
if resp.StatusCode != http.StatusOK {
return ObjectInfo{}, httpRespToErrorResponse(resp, srcBucket, srcObject)
}
cpObjRes := copyObjectResult{}
err = xmlDecoder(resp.Body, &cpObjRes)
if err != nil {
return ObjectInfo{}, err
}
objInfo := ObjectInfo{
Key: destObject,
ETag: strings.Trim(cpObjRes.ETag, "\""),
LastModified: cpObjRes.LastModified,
}
return objInfo, nil
}
func (c *Client) copyObjectPartDo(ctx context.Context, srcBucket, srcObject, destBucket, destObject, uploadID string,
partID int, startOffset, length int64, metadata map[string]string,
) (p CompletePart, err error) {
headers := make(http.Header)
// Set source
headers.Set("x-amz-copy-source", s3utils.EncodePath(srcBucket+"/"+srcObject))
if startOffset < 0 {
return p, errInvalidArgument("startOffset must be non-negative")
}
if length >= 0 {
headers.Set("x-amz-copy-source-range", fmt.Sprintf("bytes=%d-%d", startOffset, startOffset+length-1))
}
for k, v := range metadata {
headers.Set(k, v)
}
queryValues := make(url.Values)
queryValues.Set("partNumber", strconv.Itoa(partID))
queryValues.Set("uploadId", uploadID)
resp, err := c.executeMethod(ctx, http.MethodPut, requestMetadata{
bucketName: destBucket,
objectName: destObject,
customHeader: headers,
queryValues: queryValues,
})
defer closeResponse(resp)
if err != nil {
return
}
// Check if we got an error response.
if resp.StatusCode != http.StatusOK {
return p, httpRespToErrorResponse(resp, destBucket, destObject)
}
// Decode copy-part response on success.
cpObjRes := copyObjectResult{}
err = xmlDecoder(resp.Body, &cpObjRes)
if err != nil {
return p, err
}
p.PartNumber, p.ETag = partID, cpObjRes.ETag
return p, nil
}
// uploadPartCopy - helper function to create a part in a multipart
// upload via an upload-part-copy request
// https://docs.aws.amazon.com/AmazonS3/latest/API/mpUploadUploadPartCopy.html
func (c *Client) uploadPartCopy(ctx context.Context, bucket, object, uploadID string, partNumber int,
headers http.Header,
) (p CompletePart, err error) {
// Build query parameters
urlValues := make(url.Values)
urlValues.Set("partNumber", strconv.Itoa(partNumber))
urlValues.Set("uploadId", uploadID)
// Send upload-part-copy request
resp, err := c.executeMethod(ctx, http.MethodPut, requestMetadata{
bucketName: bucket,
objectName: object,
customHeader: headers,
queryValues: urlValues,
})
defer closeResponse(resp)
if err != nil {
return p, err
}
// Check if we got an error response.
if resp.StatusCode != http.StatusOK {
return p, httpRespToErrorResponse(resp, bucket, object)
}
// Decode copy-part response on success.
cpObjRes := copyObjectResult{}
err = xmlDecoder(resp.Body, &cpObjRes)
if err != nil {
return p, err
}
p.PartNumber, p.ETag = partNumber, cpObjRes.ETag
return p, nil
}
// ComposeObject - creates an object using server-side copying
// of existing objects. It takes a list of source objects (with optional offsets)
// and concatenates them into a new object using only server-side copying
// operations. Optionally takes progress reader hook for applications to
// look at current progress.
func (c *Client) ComposeObject(ctx context.Context, dst CopyDestOptions, srcs ...CopySrcOptions) (UploadInfo, error) {
if len(srcs) < 1 || len(srcs) > maxPartsCount {
return UploadInfo{}, errInvalidArgument("There must be at least one and up to 10000 source objects.")
}
for _, src := range srcs {
if err := src.validate(); err != nil {
return UploadInfo{}, err
}
}
if err := dst.validate(); err != nil {
return UploadInfo{}, err
}
srcObjectInfos := make([]ObjectInfo, len(srcs))
srcObjectSizes := make([]int64, len(srcs))
var totalSize, totalParts int64
var err error
for i, src := range srcs {
opts := StatObjectOptions{ServerSideEncryption: encrypt.SSE(src.Encryption), VersionID: src.VersionID}
srcObjectInfos[i], err = c.StatObject(context.Background(), src.Bucket, src.Object, opts)
if err != nil {
return UploadInfo{}, err
}
srcCopySize := srcObjectInfos[i].Size
// Check if a segment is specified, and if so, is the
// segment within object bounds?
if src.MatchRange {
// Since range is specified,
// 0 <= src.start <= src.end
// so only invalid case to check is:
if src.End >= srcCopySize || src.Start < 0 {
return UploadInfo{}, errInvalidArgument(
fmt.Sprintf("CopySrcOptions %d has invalid segment-to-copy [%d, %d] (size is %d)",
i, src.Start, src.End, srcCopySize))
}
srcCopySize = src.End - src.Start + 1
}
// Only the last source may be less than `absMinPartSize`
if srcCopySize < absMinPartSize && i < len(srcs)-1 {
return UploadInfo{}, errInvalidArgument(
fmt.Sprintf("CopySrcOptions %d is too small (%d) and it is not the last part", i, srcCopySize))
}
// Is data to copy too large?
totalSize += srcCopySize
if totalSize > maxMultipartPutObjectSize {
return UploadInfo{}, errInvalidArgument(fmt.Sprintf("Cannot compose an object of size %d (> 5TiB)", totalSize))
}
// record source size
srcObjectSizes[i] = srcCopySize
// calculate parts needed for current source
totalParts += partsRequired(srcCopySize)
// Do we need more parts than we are allowed?
if totalParts > maxPartsCount {
return UploadInfo{}, errInvalidArgument(fmt.Sprintf(
"Your proposed compose object requires more than %d parts", maxPartsCount))
}
}
// Single source object case (i.e. when only one source is
// involved, it is being copied wholly and at most 5GiB in
// size; empty files are also supported).
if (totalParts == 1 && srcs[0].Start == -1 && totalSize <= maxPartSize) || (totalSize == 0) {
return c.CopyObject(ctx, dst, srcs[0])
}
// Now, handle multipart-copy cases.
// 1. Ensure that the object has not been changed while
// we are copying data.
for i := range srcs {
srcs[i].MatchETag = srcObjectInfos[i].ETag
}
// 2. Initiate a new multipart upload.
// Set user-metadata on the destination object. If no
// user-metadata is specified and there is only one source,
// then (and only then) metadata from the source is copied.
var userMeta map[string]string
if dst.ReplaceMetadata {
userMeta = dst.UserMetadata
} else {
userMeta = srcObjectInfos[0].UserMetadata
}
var userTags map[string]string
if dst.ReplaceTags {
userTags = dst.UserTags
} else {
userTags = srcObjectInfos[0].UserTags
}
uploadID, err := c.newUploadID(ctx, dst.Bucket, dst.Object, PutObjectOptions{
ServerSideEncryption: dst.Encryption,
UserMetadata: userMeta,
UserTags: userTags,
Mode: dst.Mode,
RetainUntilDate: dst.RetainUntilDate,
LegalHold: dst.LegalHold,
})
if err != nil {
return UploadInfo{}, err
}
// 3. Perform copy part uploads
objParts := []CompletePart{}
partIndex := 1
for i, src := range srcs {
h := make(http.Header)
src.Marshal(h)
if dst.Encryption != nil && dst.Encryption.Type() == encrypt.SSEC {
dst.Encryption.Marshal(h)
}
// calculate start/end indices of parts after
// splitting.
startIdx, endIdx := calculateEvenSplits(srcObjectSizes[i], src)
for j, start := range startIdx {
end := endIdx[j]
// Add (or reset) source range header for
// upload part copy request.
h.Set("x-amz-copy-source-range",
fmt.Sprintf("bytes=%d-%d", start, end))
// make upload-part-copy request
complPart, err := c.uploadPartCopy(ctx, dst.Bucket,
dst.Object, uploadID, partIndex, h)
if err != nil {
return UploadInfo{}, err
}
if dst.Progress != nil {
io.CopyN(io.Discard, dst.Progress, end-start+1)
}
objParts = append(objParts, complPart)
partIndex++
}
}
// 4. Make final complete-multipart request.
uploadInfo, err := c.completeMultipartUpload(ctx, dst.Bucket, dst.Object, uploadID,
completeMultipartUpload{Parts: objParts}, PutObjectOptions{ServerSideEncryption: dst.Encryption})
if err != nil {
return UploadInfo{}, err
}
uploadInfo.Size = totalSize
return uploadInfo, nil
}
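// Example usage (illustrative sketch only; the bucket/object names and the
// exampleCompose helper are hypothetical and not part of this package's API):
func exampleCompose(ctx context.Context, client *Client) (UploadInfo, error) {
src1 := CopySrcOptions{Bucket: "source-bucket", Object: "part-1"}
src2 := CopySrcOptions{Bucket: "source-bucket", Object: "part-2"}
dst := CopyDestOptions{Bucket: "dest-bucket", Object: "combined"}
// All copying happens server-side; object data never flows through the client.
return client.ComposeObject(ctx, dst, src1, src2)
}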
// partsRequired returns the number of parts needed to copy size bytes,
// using a part size of maxMultipartPutObjectSize / (maxPartsCount - 1).
func partsRequired(size int64) int64 {
maxPartSize := maxMultipartPutObjectSize / (maxPartsCount - 1)
r := size / int64(maxPartSize)
if size%int64(maxPartSize) > 0 {
r++
}
return r
}
// calculateEvenSplits - computes splits for a source and returns
// start and end index slices. Splits happen evenly to be sure that no
// part is less than 5MiB, as that could fail the multipart request if
// it is not the last part.
func calculateEvenSplits(size int64, src CopySrcOptions) (startIndex, endIndex []int64) {
if size == 0 {
return
}
reqParts := partsRequired(size)
startIndex = make([]int64, reqParts)
endIndex = make([]int64, reqParts)
// Compute number of required parts `k`, as:
//
// k = ceiling(size / copyPartSize)
//
// Now, distribute the `size` bytes in the source into
// k parts as evenly as possible:
//
// r parts sized (q+1) bytes, and
// (k - r) parts sized q bytes, where
//
// size = q * k + r (by simple division of size by k,
// so that 0 <= r < k)
//
start := src.Start
if start == -1 {
start = 0
}
quot, rem := size/reqParts, size%reqParts
nextStart := start
for j := int64(0); j < reqParts; j++ {
curPartSize := quot
if j < rem {
curPartSize++
}
cStart := nextStart
cEnd := cStart + curPartSize - 1
nextStart = cEnd + 1
startIndex[j], endIndex[j] = cStart, cEnd
}
return
}
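// Worked example (illustrative numbers only, not from the original source):
// distributing size = 10 bytes over reqParts = 3 gives quot = 3 and rem = 1,
// so the loop above yields one part of 4 bytes followed by two parts of 3
// bytes, covering ranges [0,3], [4,6] and [7,9] when the copy starts at
// offset 0. In practice reqParts comes from partsRequired, so each part is
// close to maxMultipartPutObjectSize / (maxPartsCount - 1) bytes.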
| Marshal | identifier_name |
integration.js | 'use strict';
const request = require('postman-request');
const _ = require('lodash');
const async = require('async');
const config = require('./config/config');
const fs = require('fs');
let Logger;
let requestWithDefaults;
let previousDomainRegexAsString = '';
let domainBlocklistRegex = null;
const BASE_URI = 'https://api.domaintools.com/v1/iris-investigate';
const MAX_DOMAIN_LABEL_LENGTH = 63;
const MAX_ENTITY_LENGTH = 100;
const MAX_ENTITIES_TO_BULK_LOOKUP = 30;
const WEB_EXTERNAL_URI = 'https://research.domaintools.com/iris/search/?q=';
function _setupRegexBlocklists(options) {
if (options.domainBlocklistRegex !== previousDomainRegexAsString && options.domainBlocklistRegex.length === 0) {
Logger.debug('Removing Domain Blocklist Regex Filtering');
previousDomainRegexAsString = '';
domainBlocklistRegex = null;
} else {
if (options.domainBlocklistRegex !== previousDomainRegexAsString) {
previousDomainRegexAsString = options.domainBlocklistRegex;
Logger.debug({ domainBlocklistRegex: previousDomainRegexAsString }, 'Modifying Domain Blocklist Regex');
domainBlocklistRegex = new RegExp(options.domainBlocklistRegex, 'i');
}
}
}
function chunk(arr, chunkSize) {
const R = [];
for (let i = 0, len = arr.length; i < len; i += chunkSize) {
R.push(arr.slice(i, i + chunkSize));
}
return R;
}
function doLookup(entities, options, cb) {
let lookupResults = [];
let entityLookup = {};
let entityLists = [];
_setupRegexBlocklists(options);
entities.forEach((entityObj) => {
if (_isInvalidEntity(entityObj) || _isEntityBlocklisted(entityObj, options)) {
return;
}
entityLookup[entityObj.value.toLowerCase()] = entityObj;
entityLists.push(entityObj.value.toLowerCase());
});
entityLists = chunk(entityLists, MAX_ENTITIES_TO_BULK_LOOKUP);
Logger.debug({ entityLists }, 'Entity Lists');
async.each(
entityLists,
(entityList, next) => {
_lookupEntityInvestigate(entityList, entityLookup, options, function (err, results) {
if (err) {
next(err);
} else {
lookupResults = lookupResults.concat(results);
next(null);
}
});
},
function (err) {
cb(err, lookupResults);
}
);
}
function _isInvalidEntity(entityObj) {
// The DomainTools API does not accept entities over 100 characters long, so we don't look those up
if (entityObj.value.length > MAX_ENTITY_LENGTH) {
return true;
}
// Domain labels (the parts in between the periods) must be 63 characters or less
if (entityObj.isDomain) {
const invalidLabel = entityObj.value.split('.').find((label) => {
return label.length > MAX_DOMAIN_LABEL_LENGTH;
});
if (typeof invalidLabel !== 'undefined') {
return true;
}
}
return false;
}
function _isEntityBlocklisted(entityObj, options) {
const blocklist = options.blocklist;
Logger.trace({ blocklist: blocklist }, 'checking to see what blocklist looks like');
if (_.includes(blocklist, entityObj.value.toLowerCase())) {
return true;
}
if (entityObj.isDomain) {
if (domainBlocklistRegex !== null) {
if (domainBlocklistRegex.test(entityObj.value)) {
Logger.debug({ domain: entityObj.value }, 'Blocked BlockListed Domain Lookup');
return true;
}
}
}
return false;
}
function _getRequestOptions(entityList, options) {
let requestOptions = {
uri: BASE_URI,
qs: {
api_username: options.apiName,
api_key: options.apiKey,
domain: entityList.join(',')
},
method: 'POST',
json: true
};
return requestOptions;
}
function _lookupEntityInvestigate(entityList, entityLookup, options, cb) {
const lookupResults = [];
const requestOptions = _getRequestOptions(entityList, options);
Logger.debug({ requestOptions }, 'Request Options');
requestWithDefaults(requestOptions, function (err, response, body) {
const errorObject = _isApiError(err, response, body, entityList);
if (errorObject) {
return cb(errorObject);
}
if (_isLookupMiss(response, body)) {
entityList.forEach((entity) => {
lookupResults.push({
entity: entityLookup[entity],
data: null
});
});
Logger.debug('Body is null');
return cb(null, lookupResults);
}
if (body.response.limit_exceeded === true) {
return cb('API Limit Exceeded');
}
body.response.results.forEach((result) => {
let lookupEntity = _getEntityObjFromResult(entityLookup, result);
Logger.trace({ result }, 'lookup result');
if (lookupEntity) {
if (typeof result.domain_risk.risk_score === 'undefined' || result.domain_risk.risk_score < options.minScore) {
lookupResults.push({
entity: lookupEntity,
data: null
});
} else {
lookupResults.push({
entity: lookupEntity,
data: {
summary: [],
details: {
result: {
...result,
domain_risk: {
...result.domain_risk,
components: result.domain_risk.components.filter(
({ risk_score }) => risk_score && risk_score >= options.minScore
)
}
},
maxPivot: options.maxPivot,
entityUri: WEB_EXTERNAL_URI + result.domain,
baseUri: WEB_EXTERNAL_URI
}
}
});
}
}
});
// Any domains that didn't have a hit will be listed in the `missing_domains` array property
body.response.missing_domains.forEach((missingDomain) => {
let lookupEntity = entityLookup[missingDomain];
if (lookupEntity) {
lookupResults.push({
entity: lookupEntity,
data: null
});
}
});
cb(null, lookupResults);
});
}
/**
* In general we can match up the result domain with our entity object by using the result.domain field.
 * However, in cases where the domain is internationalized (its labels are punycode-encoded and prefixed with `xn--`), the result.domain
 * field will have the unicode representation of the domain which will not match our lookup entity. In this
* case we need to parse the `whois_url` which will have the form of:
*
* "https://whois.domaintools.com/<domain-in-plain-text-format>"
*
* We can grab the domain in plain text format here and then match it up in our entityLookup to get the
* entity object that the result maps to.
*
* @param entityLookup
* @param result
* @returns {*}
* @private
*/
function _getEntityObjFromResult(entityLookup, result) {
let entity = entityLookup[result.domain];
if (entity) {
return entity;
}
let tokens = result.whois_url.split('/');
return entityLookup[tokens[tokens.length - 1]];
}
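// Illustrative sketch (hypothetical values, not from the original source): the
// API may return result.domain as a unicode string such as "münchen.example"
// while the key stored in entityLookup is the punycode form the user searched
// for. Splitting result.whois_url, e.g.
// "https://whois.domaintools.com/xn--mnchen-3ya.example", on "/" and taking the
// last token recovers "xn--mnchen-3ya.example", which matches the entityLookup key.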
function _isLookupMiss(response, body) {
return (
response.statusCode === 404 ||
response.statusCode === 500 ||
response.statusCode === 400 ||
response.statusCode === 503 ||
typeof body === 'undefined' ||
_.isNull(body) ||
_.isEmpty(body.response) ||
body.response.results_count === 0
);
}
function _isApiError(err, response, body, entityLookupList) {
if (err) {
return {
detail: 'Error executing HTTP request',
error: err
};
}
// Any code that is not 200, 404 (lookup miss), or 400 is treated as an error
if (response.statusCode !== 200 && response.statusCode !== 404 && response.statusCode !== 400) {
return _createJsonErrorPayload(
'Unexpected HTTP Status Code',
null,
response.statusCode,
'1',
'Unexpected HTTP Status Code',
{
err: err,
body: body,
entityValue: entityLookupList
}
);
}
return null;
}
function validateOptions(userOptions, cb) {
let errors = [];
if (
typeof userOptions.apiKey.value !== 'string' ||
(typeof userOptions.apiKey.value === 'string' && userOptions.apiKey.value.length === 0)
) {
errors.push({
key: 'apiKey',
message: 'You must provide a DomainTools API key'
});
}
if (
typeof userOptions.apiName.value !== 'string' ||
(typeof userOptions.apiName.value === 'string' && userOptions.apiName.value.length === 0)
) {
errors.push({
key: 'apiName',
message: 'You must provide a DomainTools API Username'
});
}
if (typeof userOptions.domainBlocklistRegex.value === 'string' && userOptions.domainBlocklistRegex.value.length > 0) {
try {
new RegExp(userOptions.domainBlocklistRegex.value);
} catch (error) {
errors.push({
key: 'domainBlocklistRegex',
message: error.toString()
});
}
}
cb(null, errors);
}
// Builds the error payload whose message is passed to the notification window
function _createJsonErrorPayload(msg, pointer, httpCode, code, title, meta) |
// Creates the JSON error object embedded in the payload
function _createJsonErrorObject(msg, pointer, httpCode, code, title, meta) {
let error = {
detail: msg,
status: httpCode.toString(),
title: title,
code: 'IRIS_' + code.toString()
};
if (pointer) {
error.source = {
pointer: pointer
};
}
if (meta) {
error.meta = meta;
}
return error;
}
function startup(logger) {
Logger = logger;
let defaults = {};
if (typeof config.request.cert === 'string' && config.request.cert.length > 0) {
defaults.cert = fs.readFileSync(config.request.cert);
}
if (typeof config.request.key === 'string' && config.request.key.length > 0) {
defaults.key = fs.readFileSync(config.request.key);
}
if (typeof config.request.passphrase === 'string' && config.request.passphrase.length > 0) {
defaults.passphrase = config.request.passphrase;
}
if (typeof config.request.ca === 'string' && config.request.ca.length > 0) {
defaults.ca = fs.readFileSync(config.request.ca);
}
if (typeof config.request.proxy === 'string' && config.request.proxy.length > 0) {
defaults.proxy = config.request.proxy;
}
if (typeof config.request.rejectUnauthorized === 'boolean') {
defaults.rejectUnauthorized = config.request.rejectUnauthorized;
}
requestWithDefaults = request.defaults(defaults);
}
module.exports = {
doLookup: doLookup,
startup: startup,
validateOptions: validateOptions
};
| {
return {
errors: [_createJsonErrorObject(msg, pointer, httpCode, code, title, meta)]
};
} | identifier_body |
integration.js | 'use strict';
const request = require('postman-request');
const _ = require('lodash');
const async = require('async');
const config = require('./config/config');
const fs = require('fs');
let Logger;
let requestWithDefaults;
let previousDomainRegexAsString = '';
let domainBlocklistRegex = null;
const BASE_URI = 'https://api.domaintools.com/v1/iris-investigate';
const MAX_DOMAIN_LABEL_LENGTH = 63;
const MAX_ENTITY_LENGTH = 100;
const MAX_ENTITIES_TO_BULK_LOOKUP = 30;
const WEB_EXTERNAL_URI = 'https://research.domaintools.com/iris/search/?q=';
function _setupRegexBlocklists(options) {
if (options.domainBlocklistRegex !== previousDomainRegexAsString && options.domainBlocklistRegex.length === 0) {
Logger.debug('Removing Domain Blocklist Regex Filtering');
previousDomainRegexAsString = '';
domainBlocklistRegex = null;
} else {
if (options.domainBlocklistRegex !== previousDomainRegexAsString) {
previousDomainRegexAsString = options.domainBlocklistRegex;
Logger.debug({ domainBlocklistRegex: previousDomainRegexAsString }, 'Modifying Domain Blocklist Regex');
domainBlocklistRegex = new RegExp(options.domainBlocklistRegex, 'i');
}
}
}
function chunk(arr, chunkSize) {
const R = [];
for (let i = 0, len = arr.length; i < len; i += chunkSize) |
return R;
}
function doLookup(entities, options, cb) {
let lookupResults = [];
let entityLookup = {};
let entityLists = [];
_setupRegexBlocklists(options);
entities.forEach((entityObj) => {
if (_isInvalidEntity(entityObj) || _isEntityBlocklisted(entityObj, options)) {
return;
}
entityLookup[entityObj.value.toLowerCase()] = entityObj;
entityLists.push(entityObj.value.toLowerCase());
});
entityLists = chunk(entityLists, MAX_ENTITIES_TO_BULK_LOOKUP);
Logger.debug({ entityLists }, 'Entity Lists');
async.each(
entityLists,
(entityList, next) => {
_lookupEntityInvestigate(entityList, entityLookup, options, function (err, results) {
if (err) {
next(err);
} else {
lookupResults = lookupResults.concat(results);
next(null);
}
});
},
function (err) {
cb(err, lookupResults);
}
);
}
function _isInvalidEntity(entityObj) {
// The DomainTools API does not accept entities over 100 characters long, so we don't look those up
if (entityObj.value.length > MAX_ENTITY_LENGTH) {
return true;
}
// Domain labels (the parts in between the periods) must be 63 characters or less
if (entityObj.isDomain) {
const invalidLabel = entityObj.value.split('.').find((label) => {
return label.length > MAX_DOMAIN_LABEL_LENGTH;
});
if (typeof invalidLabel !== 'undefined') {
return true;
}
}
return false;
}
function _isEntityBlocklisted(entityObj, options) {
const blocklist = options.blocklist;
Logger.trace({ blocklist: blocklist }, 'checking to see what blocklist looks like');
if (_.includes(blocklist, entityObj.value.toLowerCase())) {
return true;
}
if (entityObj.isDomain) {
if (domainBlocklistRegex !== null) {
if (domainBlocklistRegex.test(entityObj.value)) {
Logger.debug({ domain: entityObj.value }, 'Blocked BlockListed Domain Lookup');
return true;
}
}
}
return false;
}
function _getRequestOptions(entityList, options) {
let requestOptions = {
uri: BASE_URI,
qs: {
api_username: options.apiName,
api_key: options.apiKey,
domain: entityList.join(',')
},
method: 'POST',
json: true
};
return requestOptions;
}
function _lookupEntityInvestigate(entityList, entityLookup, options, cb) {
const lookupResults = [];
const requestOptions = _getRequestOptions(entityList, options);
Logger.debug({ requestOptions }, 'Request Options');
requestWithDefaults(requestOptions, function (err, response, body) {
const errorObject = _isApiError(err, response, body, entityList);
if (errorObject) {
return cb(errorObject);
}
if (_isLookupMiss(response, body)) {
entityList.forEach((entity) => {
lookupResults.push({
entity: entityLookup[entity],
data: null
});
});
Logger.debug('Body is null');
return cb(null, lookupResults);
}
if (body.response.limit_exceeded === true) {
return cb('API Limit Exceeded');
}
body.response.results.forEach((result) => {
let lookupEntity = _getEntityObjFromResult(entityLookup, result);
Logger.trace({ result }, 'lookup result');
if (lookupEntity) {
if (typeof result.domain_risk.risk_score === 'undefined' || result.domain_risk.risk_score < options.minScore) {
lookupResults.push({
entity: lookupEntity,
data: null
});
} else {
lookupResults.push({
entity: lookupEntity,
data: {
summary: [],
details: {
result: {
...result,
domain_risk: {
...result.domain_risk,
components: result.domain_risk.components.filter(
({ risk_score }) => risk_score && risk_score >= options.minScore
)
}
},
maxPivot: options.maxPivot,
entityUri: WEB_EXTERNAL_URI + result.domain,
baseUri: WEB_EXTERNAL_URI
}
}
});
}
}
});
// Any domains that didn't have a hit will be listed in the `missing_domains` array property
body.response.missing_domains.forEach((missingDomain) => {
let lookupEntity = entityLookup[missingDomain];
if (lookupEntity) {
lookupResults.push({
entity: lookupEntity,
data: null
});
}
});
cb(null, lookupResults);
});
}
/**
* In general we can match up the result domain with our entity object by using the result.domain field.
 * However, in cases where the domain is internationalized (its labels are punycode-encoded and prefixed with `xn--`), the result.domain
 * field will have the unicode representation of the domain which will not match our lookup entity. In this
* case we need to parse the `whois_url` which will have the form of:
*
* "https://whois.domaintools.com/<domain-in-plain-text-format>"
*
* We can grab the domain in plain text format here and then match it up in our entityLookup to get the
* entity object that the result maps to.
*
* @param entityLookup
* @param result
* @returns {*}
* @private
*/
function _getEntityObjFromResult(entityLookup, result) {
let entity = entityLookup[result.domain];
if (entity) {
return entity;
}
let tokens = result.whois_url.split('/');
return entityLookup[tokens[tokens.length - 1]];
}
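// Illustrative sketch (hypothetical values, not from the original source): the
// API may return result.domain as a unicode string such as "münchen.example"
// while the key stored in entityLookup is the punycode form the user searched
// for. Splitting result.whois_url, e.g.
// "https://whois.domaintools.com/xn--mnchen-3ya.example", on "/" and taking the
// last token recovers "xn--mnchen-3ya.example", which matches the entityLookup key.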
function _isLookupMiss(response, body) {
return (
response.statusCode === 404 ||
response.statusCode === 500 ||
response.statusCode === 400 ||
response.statusCode === 503 ||
typeof body === 'undefined' ||
_.isNull(body) ||
_.isEmpty(body.response) ||
body.response.results_count === 0
);
}
function _isApiError(err, response, body, entityLookupList) {
if (err) {
return {
detail: 'Error executing HTTP request',
error: err
};
}
// Any code that is not 200, 404 (lookup miss), or 400 is treated as an error
if (response.statusCode !== 200 && response.statusCode !== 404 && response.statusCode !== 400) {
return _createJsonErrorPayload(
'Unexpected HTTP Status Code',
null,
response.statusCode,
'1',
'Unexpected HTTP Status Code',
{
err: err,
body: body,
entityValue: entityLookupList
}
);
}
return null;
}
function validateOptions(userOptions, cb) {
let errors = [];
if (
typeof userOptions.apiKey.value !== 'string' ||
(typeof userOptions.apiKey.value === 'string' && userOptions.apiKey.value.length === 0)
) {
errors.push({
key: 'apiKey',
message: 'You must provide a DomainTools API key'
});
}
if (
typeof userOptions.apiName.value !== 'string' ||
(typeof userOptions.apiName.value === 'string' && userOptions.apiName.value.length === 0)
) {
errors.push({
key: 'apiName',
message: 'You must provide a DomainTools API Username'
});
}
if (typeof userOptions.domainBlocklistRegex.value === 'string' && userOptions.domainBlocklistRegex.value.length > 0) {
try {
new RegExp(userOptions.domainBlocklistRegex.value);
} catch (error) {
errors.push({
key: 'domainBlocklistRegex',
message: error.toString()
});
}
}
cb(null, errors);
}
// Builds the error payload whose message is passed to the notification window
function _createJsonErrorPayload(msg, pointer, httpCode, code, title, meta) {
return {
errors: [_createJsonErrorObject(msg, pointer, httpCode, code, title, meta)]
};
}
// Creates the JSON error object embedded in the payload
function _createJsonErrorObject(msg, pointer, httpCode, code, title, meta) {
let error = {
detail: msg,
status: httpCode.toString(),
title: title,
code: 'IRIS_' + code.toString()
};
if (pointer) {
error.source = {
pointer: pointer
};
}
if (meta) {
error.meta = meta;
}
return error;
}
function startup(logger) {
Logger = logger;
let defaults = {};
if (typeof config.request.cert === 'string' && config.request.cert.length > 0) {
defaults.cert = fs.readFileSync(config.request.cert);
}
if (typeof config.request.key === 'string' && config.request.key.length > 0) {
defaults.key = fs.readFileSync(config.request.key);
}
if (typeof config.request.passphrase === 'string' && config.request.passphrase.length > 0) {
defaults.passphrase = config.request.passphrase;
}
if (typeof config.request.ca === 'string' && config.request.ca.length > 0) {
defaults.ca = fs.readFileSync(config.request.ca);
}
if (typeof config.request.proxy === 'string' && config.request.proxy.length > 0) {
defaults.proxy = config.request.proxy;
}
if (typeof config.request.rejectUnauthorized === 'boolean') {
defaults.rejectUnauthorized = config.request.rejectUnauthorized;
}
requestWithDefaults = request.defaults(defaults);
}
module.exports = {
doLookup: doLookup,
startup: startup,
validateOptions: validateOptions
};
| {
R.push(arr.slice(i, i + chunkSize));
} | conditional_block |
integration.js | 'use strict';
const request = require('postman-request');
const _ = require('lodash');
const async = require('async');
const config = require('./config/config');
const fs = require('fs');
let Logger;
let requestWithDefaults;
let previousDomainRegexAsString = '';
let domainBlocklistRegex = null;
const BASE_URI = 'https://api.domaintools.com/v1/iris-investigate';
const MAX_DOMAIN_LABEL_LENGTH = 63;
const MAX_ENTITY_LENGTH = 100;
const MAX_ENTITIES_TO_BULK_LOOKUP = 30;
const WEB_EXTERNAL_URI = 'https://research.domaintools.com/iris/search/?q=';
function _setupRegexBlocklists(options) {
if (options.domainBlocklistRegex !== previousDomainRegexAsString && options.domainBlocklistRegex.length === 0) {
Logger.debug('Removing Domain Blocklist Regex Filtering');
previousDomainRegexAsString = '';
domainBlocklistRegex = null;
} else {
if (options.domainBlocklistRegex !== previousDomainRegexAsString) {
previousDomainRegexAsString = options.domainBlocklistRegex;
Logger.debug({ domainBlocklistRegex: previousDomainRegexAsString }, 'Modifying Domain Blocklist Regex');
domainBlocklistRegex = new RegExp(options.domainBlocklistRegex, 'i');
}
}
}
function chunk(arr, chunkSize) {
const R = [];
for (let i = 0, len = arr.length; i < len; i += chunkSize) {
R.push(arr.slice(i, i + chunkSize));
}
return R;
}
function doLookup(entities, options, cb) {
let lookupResults = [];
let entityLookup = {};
let entityLists = [];
_setupRegexBlocklists(options);
entities.forEach((entityObj) => {
if (_isInvalidEntity(entityObj) || _isEntityBlocklisted(entityObj, options)) {
return;
}
entityLookup[entityObj.value.toLowerCase()] = entityObj;
entityLists.push(entityObj.value.toLowerCase());
});
entityLists = chunk(entityLists, MAX_ENTITIES_TO_BULK_LOOKUP);
Logger.debug({ entityLists }, 'Entity Lists');
async.each(
entityLists,
(entityList, next) => {
_lookupEntityInvestigate(entityList, entityLookup, options, function (err, results) {
if (err) {
next(err);
} else {
lookupResults = lookupResults.concat(results);
next(null);
}
});
},
function (err) {
cb(err, lookupResults);
}
);
}
function | (entityObj) {
// The DomainTools API does not accept entities over 100 characters long, so we don't look those up
if (entityObj.value.length > MAX_ENTITY_LENGTH) {
return true;
}
// Domain labels (the parts in between the periods) must be 63 characters or less
if (entityObj.isDomain) {
const invalidLabel = entityObj.value.split('.').find((label) => {
return label.length > MAX_DOMAIN_LABEL_LENGTH;
});
if (typeof invalidLabel !== 'undefined') {
return true;
}
}
return false;
}
function _isEntityBlocklisted(entityObj, options) {
const blocklist = options.blocklist;
Logger.trace({ blocklist: blocklist }, 'checking to see what blocklist looks like');
if (_.includes(blocklist, entityObj.value.toLowerCase())) {
return true;
}
if (entityObj.isDomain) {
if (domainBlocklistRegex !== null) {
if (domainBlocklistRegex.test(entityObj.value)) {
Logger.debug({ domain: entityObj.value }, 'Blocked BlockListed Domain Lookup');
return true;
}
}
}
return false;
}
function _getRequestOptions(entityList, options) {
let requestOptions = {
uri: BASE_URI,
qs: {
api_username: options.apiName,
api_key: options.apiKey,
domain: entityList.join(',')
},
method: 'POST',
json: true
};
return requestOptions;
}
function _lookupEntityInvestigate(entityList, entityLookup, options, cb) {
const lookupResults = [];
const requestOptions = _getRequestOptions(entityList, options);
Logger.debug({ requestOptions }, 'Request Options');
requestWithDefaults(requestOptions, function (err, response, body) {
const errorObject = _isApiError(err, response, body, entityList);
if (errorObject) {
return cb(errorObject);
}
if (_isLookupMiss(response, body)) {
entityList.forEach((entity) => {
lookupResults.push({
entity: entityLookup[entity],
data: null
});
});
Logger.debug('Body is null');
return cb(null, lookupResults);
}
if (body.response.limit_exceeded === true) {
return cb('API Limit Exceeded');
}
body.response.results.forEach((result) => {
let lookupEntity = _getEntityObjFromResult(entityLookup, result);
Logger.trace({ result }, 'lookup result');
if (lookupEntity) {
if (typeof result.domain_risk.risk_score === 'undefined' || result.domain_risk.risk_score < options.minScore) {
lookupResults.push({
entity: lookupEntity,
data: null
});
} else {
lookupResults.push({
entity: lookupEntity,
data: {
summary: [],
details: {
result: {
...result,
domain_risk: {
...result.domain_risk,
components: result.domain_risk.components.filter(
({ risk_score }) => risk_score && risk_score >= options.minScore
)
}
},
maxPivot: options.maxPivot,
entityUri: WEB_EXTERNAL_URI + result.domain,
baseUri: WEB_EXTERNAL_URI
}
}
});
}
}
});
// Any domains that didn't have a hit will be listed in the `missing_domains` array property
body.response.missing_domains.forEach((missingDomain) => {
let lookupEntity = entityLookup[missingDomain];
if (lookupEntity) {
lookupResults.push({
entity: lookupEntity,
data: null
});
}
});
cb(null, lookupResults);
});
}
/**
* In general we can match up the result domain with our entity object by using the result.domain field.
 * However, in cases where the domain is internationalized (its labels are punycode-encoded and prefixed with `xn--`), the result.domain
 * field will have the unicode representation of the domain which will not match our lookup entity. In this
* case we need to parse the `whois_url` which will have the form of:
*
* "https://whois.domaintools.com/<domain-in-plain-text-format>"
*
* We can grab the domain in plain text format here and then match it up in our entityLookup to get the
* entity object that the result maps to.
*
* @param entityLookup
* @param result
* @returns {*}
* @private
*/
function _getEntityObjFromResult(entityLookup, result) {
let entity = entityLookup[result.domain];
if (entity) {
return entity;
}
let tokens = result.whois_url.split('/');
return entityLookup[tokens[tokens.length - 1]];
}
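// Illustrative sketch (hypothetical values, not from the original source): the
// API may return result.domain as a unicode string such as "münchen.example"
// while the key stored in entityLookup is the punycode form the user searched
// for. Splitting result.whois_url, e.g.
// "https://whois.domaintools.com/xn--mnchen-3ya.example", on "/" and taking the
// last token recovers "xn--mnchen-3ya.example", which matches the entityLookup key.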
function _isLookupMiss(response, body) {
return (
response.statusCode === 404 ||
response.statusCode === 500 ||
response.statusCode === 400 ||
response.statusCode === 503 ||
typeof body === 'undefined' ||
_.isNull(body) ||
_.isEmpty(body.response) ||
body.response.results_count === 0
);
}
function _isApiError(err, response, body, entityLookupList) {
if (err) {
return {
detail: 'Error executing HTTP request',
error: err
};
}
// Any code that is not 200, 404 (lookup miss), or 400 is treated as an error
if (response.statusCode !== 200 && response.statusCode !== 404 && response.statusCode !== 400) {
return _createJsonErrorPayload(
'Unexpected HTTP Status Code',
null,
response.statusCode,
'1',
'Unexpected HTTP Status Code',
{
err: err,
body: body,
entityValue: entityLookupList
}
);
}
return null;
}
function validateOptions(userOptions, cb) {
let errors = [];
if (
typeof userOptions.apiKey.value !== 'string' ||
(typeof userOptions.apiKey.value === 'string' && userOptions.apiKey.value.length === 0)
) {
errors.push({
key: 'apiKey',
message: 'You must provide a DomainTools API key'
});
}
if (
typeof userOptions.apiName.value !== 'string' ||
(typeof userOptions.apiName.value === 'string' && userOptions.apiName.value.length === 0)
) {
errors.push({
key: 'apiName',
message: 'You must provide a DomainTools API Username'
});
}
if (typeof userOptions.domainBlocklistRegex.value === 'string' && userOptions.domainBlocklistRegex.value.length > 0) {
try {
new RegExp(userOptions.domainBlocklistRegex.value);
} catch (error) {
errors.push({
key: 'domainBlocklistRegex',
message: error.toString()
});
}
}
cb(null, errors);
}
// Builds the error payload whose message is passed to the notification window
function _createJsonErrorPayload(msg, pointer, httpCode, code, title, meta) {
return {
errors: [_createJsonErrorObject(msg, pointer, httpCode, code, title, meta)]
};
}
// Creates the JSON error object embedded in the payload
function _createJsonErrorObject(msg, pointer, httpCode, code, title, meta) {
let error = {
detail: msg,
status: httpCode.toString(),
title: title,
code: 'IRIS_' + code.toString()
};
if (pointer) {
error.source = {
pointer: pointer
};
}
if (meta) {
error.meta = meta;
}
return error;
}
function startup(logger) {
Logger = logger;
let defaults = {};
if (typeof config.request.cert === 'string' && config.request.cert.length > 0) {
defaults.cert = fs.readFileSync(config.request.cert);
}
if (typeof config.request.key === 'string' && config.request.key.length > 0) {
defaults.key = fs.readFileSync(config.request.key);
}
if (typeof config.request.passphrase === 'string' && config.request.passphrase.length > 0) {
defaults.passphrase = config.request.passphrase;
}
if (typeof config.request.ca === 'string' && config.request.ca.length > 0) {
defaults.ca = fs.readFileSync(config.request.ca);
}
if (typeof config.request.proxy === 'string' && config.request.proxy.length > 0) {
defaults.proxy = config.request.proxy;
}
if (typeof config.request.rejectUnauthorized === 'boolean') {
defaults.rejectUnauthorized = config.request.rejectUnauthorized;
}
requestWithDefaults = request.defaults(defaults);
}
module.exports = {
doLookup: doLookup,
startup: startup,
validateOptions: validateOptions
};
| _isInvalidEntity | identifier_name |
integration.js | 'use strict';
const request = require('postman-request');
const _ = require('lodash');
const async = require('async');
const config = require('./config/config');
const fs = require('fs');
let Logger;
let requestWithDefaults;
let previousDomainRegexAsString = '';
let domainBlocklistRegex = null;
const BASE_URI = 'https://api.domaintools.com/v1/iris-investigate';
const MAX_DOMAIN_LABEL_LENGTH = 63;
const MAX_ENTITY_LENGTH = 100;
const MAX_ENTITIES_TO_BULK_LOOKUP = 30;
const WEB_EXTERNAL_URI = 'https://research.domaintools.com/iris/search/?q=';
function _setupRegexBlocklists(options) {
if (options.domainBlocklistRegex !== previousDomainRegexAsString && options.domainBlocklistRegex.length === 0) {
Logger.debug('Removing Domain Blocklist Regex Filtering');
previousDomainRegexAsString = '';
domainBlocklistRegex = null;
} else {
if (options.domainBlocklistRegex !== previousDomainRegexAsString) {
previousDomainRegexAsString = options.domainBlocklistRegex;
Logger.debug({ domainBlocklistRegex: previousDomainRegexAsString }, 'Modifying Domain Blocklist Regex');
domainBlocklistRegex = new RegExp(options.domainBlocklistRegex, 'i');
}
}
} |
function chunk(arr, chunkSize) {
const R = [];
for (let i = 0, len = arr.length; i < len; i += chunkSize) {
R.push(arr.slice(i, i + chunkSize));
}
return R;
}
function doLookup(entities, options, cb) {
let lookupResults = [];
let entityLookup = {};
let entityLists = [];
_setupRegexBlocklists(options);
entities.forEach((entityObj) => {
if (_isInvalidEntity(entityObj) || _isEntityBlocklisted(entityObj, options)) {
return;
}
entityLookup[entityObj.value.toLowerCase()] = entityObj;
entityLists.push(entityObj.value.toLowerCase());
});
entityLists = chunk(entityLists, MAX_ENTITIES_TO_BULK_LOOKUP);
Logger.debug({ entityLists }, 'Entity Lists');
async.each(
entityLists,
(entityList, next) => {
_lookupEntityInvestigate(entityList, entityLookup, options, function (err, results) {
if (err) {
next(err);
} else {
lookupResults = lookupResults.concat(results);
next(null);
}
});
},
function (err) {
cb(err, lookupResults);
}
);
}
function _isInvalidEntity(entityObj) {
// The DomainTools API does not accept entities over 100 characters long, so we don't look those up
if (entityObj.value.length > MAX_ENTITY_LENGTH) {
return true;
}
// Domain labels (the parts in between the periods) must be 63 characters or less
if (entityObj.isDomain) {
const invalidLabel = entityObj.value.split('.').find((label) => {
return label.length > MAX_DOMAIN_LABEL_LENGTH;
});
if (typeof invalidLabel !== 'undefined') {
return true;
}
}
return false;
}
function _isEntityBlocklisted(entityObj, options) {
const blocklist = options.blocklist;
Logger.trace({ blocklist: blocklist }, 'checking to see what blocklist looks like');
if (_.includes(blocklist, entityObj.value.toLowerCase())) {
return true;
}
if (entityObj.isDomain) {
if (domainBlocklistRegex !== null) {
if (domainBlocklistRegex.test(entityObj.value)) {
Logger.debug({ domain: entityObj.value }, 'Blocked BlockListed Domain Lookup');
return true;
}
}
}
return false;
}
function _getRequestOptions(entityList, options) {
let requestOptions = {
uri: BASE_URI,
qs: {
api_username: options.apiName,
api_key: options.apiKey,
domain: entityList.join(',')
},
method: 'POST',
json: true
};
return requestOptions;
}
function _lookupEntityInvestigate(entityList, entityLookup, options, cb) {
const lookupResults = [];
const requestOptions = _getRequestOptions(entityList, options);
Logger.debug({ requestOptions }, 'Request Options');
requestWithDefaults(requestOptions, function (err, response, body) {
const errorObject = _isApiError(err, response, body, entityList);
if (errorObject) {
return cb(errorObject);
}
if (_isLookupMiss(response, body)) {
entityList.forEach((entity) => {
lookupResults.push({
entity: entityLookup[entity],
data: null
});
});
Logger.debug('Body is null');
return cb(null, lookupResults);
}
if (body.response.limit_exceeded === true) {
return cb('API Limit Exceeded');
}
body.response.results.forEach((result) => {
let lookupEntity = _getEntityObjFromResult(entityLookup, result);
Logger.trace({ result }, 'lookup result');
if (lookupEntity) {
if (typeof result.domain_risk.risk_score === 'undefined' || result.domain_risk.risk_score < options.minScore) {
lookupResults.push({
entity: lookupEntity,
data: null
});
} else {
lookupResults.push({
entity: lookupEntity,
data: {
summary: [],
details: {
result: {
...result,
domain_risk: {
...result.domain_risk,
components: result.domain_risk.components.filter(
({ risk_score }) => risk_score && risk_score >= options.minScore
)
}
},
maxPivot: options.maxPivot,
entityUri: WEB_EXTERNAL_URI + result.domain,
baseUri: WEB_EXTERNAL_URI
}
}
});
}
}
});
// Any domains that didn't have a hit will be listed in the `missing_domains` array property
body.response.missing_domains.forEach((missingDomain) => {
let lookupEntity = entityLookup[missingDomain];
if (lookupEntity) {
lookupResults.push({
entity: lookupEntity,
data: null
});
}
});
cb(null, lookupResults);
});
}
/**
* In general we can match up the result domain with our entity object by using the result.domain field.
* However, in cases where the domain is internationalized (tld is prepended with `xn--`), the result.domain
* field will have the Unicode representation of the domain which will not match our lookup entity. In this
* case we need to parse the `whois_url` which will have the form of:
*
* "https://whois.domaintools.com/<domain-in-plain-text-format>"
*
* We can grab the domain in plain text format here and then match it up in our entityLookup to get the
* entity object that the result maps to.
*
* @param entityLookup
* @param result
* @returns {*}
* @private
*/
function _getEntityObjFromResult(entityLookup, result) {
let entity = entityLookup[result.domain];
if (entity) {
return entity;
}
let tokens = result.whois_url.split('/');
return entityLookup[tokens[tokens.length - 1]];
}
function _isLookupMiss(response, body) {
return (
response.statusCode === 404 ||
response.statusCode === 500 ||
response.statusCode === 400 ||
response.statusCode === 503 ||
typeof body === 'undefined' ||
_.isNull(body) ||
_.isEmpty(body.response) ||
body.response.results_count === 0
);
}
function _isApiError(err, response, body, entityLookupList) {
if (err) {
return {
detail: 'Error executing HTTP request',
error: err
};
}
// Any status code that is not 200, 404 (lookup miss), or 400 is treated as an error
if (response.statusCode !== 200 && response.statusCode !== 404 && response.statusCode !== 400) {
return _createJsonErrorPayload(
'Unexpected HTTP Status Code',
null,
response.statusCode,
'1',
'Unexpected HTTP Status Code',
{
err: err,
body: body,
entityValue: entityLookupList
}
);
}
return null;
}
function validateOptions(userOptions, cb) {
let errors = [];
if (
typeof userOptions.apiKey.value !== 'string' ||
(typeof userOptions.apiKey.value === 'string' && userOptions.apiKey.value.length === 0)
) {
errors.push({
key: 'apiKey',
message: 'You must provide a DomainTools API key'
});
}
if (
typeof userOptions.apiName.value !== 'string' ||
(typeof userOptions.apiName.value === 'string' && userOptions.apiName.value.length === 0)
) {
errors.push({
key: 'apiName',
message: 'You must provide a DomainTools API Username'
});
}
if (typeof userOptions.domainBlocklistRegex.value === 'string' && userOptions.domainBlocklistRegex.value.length > 0) {
try {
new RegExp(userOptions.domainBlocklistRegex.value);
} catch (error) {
errors.push({
key: 'domainBlocklistRegex',
message: error.toString()
});
}
}
cb(null, errors);
}
// function that takes the ErrorObject and passes the error message to the notification window
function _createJsonErrorPayload(msg, pointer, httpCode, code, title, meta) {
return {
errors: [_createJsonErrorObject(msg, pointer, httpCode, code, title, meta)]
};
}
// function that creates the Json object to be passed to the payload
function _createJsonErrorObject(msg, pointer, httpCode, code, title, meta) {
let error = {
detail: msg,
status: httpCode.toString(),
title: title,
code: 'IRIS_' + code.toString()
};
if (pointer) {
error.source = {
pointer: pointer
};
}
if (meta) {
error.meta = meta;
}
return error;
}
function startup(logger) {
Logger = logger;
let defaults = {};
if (typeof config.request.cert === 'string' && config.request.cert.length > 0) {
defaults.cert = fs.readFileSync(config.request.cert);
}
if (typeof config.request.key === 'string' && config.request.key.length > 0) {
defaults.key = fs.readFileSync(config.request.key);
}
if (typeof config.request.passphrase === 'string' && config.request.passphrase.length > 0) {
defaults.passphrase = config.request.passphrase;
}
if (typeof config.request.ca === 'string' && config.request.ca.length > 0) {
defaults.ca = fs.readFileSync(config.request.ca);
}
if (typeof config.request.proxy === 'string' && config.request.proxy.length > 0) {
defaults.proxy = config.request.proxy;
}
if (typeof config.request.rejectUnauthorized === 'boolean') {
defaults.rejectUnauthorized = config.request.rejectUnauthorized;
}
requestWithDefaults = request.defaults(defaults);
}
module.exports = {
doLookup: doLookup,
startup: startup,
validateOptions: validateOptions
}; | random_line_split | |
amazon_ff_review.py | #%%
import pandas as pd
import numpy as np
from sklearn.metrics import accuracy_score
from sklearn.model_selection import KFold
import json
from sklearn import metrics
from sklearn.model_selection import cross_val_score
from sklearn.metrics import confusion_matrix, precision_recall_fscore_support,mean_squared_error
import time
import sqlite3
import nltk
nltk.download('punkt')
nltk.download('stopwords')
#nltk.download('vader')
from nltk.tokenize import word_tokenize
from nltk.stem.wordnet import WordNetLemmatizer
from nltk.corpus import stopwords
import string
from textblob import TextBlob
from sklearn.ensemble import RandomForestClassifier
#from textblob.sentiments import NaiveBayesAnalyzer
from functools import reduce
from sklearn.utils import resample
from sklearn.model_selection import train_test_split
import sprint6
#%%
# con=sqlite3.connect('database.sqlite')
# #df=pd.read_sql('Select * from Reviews',con=con,chunksize=10000)
# df=pd.read_sql('Select * from Reviews',con=con)
# #data=[]
# # for chunk in df:
# # data.append(chunk)
# s=df['Score']
# #s=df['ProductId']
# sample=resample(df,n_samples=25000,replace=False,stratify=s)
#%%
# sample=data[0]
# print(type(df))
#%%
def get_sentiments_from_polarity(polarity=0,threshold=0):
|
#%%
def get_reviews():
con=sqlite3.connect('database.sqlite')
df=pd.read_sql('Select * from Reviews',con=con)
s=df['Score']
sample=resample(df,n_samples=10000,replace=False,stratify=s)
cleaned_review=pd.DataFrame(columns=['Summary','Helpful','Score','Sentiment','Sentiment_Polarity'])
return cleaned_review,sample,df
#%%
def preprocess_and_get_sentiments(cleaned_review=None,sample=None,threshold=0):
cleaned_review['Score']=sample['Score']
cleaned_review['Summary']=sample['Summary']
######################Preprocessing###########################
stop_words=set(stopwords.words("english"))
punc = list(string.punctuation)
punc.extend(["`","``","''","..."])
##################################################
i=0
sentiment_result=[]
sentiment_polarity=[]
for review in sample['Summary']:
#print(review)
##################Preprocessing########################
# review=word_tokenize(review)
# review=[word for word in review if word not in stop_words] #Removing stop words
# review=[word for word in review if word not in punc] #Removing punctuations
# if len(review)>0:
# filtered_review=reduce(lambda a,b:a+' '+b,review)
# else:
# filtered_review=''
if len(review)>0:
filtered_review=review
else:
filtered_review=''
#############################################
sentiment = TextBlob(filtered_review).sentiment
print(f'Sentiment={sentiment}')
s=get_sentiments_from_polarity(sentiment.polarity,threshold)
print(s)
sentiment_result.append(s)
print(sentiment.polarity)
sentiment_polarity.append(sentiment.polarity)
i+=1
deno=sample['HelpfulnessDenominator']
nume=sample['HelpfulnessNumerator']
sample.index=cleaned_review.index
cleaned_review['Helpful']=np.where(deno>0,nume/deno,0)
cleaned_review['Sentiment']=sentiment_result
cleaned_review['Sentiment_Polarity']=sentiment_polarity
print(cleaned_review['Sentiment'])
sample['Sentiment']=cleaned_review['Sentiment']
sample['Sentiment_Polarity']=cleaned_review['Sentiment_Polarity']
#helpful_df=predict_helpfulness(sample)
sample['Helpful_Score']= cleaned_review['Helpful'] #pd.cut(cleaned_review['Helpful'], bins = [1,2,3,4,5], include_lowest = True)
print("!!!!!!!!!!!!!!!!!!!!!!!!!!!!!")
print(sample.head())
#print(sample.Sentiment_Polarity)
return cleaned_review,sample
#%%
# s='nn ! 8,l'
# s=s.translate(dict.fromkeys(string.punctuation))
# print(s)
# l=list(string.punctuation)
# cleaned_review.tokenized_reviews[i]=review
# print(cleaned_review.tokenized_reviews)
# reviews=pd.DataFrame(data=rlist,columns=['tokenized_reviews'])
# print(reviews)
#%%
def concat_sentiments(cleaned_review=None,sample=None):
#%%
review_df=sample
review_df=review_df.drop(columns=['HelpfulnessDenominator','HelpfulnessNumerator','Summary','Text'],axis=1)
#%%
review_df['Sentiment']=cleaned_review['Sentiment']
#print("review df .sentiment")
#print(review_df['Sentiment'])
review_df['Sentiment']=review_df['Sentiment'].astype('category')
review_df['Sentiment']=review_df['Sentiment'].cat.codes
review_df=review_df.drop(columns=['Sentiment_Polarity'],axis=1)
review_df['ProductId']=review_df['ProductId'].astype('category')
review_df['ProductId']=review_df['ProductId'].cat.codes
#%%
review_df.dtypes
#one-hot encoding the sentiments
sentiment_df=pd.get_dummies(review_df['Sentiment'],columns=['Sentiment_Neutral','Sentiment_Negative','Sentiment_Positive'])
#print("Sentiemnt DF")
#print(sentiment_df.head())
sentiment_df.columns=['Sentiment_Neutral','Sentiment_Negative','Sentiment_Positive']#(index=str,columns={0:'Sentiment_Neutral',1:'Sentiment_Negative',2:'Sentiment_Positive'})
#%%
review_df=pd.concat([review_df,sentiment_df],axis=1)
review_df=review_df.drop(columns=['Sentiment'])
#print(len(review_df['UserId'].unique()))
#print(len(review_df['ProductId'].unique()))
return review_df
#%%
def train_model(model,X,y,n=5,upsample=False,kernelTransform=False,gamma = 0.1,kernel='rbf',sampleForSVC=False):
print("****************************")
num_rows=len(X)
metrics={}
print("****************************")
print(model)
print("****************************")
print("Total Dataset Size={} x {}".format(num_rows,10))
print(f"before split {len(X)}")
#df_train=df.drop(['Class'],axis=1)
stratify=y
y_test,y_pred,t,_=train_core(model,X,y,stratify,n)
cnf=confusion_matrix(y_test,y_pred)
# This is the Final test accuracy
s=accuracy_score(y_test,y_pred)
prec_recall=precision_recall_fscore_support(y_test,y_pred,average='weighted')
p,r,f,_=prec_recall
#prec_recall=precision_recall_fscore_support(y_test,y_pred,average='weighted')
metrics['Accuracy']=s
metrics['Error']=1-s
metrics['Precision']=p
metrics['Recall']=r
metrics['FScore']=f
metrics['Training_Time_in_s']=t
print("Score= {}".format(s))
print("Error= {}".format(1-s))
print("Training Time={}".format(t))
print("Precision,Recall,F_beta,Support {}".format(prec_recall))
#y_pred=model.predict(X_test)
return metrics,cnf
#%%
def train_core(model=None,X=None,y=None,stratify=None,n=5):
print(y.head())
#stratify=X['ProductId']
X_train, X_test, y_train, y_test = train_test_split(X, y ,stratify=stratify,test_size=0.49,shuffle=True)
print(f" after split {len(X_train)}")
kmodels=[]
kscores=[]
#nfold or kfold cross validation
kf=KFold(n_splits=n)
t=0
for train_index,test_index in kf.split(X_train,y_train):
#print(f"Train Index start={train_index[0]} & end= {train_index[-1]}")
#print(f"Test Index start= {test_index[0]} & end= {test_index[-1]}")
Xk_train, Xk_test = X_train[train_index[0]:], X_train[test_index[0]:]
yk_train, yk_test = y_train[train_index[0]:], y_train[test_index[0]:]
start=time.time()
m=model.fit(Xk_train,yk_train)
end=time.time()
kmodels.append(m)
yk_pred=m.predict(Xk_test)
# Note this is the cross validation accuracy and not the final test accuracy
if stratify is not None:
ks=accuracy_score(yk_test,yk_pred) #this function can be used only for multiclass classification
kscores.append(ks)
best_model_index=kscores.index(max(kscores))
else:
ks=mean_squared_error(yk_test,yk_pred)
kscores.append(ks)
best_model_index=kscores.index(min(kscores))
t=t+(end-start)
t=t/n
print(f"Cross validation scores ={kscores}")
print(f"Best model index ={best_model_index}")
best_model=kmodels[best_model_index]
print(f"Best model ={best_model}")
y_pred=best_model.predict(X_test)
return y_test,y_pred,t,best_model
#%%
def run():
cleaned_review,sample,df=get_reviews()
sample_helpful=resample(df,n_samples=10000,replace=False,stratify=df['Score'])
#print(sample.head())
#print(sample_helpful.head())
cleaned_review_helpful=cleaned_review
cleaned_review,sample=preprocess_and_get_sentiments(cleaned_review=cleaned_review,sample=sample,threshold=0.5)
review_df=concat_sentiments(cleaned_review,sample)
y=review_df['Score']
X=review_df
X=X.drop(columns=['UserId','Score','ProfileName','Time'],axis=1)
###########################################################
cleaned_review_helpful,sample_helpful=preprocess_and_get_sentiments(cleaned_review=cleaned_review_helpful,sample=sample_helpful,threshold=0.5)
review_df_helpful=concat_sentiments(cleaned_review_helpful,sample_helpful)
#y_h=review_df_helpful['Score']
#X_h=review_df_helpful
X_helpful=review_df_helpful.drop(columns=['UserId','Score','ProfileName','Time'],axis=1)
y_helpful=X_helpful['Helpful_Score']
X_helpful=X_helpful.drop(columns=['Helpful_Score'],axis=1)
helpful_df=predict_helpfulness(X_helpful,y_helpful,X)
regression_error=mean_squared_error(y_helpful,helpful_df)
###########################################################
#print(X.head)
helpful_df=pd.DataFrame(data=helpful_df,index=X.index,columns=['Helpful_Score'])
X['Helpful_Score']=helpful_df
print("After adding predicted helpful scores")
print(X.head)
print("************* Training Final Reviews Model*****************")
models={}
model=RandomForestClassifier(criterion='gini',n_estimators=10)
models.update({'Random Forest':[model,{'hyper':{'criterion': 'gini','n_estimators':'10'}}]}) #,'Max_depth':'None','Min_samples_split':'2'
metrics,cnf=train_model(model,X,y)
print(metrics)
print(cnf)
# return X
return X,models,metrics,cnf,regression_error
#%%
from sklearn.linear_model import LinearRegression
def predict_helpfulness(X=None,y=None,train_data=None):
model=LinearRegression()
print("**************Training Helpful score model*************")
_,_,_,best_model=train_core(model=model,X=X,y=y,n=5)
train_data=train_data.drop(columns=['Helpful_Score'],axis=1)
helpful_df=best_model.predict(train_data)
# get artificial features for our unknown train_data
return helpful_df
# X_backup=X
# dummy_df=pd.get_dummies(X)
#%%
df,models,metrics,cnf,regression_error=run()
for i in models.items():
m=i
#%%
def make_json(df=None,model_data=None,upsample=False,stratified_column=None,metrics=None,test_size=0.49,nfold=5):
''' Preparation of Metadata in Dictionary Format'''
if upsample:
sampling='Upsampling'
else:
sampling='Stratified Sampling stratified on '+ stratified_column
encoding={'encoding_used':'One-Hot Encoding'}
data_meta_data={'Name':'Amazon Fine Food Reviews','Rows':len(df),'Columns Before Preprocessing':10,'Columns After Preprocessing & one hot encoding':len(df.columns),'Encoding':encoding,'Classification Type':"Multi-Class","Class Variable":'Score'}
training_charc={'Hyper Parameters':m[1][1]['hyper'],'Test_size':test_size,'No. of Cross Validation Folds Used':nfold,'Sampling':sampling}
meta_data={m[0]:{'Data_Meta_Data':data_meta_data,'Training Characteristics':training_charc,"Metrics":metrics}}
jfile='meta_data_'+str(np.random.randint(1,10000,1))+'.json'
return meta_data,jfile
#%%
meta_data,jfile=make_json(df=df,model_data=m,stratified_column='Score',metrics=metrics)
sprint6.write_json(jfile,meta_data)
#%%
| if np.abs(polarity) < threshold :
return "Neutral"
elif np.sign(polarity)>0:
return "Positive"
else:
return "Negative" | identifier_body |
amazon_ff_review.py | #%%
import pandas as pd
import numpy as np
from sklearn.metrics import accuracy_score
from sklearn.model_selection import KFold
import json
from sklearn import metrics
from sklearn.model_selection import cross_val_score
from sklearn.metrics import confusion_matrix, precision_recall_fscore_support,mean_squared_error
import time
import sqlite3
import nltk
nltk.download('punkt')
nltk.download('stopwords')
#nltk.download('vader')
from nltk.tokenize import word_tokenize
from nltk.stem.wordnet import WordNetLemmatizer
from nltk.corpus import stopwords
import string
from textblob import TextBlob
from sklearn.ensemble import RandomForestClassifier
#from textblob.sentiments import NaiveBayesAnalyzer
from functools import reduce
from sklearn.utils import resample
from sklearn.model_selection import train_test_split
import sprint6
#%%
# con=sqlite3.connect('database.sqlite')
# #df=pd.read_sql('Select * from Reviews',con=con,chunksize=10000)
# df=pd.read_sql('Select * from Reviews',con=con)
# #data=[]
# # for chunk in df:
# # data.append(chunk)
# s=df['Score']
# #s=df['ProductId']
# sample=resample(df,n_samples=25000,replace=False,stratify=s)
#%%
# sample=data[0]
# print(type(df))
#%%
def get_sentiments_from_polarity(polarity=0,threshold=0):
if np.abs(polarity) < threshold :
return "Neutral"
elif np.sign(polarity)>0:
return "Positive"
else:
return "Negative"
#%%
def get_reviews():
con=sqlite3.connect('database.sqlite')
df=pd.read_sql('Select * from Reviews',con=con)
s=df['Score']
sample=resample(df,n_samples=10000,replace=False,stratify=s)
cleaned_review=pd.DataFrame(columns=['Summary','Helpful','Score','Sentiment','Sentiment_Polarity'])
return cleaned_review,sample,df
#%%
def preprocess_and_get_sentiments(cleaned_review=None,sample=None,threshold=0):
cleaned_review['Score']=sample['Score']
cleaned_review['Summary']=sample['Summary']
######################Preprocessing###########################
stop_words=set(stopwords.words("english"))
punc = list(string.punctuation)
punc.extend(["`","``","''","..."])
##################################################
i=0
sentiment_result=[]
sentiment_polarity=[]
for review in sample['Summary']:
#print(review)
##################Preprocessing########################
# review=word_tokenize(review)
# review=[word for word in review if word not in stop_words] #Removing stop words
# review=[word for word in review if word not in punc] #Removing punctuations
# if len(review)>0:
# filtered_review=reduce(lambda a,b:a+' '+b,review)
# else:
# filtered_review=''
if len(review)>0:
filtered_review=review
else:
filtered_review=''
#############################################
sentiment = TextBlob(filtered_review).sentiment
print(f'Sentiment={sentiment}')
s=get_sentiments_from_polarity(sentiment.polarity,threshold)
print(s)
sentiment_result.append(s)
print(sentiment.polarity)
sentiment_polarity.append(sentiment.polarity)
i+=1
deno=sample['HelpfulnessDenominator']
nume=sample['HelpfulnessNumerator']
sample.index=cleaned_review.index
cleaned_review['Helpful']=np.where(deno>0,nume/deno,0)
cleaned_review['Sentiment']=sentiment_result
cleaned_review['Sentiment_Polarity']=sentiment_polarity
print(cleaned_review['Sentiment'])
sample['Sentiment']=cleaned_review['Sentiment']
sample['Sentiment_Polarity']=cleaned_review['Sentiment_Polarity']
#helpful_df=predict_helpfulness(sample)
sample['Helpful_Score']= cleaned_review['Helpful'] #pd.cut(cleaned_review['Helpful'], bins = [1,2,3,4,5], include_lowest = True)
print("!!!!!!!!!!!!!!!!!!!!!!!!!!!!!")
print(sample.head())
#print(sample.Sentiment_Polarity)
return cleaned_review,sample
#%%
# s='nn ! 8,l'
# s=s.translate(dict.fromkeys(string.punctuation))
# print(s)
# l=list(string.punctuation)
# cleaned_review.tokenized_reviews[i]=review
# print(cleaned_review.tokenized_reviews)
# reviews=pd.DataFrame(data=rlist,columns=['tokenized_reviews'])
# print(reviews)
#%%
def concat_sentiments(cleaned_review=None,sample=None):
#%%
review_df=sample
review_df=review_df.drop(columns=['HelpfulnessDenominator','HelpfulnessNumerator','Summary','Text'],axis=1)
#%%
review_df['Sentiment']=cleaned_review['Sentiment']
#print("review df .sentiment")
#print(review_df['Sentiment'])
review_df['Sentiment']=review_df['Sentiment'].astype('category')
review_df['Sentiment']=review_df['Sentiment'].cat.codes
review_df=review_df.drop(columns=['Sentiment_Polarity'],axis=1)
review_df['ProductId']=review_df['ProductId'].astype('category')
review_df['ProductId']=review_df['ProductId'].cat.codes
#%%
review_df.dtypes
#one-hot encoding the sentiments
sentiment_df=pd.get_dummies(review_df['Sentiment'],columns=['Sentiment_Neutral','Sentiment_Negative','Sentiment_Positive'])
#print("Sentiemnt DF")
#print(sentiment_df.head())
sentiment_df.columns=['Sentiment_Neutral','Sentiment_Negative','Sentiment_Positive']#(index=str,columns={0:'Sentiment_Neutral',1:'Sentiment_Negative',2:'Sentiment_Positive'})
#%%
review_df=pd.concat([review_df,sentiment_df],axis=1)
review_df=review_df.drop(columns=['Sentiment'])
#print(len(review_df['UserId'].unique()))
#print(len(review_df['ProductId'].unique()))
return review_df
#%%
def train_model(model,X,y,n=5,upsample=False,kernelTransform=False,gamma = 0.1,kernel='rbf',sampleForSVC=False):
print("****************************")
num_rows=len(X)
metrics={}
print("****************************")
print(model)
print("****************************")
print("Total Dataset Size={} x {}".format(num_rows,10))
print(f"before split {len(X)}")
#df_train=df.drop(['Class'],axis=1)
stratify=y
y_test,y_pred,t,_=train_core(model,X,y,stratify,n)
cnf=confusion_matrix(y_test,y_pred)
# This is the Final test accuracy
s=accuracy_score(y_test,y_pred)
prec_recall=precision_recall_fscore_support(y_test,y_pred,average='weighted')
p,r,f,_=prec_recall
#prec_recall=precision_recall_fscore_support(y_test,y_pred,average='weighted')
metrics['Accuracy']=s
metrics['Error']=1-s
metrics['Precision']=p
metrics['Recall']=r
metrics['FScore']=f
metrics['Training_Time_in_s']=t
print("Score= {}".format(s))
print("Error= {}".format(1-s))
print("Training Time={}".format(t))
print("Precision,Recall,F_beta,Support {}".format(prec_recall))
#y_pred=model.predict(X_test)
return metrics,cnf
#%%
def train_core(model=None,X=None,y=None,stratify=None,n=5):
print(y.head())
#stratify=X['ProductId']
X_train, X_test, y_train, y_test = train_test_split(X, y ,stratify=stratify,test_size=0.49,shuffle=True)
print(f" after split {len(X_train)}")
kmodels=[]
kscores=[]
#nfold or kfold cross validation
kf=KFold(n_splits=n)
t=0
for train_index,test_index in kf.split(X_train,y_train):
#print(f"Train Index start={train_index[0]} & end= {train_index[-1]}")
#print(f"Test Index start= {test_index[0]} & end= {test_index[-1]}")
Xk_train, Xk_test = X_train[train_index[0]:], X_train[test_index[0]:]
yk_train, yk_test = y_train[train_index[0]:], y_train[test_index[0]:]
start=time.time()
m=model.fit(Xk_train,yk_train)
end=time.time()
kmodels.append(m)
yk_pred=m.predict(Xk_test)
# Note this is the cross validation accuracy and not the final test accuracy
if stratify is not None:
ks=accuracy_score(yk_test,yk_pred) #this function can be used only for multiclass classification
kscores.append(ks)
best_model_index=kscores.index(max(kscores))
else:
ks=mean_squared_error(yk_test,yk_pred)
kscores.append(ks)
best_model_index=kscores.index(min(kscores))
t=t+(end-start)
t=t/n
print(f"Cross validation scores ={kscores}")
print(f"Best model index ={best_model_index}")
best_model=kmodels[best_model_index]
print(f"Best model ={best_model}")
y_pred=best_model.predict(X_test)
return y_test,y_pred,t,best_model
#%%
def run():
cleaned_review,sample,df=get_reviews()
sample_helpful=resample(df,n_samples=10000,replace=False,stratify=df['Score'])
#print(sample.head())
#print(sample_helpful.head())
cleaned_review_helpful=cleaned_review
cleaned_review,sample=preprocess_and_get_sentiments(cleaned_review=cleaned_review,sample=sample,threshold=0.5)
review_df=concat_sentiments(cleaned_review,sample)
y=review_df['Score']
X=review_df
X=X.drop(columns=['UserId','Score','ProfileName','Time'],axis=1)
###########################################################
cleaned_review_helpful,sample_helpful=preprocess_and_get_sentiments(cleaned_review=cleaned_review_helpful,sample=sample_helpful,threshold=0.5)
review_df_helpful=concat_sentiments(cleaned_review_helpful,sample_helpful)
#y_h=review_df_helpful['Score']
#X_h=review_df_helpful
X_helpful=review_df_helpful.drop(columns=['UserId','Score','ProfileName','Time'],axis=1)
y_helpful=X_helpful['Helpful_Score']
X_helpful=X_helpful.drop(columns=['Helpful_Score'],axis=1)
helpful_df=predict_helpfulness(X_helpful,y_helpful,X)
regression_error=mean_squared_error(y_helpful,helpful_df)
###########################################################
#print(X.head)
helpful_df=pd.DataFrame(data=helpful_df,index=X.index,columns=['Helpful_Score'])
X['Helpful_Score']=helpful_df
print("After adding predicted helpful scores")
print(X.head)
print("************* Training Final Reviews Model*****************")
models={}
model=RandomForestClassifier(criterion='gini',n_estimators=10)
models.update({'Random Forest':[model,{'hyper':{'criterion': 'gini','n_estimators':'10'}}]}) #,'Max_depth':'None','Min_samples_split':'2'
metrics,cnf=train_model(model,X,y)
print(metrics)
print(cnf)
# return X
return X,models,metrics,cnf,regression_error
#%%
from sklearn.linear_model import LinearRegression
def predict_helpfulness(X=None,y=None,train_data=None):
model=LinearRegression()
print("**************Training Helpful score model*************")
_,_,_,best_model=train_core(model=model,X=X,y=y,n=5)
train_data=train_data.drop(columns=['Helpful_Score'],axis=1)
helpful_df=best_model.predict(train_data)
# get artificial features for our unknown train_data
return helpful_df
# X_backup=X
# dummy_df=pd.get_dummies(X)
#%%
df,models,metrics,cnf,regression_error=run()
for i in models.items():
|
#%%
def make_json(df=None,model_data=None,upsample=False,stratified_column=None,metrics=None,test_size=0.49,nfold=5):
''' Preparation of Metadata in Dictionary Format'''
if upsample:
sampling='Upsampling'
else:
sampling='Stratified Sampling stratified on '+ stratified_column
encoding={'encoding_used':'One-Hot Encoding'}
data_meta_data={'Name':'Amazon Fine Food Reviews','Rows':len(df),'Columns Before Preprocessing':10,'Columns After Preprocessing & one hot encoding':len(df.columns),'Encoding':encoding,'Classification Type':"Multi-Class","Class Variable":'Score'}
training_charc={'Hyper Parameters':m[1][1]['hyper'],'Test_size':test_size,'No. of Cross Validation Folds Used':nfold,'Sampling':sampling}
meta_data={m[0]:{'Data_Meta_Data':data_meta_data,'Training Characteristics':training_charc,"Metrics":metrics}}
jfile='meta_data_'+str(np.random.randint(1,10000,1))+'.json'
return meta_data,jfile
#%%
meta_data,jfile=make_json(df=df,model_data=m,stratified_column='Score',metrics=metrics)
sprint6.write_json(jfile,meta_data)
#%%
| m=i | conditional_block |
amazon_ff_review.py | #%%
import pandas as pd
import numpy as np
from sklearn.metrics import accuracy_score
from sklearn.model_selection import KFold
import json
from sklearn import metrics
from sklearn.model_selection import cross_val_score
from sklearn.metrics import confusion_matrix, precision_recall_fscore_support,mean_squared_error
import time
import sqlite3
import nltk
nltk.download('punkt')
nltk.download('stopwords')
#nltk.download('vader')
from nltk.tokenize import word_tokenize
from nltk.stem.wordnet import WordNetLemmatizer
from nltk.corpus import stopwords
import string
from textblob import TextBlob
from sklearn.ensemble import RandomForestClassifier
#from textblob.sentiments import NaiveBayesAnalyzer
from functools import reduce
from sklearn.utils import resample
from sklearn.model_selection import train_test_split
import sprint6
#%%
# con=sqlite3.connect('database.sqlite')
# #df=pd.read_sql('Select * from Reviews',con=con,chunksize=10000)
# df=pd.read_sql('Select * from Reviews',con=con)
# #data=[]
# # for chunk in df:
# # data.append(chunk)
# s=df['Score']
# #s=df['ProductId']
# sample=resample(df,n_samples=25000,replace=False,stratify=s)
#%%
# sample=data[0]
# print(type(df))
#%%
def get_sentiments_from_polarity(polarity=0,threshold=0):
if np.abs(polarity) < threshold :
return "Neutral"
elif np.sign(polarity)>0:
return "Positive"
else:
return "Negative"
#%%
def get_reviews():
con=sqlite3.connect('database.sqlite')
df=pd.read_sql('Select * from Reviews',con=con)
s=df['Score']
sample=resample(df,n_samples=10000,replace=False,stratify=s)
cleaned_review=pd.DataFrame(columns=['Summary','Helpful','Score','Sentiment','Sentiment_Polarity'])
return cleaned_review,sample,df
#%%
def preprocess_and_get_sentiments(cleaned_review=None,sample=None,threshold=0):
cleaned_review['Score']=sample['Score']
cleaned_review['Summary']=sample['Summary']
######################Preprocessing###########################
stop_words=set(stopwords.words("english"))
punc = list(string.punctuation)
punc.extend(["`","``","''","..."])
##################################################
i=0
sentiment_result=[]
sentiment_polarity=[]
for review in sample['Summary']:
#print(review)
##################Preprocessing########################
# review=word_tokenize(review)
# review=[word for word in review if word not in stop_words] #Removing stop words
# review=[word for word in review if word not in punc] #Removing punctuations
# if len(review)>0:
# filtered_review=reduce(lambda a,b:a+' '+b,review)
# else:
# filtered_review=''
if len(review)>0:
filtered_review=review
else:
filtered_review=''
#############################################
sentiment = TextBlob(filtered_review).sentiment
print(f'Sentiment={sentiment}')
s=get_sentiments_from_polarity(sentiment.polarity,threshold)
print(s)
sentiment_result.append(s)
print(sentiment.polarity)
sentiment_polarity.append(sentiment.polarity)
i+=1
deno=sample['HelpfulnessDenominator']
nume=sample['HelpfulnessNumerator']
sample.index=cleaned_review.index
cleaned_review['Helpful']=np.where(deno>0,nume/deno,0)
cleaned_review['Sentiment']=sentiment_result
cleaned_review['Sentiment_Polarity']=sentiment_polarity
print(cleaned_review['Sentiment'])
sample['Sentiment']=cleaned_review['Sentiment']
sample['Sentiment_Polarity']=cleaned_review['Sentiment_Polarity']
#helpful_df=predict_helpfulness(sample)
sample['Helpful_Score']= cleaned_review['Helpful'] #pd.cut(cleaned_review['Helpful'], bins = [1,2,3,4,5], include_lowest = True)
print("!!!!!!!!!!!!!!!!!!!!!!!!!!!!!")
print(sample.head())
#print(sample.Sentiment_Polarity)
return cleaned_review,sample
#%%
# s='nn ! 8,l'
# s=s.translate(dict.fromkeys(string.punctuation))
# print(s)
# l=list(string.punctuation)
# cleaned_review.tokenized_reviews[i]=review
# print(cleaned_review.tokenized_reviews)
# reviews=pd.DataFrame(data=rlist,columns=['tokenized_reviews'])
# print(reviews)
#%%
def concat_sentiments(cleaned_review=None,sample=None):
#%%
review_df=sample
review_df=review_df.drop(columns=['HelpfulnessDenominator','HelpfulnessNumerator','Summary','Text'],axis=1)
#%%
review_df['Sentiment']=cleaned_review['Sentiment']
#print("review df .sentiment")
#print(review_df['Sentiment'])
review_df['Sentiment']=review_df['Sentiment'].astype('category')
review_df['Sentiment']=review_df['Sentiment'].cat.codes
review_df=review_df.drop(columns=['Sentiment_Polarity'],axis=1)
review_df['ProductId']=review_df['ProductId'].astype('category')
review_df['ProductId']=review_df['ProductId'].cat.codes
#%%
review_df.dtypes
#one-hot encoding the sentiments
sentiment_df=pd.get_dummies(review_df['Sentiment'],columns=['Sentiment_Neutral','Sentiment_Negative','Sentiment_Positive'])
#print("Sentiemnt DF")
#print(sentiment_df.head())
sentiment_df.columns=['Sentiment_Neutral','Sentiment_Negative','Sentiment_Positive']#(index=str,columns={0:'Sentiment_Neutral',1:'Sentiment_Negative',2:'Sentiment_Positive'})
#%%
review_df=pd.concat([review_df,sentiment_df],axis=1)
review_df=review_df.drop(columns=['Sentiment'])
#print(len(review_df['UserId'].unique()))
#print(len(review_df['ProductId'].unique()))
return review_df
#%%
def train_model(model,X,y,n=5,upsample=False,kernelTransform=False,gamma = 0.1,kernel='rbf',sampleForSVC=False):
print("****************************")
num_rows=len(X)
metrics={}
print("****************************")
print(model)
print("****************************")
print("Total Dataset Size={} x {}".format(num_rows,10))
print(f"before split {len(X)}")
#df_train=df.drop(['Class'],axis=1)
stratify=y
y_test,y_pred,t,_=train_core(model,X,y,stratify,n)
cnf=confusion_matrix(y_test,y_pred)
# This is the Final test accuracy
s=accuracy_score(y_test,y_pred)
prec_recall=precision_recall_fscore_support(y_test,y_pred,average='weighted')
p,r,f,_=prec_recall
#prec_recall=precision_recall_fscore_support(y_test,y_pred,average='weighted')
metrics['Accuracy']=s
metrics['Error']=1-s
metrics['Precision']=p
metrics['Recall']=r
metrics['FScore']=f
metrics['Training_Time_in_s']=t
print("Score= {}".format(s))
print("Error= {}".format(1-s))
print("Training Time={}".format(t))
print("Precision,Recall,F_beta,Support {}".format(prec_recall))
#y_pred=model.predict(X_test)
return metrics,cnf
#%%
def train_core(model=None,X=None,y=None,stratify=None,n=5):
print(y.head())
#stratify=X['ProductId']
X_train, X_test, y_train, y_test = train_test_split(X, y ,stratify=stratify,test_size=0.49,shuffle=True)
print(f" after split {len(X_train)}")
kmodels=[]
kscores=[]
#nfold or kfold cross validation
kf=KFold(n_splits=n)
t=0
for train_index,test_index in kf.split(X_train,y_train):
#print(f"Train Index start={train_index[0]} & end= {train_index[-1]}")
#print(f"Test Index start= {test_index[0]} & end= {test_index[-1]}")
Xk_train, Xk_test = X_train[train_index[0]:], X_train[test_index[0]:]
yk_train, yk_test = y_train[train_index[0]:], y_train[test_index[0]:]
start=time.time()
m=model.fit(Xk_train,yk_train)
end=time.time()
kmodels.append(m)
yk_pred=m.predict(Xk_test)
# Note this is the cross validation accuracy and not the final test accuracy
if stratify is not None:
ks=accuracy_score(yk_test,yk_pred) #this function can be used only for multiclass classification
kscores.append(ks)
best_model_index=kscores.index(max(kscores))
else:
ks=mean_squared_error(yk_test,yk_pred)
kscores.append(ks)
best_model_index=kscores.index(min(kscores)) |
t=t+(end-start)
t=t/n
print(f"Cross validation scores ={kscores}")
print(f"Best model index ={best_model_index}")
best_model=kmodels[best_model_index]
print(f"Best model ={best_model}")
y_pred=best_model.predict(X_test)
return y_test,y_pred,t,best_model
#%%
def run():
cleaned_review,sample,df=get_reviews()
sample_helpful=resample(df,n_samples=10000,replace=False,stratify=df['Score'])
#print(sample.head())
#print(sample_helpful.head())
cleaned_review_helpful=cleaned_review
cleaned_review,sample=preprocess_and_get_sentiments(cleaned_review=cleaned_review,sample=sample,threshold=0.5)
review_df=concat_sentiments(cleaned_review,sample)
y=review_df['Score']
X=review_df
X=X.drop(columns=['UserId','Score','ProfileName','Time'],axis=1)
###########################################################
cleaned_review_helpful,sample_helpful=preprocess_and_get_sentiments(cleaned_review=cleaned_review_helpful,sample=sample_helpful,threshold=0.5)
review_df_helpful=concat_sentiments(cleaned_review_helpful,sample_helpful)
#y_h=review_df_helpful['Score']
#X_h=review_df_helpful
X_helpful=review_df_helpful.drop(columns=['UserId','Score','ProfileName','Time'],axis=1)
y_helpful=X_helpful['Helpful_Score']
X_helpful=X_helpful.drop(columns=['Helpful_Score'],axis=1)
helpful_df=predict_helpfulness(X_helpful,y_helpful,X)
regression_error=mean_squared_error(y_helpful,helpful_df)
###########################################################
#print(X.head)
helpful_df=pd.DataFrame(data=helpful_df,index=X.index,columns=['Helpful_Score'])
X['Helpful_Score']=helpful_df
print("After adding predicted helpful scores")
print(X.head)
print("************* Training Final Reviews Model*****************")
models={}
model=RandomForestClassifier(criterion='gini',n_estimators=10)
models.update({'Random Forest':[model,{'hyper':{'criterion': 'gini','n_estimators':'10'}}]}) #,'Max_depth':'None','Min_samples_split':'2'
metrics,cnf=train_model(model,X,y)
print(metrics)
print(cnf)
# return X
return X,models,metrics,cnf,regression_error
#%%
from sklearn.linear_model import LinearRegression
def predict_helpfulness(X=None,y=None,train_data=None):
model=LinearRegression()
print("**************Training Helpful score model*************")
_,_,_,best_model=train_core(model=model,X=X,y=y,n=5)
train_data=train_data.drop(columns=['Helpful_Score'],axis=1)
helpful_df=best_model.predict(train_data)
# get artificial features for our unknown train_data
return helpful_df
# X_backup=X
# dummy_df=pd.get_dummies(X)
#%%
df,models,metrics,cnf,regression_error=run()
for i in models.items():
m=i
#%%
def make_json(df=None,model_data=None,upsample=False,stratified_column=None,metrics=None,test_size=0.49,nfold=5):
''' Preparation of Metadata in Dictionary Format'''
if upsample:
sampling='Upsampling'
else:
sampling='Stratified Sampling stratified on '+ stratified_column
encoding={'encoding_used':'One-Hot Encoding'}
data_meta_data={'Name':'Amazon Fine Food Reviews','Rows':len(df),'Columns Before Preprocessing':10,'Columns After Preprocessing & one hot encoding':len(df.columns),'Encoding':encoding,'Classification Type':"Multi-Class","Class Variable":'Score'}
training_charc={'Hyper Parameters':m[1][1]['hyper'],'Test_size':test_size,'No. of Cross Validation Folds Used':nfold,'Sampling':sampling}
meta_data={m[0]:{'Data_Meta_Data':data_meta_data,'Training Characteristics':training_charc,"Metrics":metrics}}
jfile='meta_data_'+str(np.random.randint(1,10000,1))+'.json'
return meta_data,jfile
#%%
meta_data,jfile=make_json(df=df,model_data=m,stratified_column='Score',metrics=metrics)
sprint6.write_json(jfile,meta_data)
#%% | random_line_split | |
amazon_ff_review.py | #%%
import pandas as pd
import numpy as np
from sklearn.metrics import accuracy_score
from sklearn.model_selection import KFold
import json
from sklearn import metrics
from sklearn.model_selection import cross_val_score
from sklearn.metrics import confusion_matrix, precision_recall_fscore_support,mean_squared_error
import time
import sqlite3
import nltk
nltk.download('punkt')
nltk.download('stopwords')
#nltk.download('vader')
from nltk.tokenize import word_tokenize
from nltk.stem.wordnet import WordNetLemmatizer
from nltk.corpus import stopwords
import string
from textblob import TextBlob
from sklearn.ensemble import RandomForestClassifier
#from textblob.sentiments import NaiveBayesAnalyzer
from functools import reduce
from sklearn.utils import resample
from sklearn.model_selection import train_test_split
import sprint6
#%%
# con=sqlite3.connect('database.sqlite')
# #df=pd.read_sql('Select * from Reviews',con=con,chunksize=10000)
# df=pd.read_sql('Select * from Reviews',con=con)
# #data=[]
# # for chunk in df:
# # data.append(chunk)
# s=df['Score']
# #s=df['ProductId']
# sample=resample(df,n_samples=25000,replace=False,stratify=s)
#%%
# sample=data[0]
# print(type(df))
#%%
def get_sentiments_from_polarity(polarity=0,threshold=0):
if np.abs(polarity) < threshold :
return "Neutral"
elif np.sign(polarity)>0:
return "Positive"
else:
return "Negative"
#%%
def get_reviews():
con=sqlite3.connect('database.sqlite')
df=pd.read_sql('Select * from Reviews',con=con)
s=df['Score']
sample=resample(df,n_samples=10000,replace=False,stratify=s)
cleaned_review=pd.DataFrame(columns=['Summary','Helpful','Score','Sentiment','Sentiment_Polarity'])
return cleaned_review,sample,df
#%%
def | (cleaned_review=None,sample=None,threshold=0):
cleaned_review['Score']=sample['Score']
cleaned_review['Summary']=sample['Summary']
######################Preprocessing###########################
stop_words=set(stopwords.words("english"))
punc = list(string.punctuation)
punc.extend(["`","``","''","..."])
##################################################
i=0
sentiment_result=[]
sentiment_polarity=[]
for review in sample['Summary']:
#print(review)
##################Preprocessing########################
# review=word_tokenize(review)
# review=[word for word in review if word not in stop_words] #Removing stop words
# review=[word for word in review if word not in punc] #Removing punctuations
# if len(review)>0:
# filtered_review=reduce(lambda a,b:a+' '+b,review)
# else:
# filtered_review=''
if len(review)>0:
filtered_review=review
else:
filtered_review=''
#############################################
sentiment = TextBlob(filtered_review).sentiment
print(f'Sentiment={sentiment}')
s=get_sentiments_from_polarity(sentiment.polarity,threshold)
print(s)
sentiment_result.append(s)
print(sentiment.polarity)
sentiment_polarity.append(sentiment.polarity)
i+=1
deno=sample['HelpfulnessDenominator']
nume=sample['HelpfulnessNumerator']
sample.index=cleaned_review.index
cleaned_review['Helpful']=np.where(deno>0,nume/deno,0)
cleaned_review['Sentiment']=sentiment_result
cleaned_review['Sentiment_Polarity']=sentiment_polarity
print(cleaned_review['Sentiment'])
sample['Sentiment']=cleaned_review['Sentiment']
sample['Sentiment_Polarity']=cleaned_review['Sentiment_Polarity']
#helpful_df=predict_helpfulness(sample)
sample['Helpful_Score']= cleaned_review['Helpful'] #pd.cut(cleaned_review['Helpful'], bins = [1,2,3,4,5], include_lowest = True)
print("!!!!!!!!!!!!!!!!!!!!!!!!!!!!!")
print(sample.head())
#print(sample.Sentiment_Polarity)
return cleaned_review,sample
#%%
# s='nn ! 8,l'
# s=s.translate(dict.fromkeys(string.punctuation))
# print(s)
# l=list(string.punctuation)
# cleaned_review.tokenized_reviews[i]=review
# print(cleaned_review.tokenized_reviews)
# reviews=pd.DataFrame(data=rlist,columns=['tokenized_reviews'])
# print(reviews)
#%%
def concat_sentiments(cleaned_review=None,sample=None):
#%%
review_df=sample
review_df=review_df.drop(columns=['HelpfulnessDenominator','HelpfulnessNumerator','Summary','Text'],axis=1)
#%%
review_df['Sentiment']=cleaned_review['Sentiment']
#print("review df .sentiment")
#print(review_df['Sentiment'])
review_df['Sentiment']=review_df['Sentiment'].astype('category')
review_df['Sentiment']=review_df['Sentiment'].cat.codes
review_df=review_df.drop(columns=['Sentiment_Polarity'],axis=1)
review_df['ProductId']=review_df['ProductId'].astype('category')
review_df['ProductId']=review_df['ProductId'].cat.codes
#%%
review_df.dtypes
#one-hot encoding the sentiments
sentiment_df=pd.get_dummies(review_df['Sentiment'],columns=['Sentiment_Neutral','Sentiment_Negative','Sentiment_Positive'])
#print("Sentiemnt DF")
#print(sentiment_df.head())
sentiment_df.columns=['Sentiment_Neutral','Sentiment_Negative','Sentiment_Positive']#(index=str,columns={0:'Sentiment_Neutral',1:'Sentiment_Negative',2:'Sentiment_Positive'})
#%%
review_df=pd.concat([review_df,sentiment_df],axis=1)
review_df=review_df.drop(columns=['Sentiment'])
#print(len(review_df['UserId'].unique()))
#print(len(review_df['ProductId'].unique()))
return review_df
#%%
def train_model(model,X,y,n=5,upsample=False,kernelTransform=False,gamma = 0.1,kernel='rbf',sampleForSVC=False):
print("****************************")
num_rows=len(X)
metrics={}
print("****************************")
print(model)
print("****************************")
print("Total Dataset Size={} x {}".format(num_rows,10))
print(f"before split {len(X)}")
#df_train=df.drop(['Class'],axis=1)
stratify=y
y_test,y_pred,t,_=train_core(model,X,y,stratify,n)
cnf=confusion_matrix(y_test,y_pred)
# This is the Final test accuracy
s=accuracy_score(y_test,y_pred)
prec_recall=precision_recall_fscore_support(y_test,y_pred,average='weighted')
p,r,f,_=prec_recall
#prec_recall=precision_recall_fscore_support(y_test,y_pred,average='weighted')
metrics['Accuracy']=s
metrics['Error']=1-s
metrics['Precision']=p
metrics['Recall']=r
metrics['FScore']=f
metrics['Training_Time_in_s']=t
print("Score= {}".format(s))
print("Error= {}".format(1-s))
print("Training Time={}".format(t))
print("Precision,Recall,F_beta,Support {}".format(prec_recall))
#y_pred=model.predict(X_test)
return metrics,cnf
#%%
def train_core(model=None,X=None,y=None,stratify=None,n=5):
print(y.head())
#stratify=X['ProductId']
X_train, X_test, y_train, y_test = train_test_split(X, y ,stratify=stratify,test_size=0.49,shuffle=True)
print(f" after split {len(X_train)}")
kmodels=[]
kscores=[]
#nfold or kfold cross validation
kf=KFold(n_splits=n)
t=0
for train_index,test_index in kf.split(X_train,y_train):
#print(f"Train Index start={train_index[0]} & end= {train_index[-1]}")
#print(f"Test Index start= {test_index[0]} & end= {test_index[-1]}")
Xk_train, Xk_test = X_train[train_index[0]:], X_train[test_index[0]:]
yk_train, yk_test = y_train[train_index[0]:], y_train[test_index[0]:]
start=time.time()
m=model.fit(Xk_train,yk_train)
end=time.time()
kmodels.append(m)
yk_pred=m.predict(Xk_test)
# Note this is the cross validation accuracy and not the final test accuracy
if stratify is not None:
ks=accuracy_score(yk_test,yk_pred) #this function can be used only for multiclass classification
kscores.append(ks)
best_model_index=kscores.index(max(kscores))
else:
ks=mean_squared_error(yk_test,yk_pred)
kscores.append(ks)
best_model_index=kscores.index(min(kscores))
t=t+(end-start)
t=t/n
print(f"Cross validation scores ={kscores}")
print(f"Best model index ={best_model_index}")
best_model=kmodels[best_model_index]
print(f"Best model ={best_model}")
y_pred=best_model.predict(X_test)
return y_test,y_pred,t,best_model
#%%
def run():
cleaned_review,sample,df=get_reviews()
sample_helpful=resample(df,n_samples=10000,replace=False,stratify=df['Score'])
#print(sample.head())
#print(sample_helpful.head())
cleaned_review_helpful=cleaned_review
cleaned_review,sample=preprocess_and_get_sentiments(cleaned_review=cleaned_review,sample=sample,threshold=0.5)
review_df=concat_sentiments(cleaned_review,sample)
y=review_df['Score']
X=review_df
X=X.drop(columns=['UserId','Score','ProfileName','Time'],axis=1)
###########################################################
cleaned_review_helpful,sample_helpful=preprocess_and_get_sentiments(cleaned_review=cleaned_review_helpful,sample=sample_helpful,threshold=0.5)
review_df_helpful=concat_sentiments(cleaned_review_helpful,sample_helpful)
#y_h=review_df_helpful['Score']
#X_h=review_df_helpful
X_helpful=review_df_helpful.drop(columns=['UserId','Score','ProfileName','Time'],axis=1)
y_helpful=X_helpful['Helpful_Score']
X_helpful=X_helpful.drop(columns=['Helpful_Score'],axis=1)
helpful_df=predict_helpfulness(X_helpful,y_helpful,X)
regression_error=mean_squared_error(y_helpful,helpful_df)
###########################################################
#print(X.head)
helpful_df=pd.DataFrame(data=helpful_df,index=X.index,columns=['Helpful_Score'])
X['Helpful_Score']=helpful_df
print("After adding predicted helpful scores")
print(X.head)
print("************* Training Final Reviews Model*****************")
models={}
model=RandomForestClassifier(criterion='gini',n_estimators=10)
models.update({'Random Forest':[model,{'hyper':{'criterion': 'gini','n_estimators':'10'}}]}) #,'Max_depth':'None','Min_samples_split':'2'
metrics,cnf=train_model(model,X,y)
print(metrics)
print(cnf)
# return X
return X,models,metrics,cnf,regression_error
#%%
from sklearn.linear_model import LinearRegression
def predict_helpfulness(X=None,y=None,train_data=None):
model=LinearRegression()
print("**************Training Helpful score model*************")
_,_,_,best_model=train_core(model=model,X=X,y=y,n=5)
train_data=train_data.drop(columns=['Helpful_Score'],axis=1)
helpful_df=best_model.predict(train_data)
# get artificial features for our unknown train_data
return helpful_df
# X_backup=X
# dummy_df=pd.get_dummies(X)
#%%
df,models,metrics,cnf,regression_error=run()
for i in models.items():
m=i
#%%
def make_json(df=None,model_data=None,upsample=False,stratified_column=None,metrics=None,test_size=0.49,nfold=5):
''' Preparation of Metadata in Dictionary Format'''
if upsample:
sampling='Upsampling'
else:
sampling='Stratified Sampling stratified on '+ stratified_column
encoding={'encoding_used':'One-Hot Encoding'}
data_meta_data={'Name':'Amazon Fine Food Reviews','Rows':len(df),'Columns Before Preprocessing':10,'Columns After Preprocessing & one hot encoding':len(df.columns),'Encoding':encoding,'Classification Type':"Multi-Class","Class Variable":'Score'}
training_charc={'Hyper Parameters':m[1][1]['hyper'],'Test_size':test_size,'No. of Cross Validation Folds Used':nfold,'Sampling':sampling}
meta_data={m[0]:{'Data_Meta_Data':data_meta_data,'Training Characteristics':training_charc,"Metrics":metrics}}
jfile='meta_data_'+str(np.random.randint(1,10000,1))+'.json'
return meta_data,jfile
#%%
meta_data,jfile=make_json(df=df,model_data=m,stratified_column='Score',metrics=metrics)
sprint6.write_json(jfile,meta_data)
#%%
| preprocess_and_get_sentiments | identifier_name |
lib.rs | //! # Acteur Actor System
//!
//! A safe & opinionated actor-like framework written in Rust that just works. Simple, robust, fast, documented.
//!
//!<div align="center">
//! <!-- Crates version -->
//! <a href="https://crates.io/crates/acteur">
//! <img src="https://img.shields.io/crates/v/acteur.svg?style=flat-square"
//! alt="Crates.io version" />
//! </a>
//! <!-- docs.rs docs -->
//! <a href="https://docs.rs/acteur">
//! <img src="https://img.shields.io/badge/docs-latest-blue.svg?style=flat-square"
//! alt="docs.rs docs" />
//! </a>
//! </div>
//!
//! ## Status update
//!
//! #### Update 1:
//! So, I took some time to think about this framework and I intend to move it towards a business
//! logic + distributed framework. The idea is to make a framework that allows you to write identified
//! aggregates/models/actors without much burden.
//!
//! #### Update 2:
//! I'm playing with raft and sled in order to implement the cluster part. You can see it in the file
//! playing_with_raft.rs
//!
//! ## Motivation
//!
//! Actors are cool. Many people write about them and Actix rules the benchmarks. But writing a backend system
//! that spawns several servers using actors is not easy; actually, it brings many other complexities. Actors are
//! not a bad abstraction, but they are a solution for concurrency, not for business logic organization. They
//! tangentially solve some problems, which is nice, but introduce others. So, this framework seeks to implement
//! something very similar to Actors but with many adaptations and niceties in order
//! to write business logic.
//!
//! That said, Acteur is probably **not** the tool you want if:
//!
//! - You want to have a full ACID compliant system
//! - You want to fully follow the Actor model
//! - You need to scale to A LOT of traffic, in which case you will need more than one server. (I'm planning to
//! implement some multi-server clustering, but for now, only one server.)
//!
//! But it may help you if you want:
//!
//! - To have a database but not incur the cost of READ, APPLY, SAVE, and instead keep object
//! instances in RAM.
//! - To avoid dealing with optimistic concurrency: messages are processed one by one for each
//! ID, but concurrently between IDs.
//! - To build a backend for an online videogame with many entities interacting at the same time, without
//! going all the way to ECS.
//!
//! ## Main features of Acteur
//!
//! This actor system is a bit different than other frameworks. It works under the following premises:
//! - **High-level**: The framework is oriented to map business logic rather than task concurrency.
//! - **Simple**: The API should be small, simple and intuitive. No surprises.
//! - **Concurrent**: The system should be fast and use all available CPU cores.
//! - **Documented**: Everything must be documented with exhaustive examples.
//!
//! ### Regarding the implementation:
//!
//! - Acteur is **asynchronous** and uses `async_std` under the hood.
//! - Actors have an *ID* whose type is defined by the developer.
//! - Messages are routed to an *Actor* and an *ID*.
//! - Actor life-cycle is *automatically* managed by the framework.
//! - Messages for the same Actor & ID are *sequential*. Everything else is executed **concurrently**.
//! - Services are provided for other concurrency forms.
//! - Services **don't** have an ID and are concurrent.
//! - Services can **subscribe** to messages and everyone can **publish** messages.
//! - Acteur is **global**, only one instance can exist.
//!
//! ### State of the implementation
//!
//! My main focus of work now is in adding concurrency and improving ergonomics. Features already implemented:
//!
//! - ☑️ Actor / Service is activated on first message
//! - ☑️ Actor can send messages to other actors / services
//! - ☑️ System can send messages to any actor / service
//! - ☑️ Actors / Services can optionally respond to messages
//! - ☑️ Services: stateful or stateless, without ID (like real actors) and concurrent.
//! - ☑️ Automatic deallocation of unused actors (after 5 minutes without messages)
//! - ☑️ Services can subscribe to messages
//! - □ Actor deallocation configuration (based on RAM, Actor count or timeout)
//! - □ Clustering: Implement Raft in order to assign each actor to a different server
//!
//! ## Acteur structure
//!
//! In order to use Acteur you just need to implement the correct trait and Acteur will
//! automatically use your implementation when a message is routed to your Actor/Service.
//!
//! The main traits are:
//!
//! - [Actor](./trait.Actor.html): Represents an actor
//! - [Service](./trait.Service.html): Represents a service
//!
//! Just implement them and your Actor/Service is ready to use.
//!
//! For Actors you have two traits in order to handle messages:
//!
//! - [Receive](./trait.Receive.html): Receives a message without responding to it. The most
//! efficient way to handle messages.
//! - [Respond](./trait.Respond.html): Receives a message and allows responding to it. Forces
//! the sender to await until the actor responds.
//!
//! For Services you have another two traits:
//!
//! - [Listen](./trait.Listen.html): Receives a message without responding to it. The most efficient way
//! to handle messages.
//! - [Serve](./trait.Serve.html): Receives a message and allows responding to it. Forces the sender to
//! await until the actor responds.
//!
//! ### Why are you using 4 different traits instead of 1 or 2?
//!
//! I tried to merge Traits but I didn't find how to do it because:
//!
//! A) The handle method contains the ActorAssistant and ServiceAssistant types in the signatures,
//! which are different types.
//! B) I don't like to create a response channel for EVERY message when many messages don't need a response.
//!
//! Both blocks make 4 combinations. Receive/Respond for Actors and Listen/Serve for Services.
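//!
//! For instance, the difference between `Receive` (fire-and-forget) and `Respond` (request/response)
//! looks roughly like the sketch below. The `GetSalary` message is made up for the example, and the
//! exact `Respond` signature (notably the associated `Response` type) is an assumption from memory,
//! so check the trait docs of the version you are using:
//!
//! ```rust,no_run
//! use acteur::{Actor, ActorAssistant, Respond};
//! use async_trait::async_trait;
//!
//! #[derive(Debug)]
//! struct Employee {
//!     salary: u32
//! }
//!
//! #[async_trait]
//! impl Actor for Employee {
//!     type Id = u32;
//!
//!     async fn activate(_: Self::Id, _: &ActorAssistant<Self>) -> Self {
//!         Employee { salary: 0 }
//!     }
//! }
//!
//! #[derive(Debug)]
//! struct GetSalary;
//!
//! #[async_trait]
//! impl Respond<GetSalary> for Employee {
//!     // Assumed associated type: the value returned to the caller awaiting the response.
//!     type Response = u32;
//!
//!     async fn handle(&mut self, _: GetSalary, _: &ActorAssistant<Employee>) -> u32 {
//!         self.salary
//!     }
//! }
//! ```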
//!
//! I'm still trying to improve the naming and ergonomics. I think the concept will remain, but the ergonomics may change a bit.
//!
//! ## Actors vs Services
//!
//! Acteur provides 2 ways of concurrency. Actors and Services.
//!
//! ### Actors
//!
//! Actors have an ID and will consume messages directed to the same Actor's ID sequentially.
//! That means that if you send 2 messages to the Actor User-32, they will be handled sequentially.
//! On the other side, if you send a message to the Actor User-32 and another to the Actor User-52, the
//! messages will be handled concurrently.
//!
//! That means Actor instances keep message order for the same ID, but not between different IDs.
//!
//! ### Services
//!
//! Services, on the other side, have no ID and they are concurrent. That means that you choose
//! how many instances of the Service there will be (Acteur provides a default). Services may or may
//! not have a State, but if they do, it must be Sync (e.g. a Mutex<State>).
//!
//! In short: Services are more like normal Actors (or, you can think of them as normal web services)
//! but with some preset concurrency factor. You can have many instances and there is
//! no synchronization of any kind when consuming messages. Think of them as the primitive to
//! use when you want to create something that doesn't fit the Actors model in this framework.
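//!
//! As a sketch of how that looks in code (the `EmailSender`/`SendEmail` types are made up, and the
//! exact `initialize` signature and `ServiceConfiguration` value are assumptions about the current
//! API, so double check them against the trait docs):
//!
//! ```rust,no_run
//! use acteur::{Listen, Service, ServiceAssistant, ServiceConfiguration};
//! use async_trait::async_trait;
//!
//! #[derive(Debug)]
//! struct EmailSender; // stateless, so no Mutex is needed
//!
//! #[async_trait]
//! impl Service for EmailSender {
//!     // Assumed signature: build the initial state plus the service configuration (concurrency, etc.).
//!     async fn initialize(_: &ServiceAssistant<Self>) -> (ServiceConfiguration, Self) {
//!         (ServiceConfiguration::default(), EmailSender)
//!     }
//! }
//!
//! #[derive(Debug)]
//! struct SendEmail {
//!     to: String
//! }
//!
//! #[async_trait]
//! impl Listen<SendEmail> for EmailSender {
//!     // Note `&self` instead of `&mut self`: several messages may be handled concurrently.
//!     async fn handle(&self, message: SendEmail, _: &ServiceAssistant<EmailSender>) {
//!         println!("Sending email to {}", message.to);
//!     }
//! }
//! ```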
//!
//! ### Use cases
//!
//! Choose Actor for Entities (Users, Invoices, Players, anything whose instances are identified).
//!
//! Choose Services for Business Logic, Infrastructure, Adapters, etc (Storage, DB access, HTTP services, | //!
//! ## Subscription or Pub/Sub
//!
//! Sometimes we don't want to know who should receive the message but to subscribe to a type and wait.
//! Acteur models the Pub/Sub pattern with Services. Actors in Acteur can't perform subscriptions as
//! that would require the framework to know all possible IDs of all possible Actor instances in
//! order to direct the message to the correct one (or all) and it doesn't play well with the deallocation
//! of unused actors.
//!
//! If you want to send messages to some Actors from a Subscription, you can create a Service that
//! subscribes to a message and then figures out to what Actor IDs to send the message. For example,
//! doing a query in the DB/Service in order to get the set of IDs that need to receive some message.
//!
//! Unlike sending/calling to services/actors, publishing doesn't know who needs to receive the
//! message at compilation time. That is the reason behind requiring Services to subscribe at
//! runtime to any message they want to receive. In order to ensure that services perform their
//! subscriptions, it is a good idea to run `acteur.preload_service::<Service>()` for each service
//! that should perform any subscription at the beginning of your application's startup.
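//!
//! A sketch of the subscription side is shown below. It reuses the assumed `Service::initialize`
//! signature from the earlier Service sketch, and the `subscribe` method name is an assumption;
//! consult the crate docs for the exact API. All type names are made up.
//!
//! ```rust,ignore
//! use acteur::{Listen, Service, ServiceAssistant, ServiceConfiguration};
//! use async_trait::async_trait;
//!
//! #[derive(Debug)]
//! struct UserDeleted(u32); // hypothetical event
//!
//! #[derive(Debug)]
//! struct CleanupService;
//!
//! #[async_trait]
//! impl Service for CleanupService {
//!     async fn initialize(system: &ServiceAssistant<Self>) -> (Self, ServiceConfiguration) {
//!         // Assumed call: register interest in UserDeleted at runtime.
//!         system.subscribe::<UserDeleted>().await;
//!         (CleanupService, ServiceConfiguration::default())
//!     }
//! }
//!
//! #[async_trait]
//! impl Listen<UserDeleted> for CleanupService {
//!     async fn handle(&self, message: UserDeleted, _: &ServiceAssistant<Self>) {
//!         // Figure out which Actor IDs are affected and send them messages here.
//!         let _ = message;
//!     }
//! }
//! ```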
//!
//! ## Simple Example
//!
//! ```rust,no_run
//! use acteur::{Actor, Receive, ActorAssistant, Acteur};
//! use async_trait::async_trait;
//!
//! #[derive(Debug)]
//! struct Employee {
//! salary: u32
//! }
//!
//! #[async_trait]
//! impl Actor for Employee {
//! type Id = u32;
//!
//! async fn activate(_: Self::Id, _: &ActorAssistant<Self>) -> Self {
//! Employee {
//! salary: 0 // Load from DB or set a default,
//! }
//! }
//! }
//!
//! #[derive(Debug)]
//! struct SalaryChanged(u32);
//!
//! #[async_trait]
//! impl Receive<SalaryChanged> for Employee {
//! async fn handle(&mut self, message: SalaryChanged, _: &ActorAssistant<Employee>) {
//! self.salary = message.0;
//! }
//! }
//!
//! fn main() {
//! let sys = Acteur::new();
//!
//! sys.send_to_actor_sync::<Employee, SalaryChanged>(42, SalaryChanged(55000));
//!
//! sys.wait_until_stopped();
//! }
//!
//! ```
//!
//! ## Why another Actors framework?
//!
//! Some things bothered me.
//!
//! 1. Actor systems are a concurrency level, but I see examples of them being used for business logic. Using
//! a normal HTTP framework + SQL feels more natural than using Actix.
//! 2. In order to use Actix you need to learn how it works. You need to manage the concurrency,
//! the addresses, etc.
//! 3. Unsafe. I don't want unsafe. I wouldn't trust myself to do something like this in C++;
//! therefore, I don't want to have unsafe code. Rust opens the door for people with less than
//! 10 years of experience in C/C++ to do these kinds of projects in a safer way.
//!
//! After the async_std 1.0 announcement and speaking with some friends, I started to envision how I would
//! like an actor framework to be. Not that Actix and others are wrong, but they are too low level in my
//! opinion and not for business logic. I wanted something that just runs without leaking so many underlying
//! concepts. At the same time, I don't think that competing for the last nanosecond is healthy, even less
//! if the framework is already super fast.
//!
//! ## Common patterns
//!
//! This section will be updated with common patterns you can use in your applications. If
//! you have one you want to add, or just a question about how to do something, let me know with a GitHub Issue.
//!
//! ### Web server
//!
//! Given that all actors are managed by the framework, it is really easy to have, for
//! example, Rocket or Tide receiving new HTTP calls and just calling `acteur.call_service` or
//! `acteur.call_actor` and waiting for the response. You can use the sync version of the call
//! if you are working with synchronous code. Keep in mind that you can clone Acteur and send
//! it to as many threads/structs as you need.
//!
//! ```rust,no_run
//!
//! use acteur::Acteur;
//!
//! let acteur = Acteur::new();
//!
//! // You can clone and send it to another thread/struct
//! let acteur2 = acteur.clone();
//!
//! ```
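//!
//! For example, a synchronous HTTP handler could resolve a request by calling into an actor. The
//! `call_actor_sync` method name and the `GetSalary` message below are assumptions (mirroring the
//! `send_to_actor_sync` call in the Simple Example above); check the `Acteur` facade docs for the
//! exact API and return type.
//!
//! ```rust,ignore
//! fn get_salary_handler(acteur: &Acteur, employee_id: u32) -> u32 {
//!     // Blocks the current thread until the Employee actor responds.
//!     acteur.call_actor_sync::<Employee, GetSalary>(employee_id, GetSalary).unwrap()
//! }
//! ```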
//!
//! If you need actors to query databases, it is generally a good idea to keep the database
//! connection / pool in a service, where you can handle connection errors, reconnect in case of error,
//! and control the concurrency.
//!
//! ## Error handling
//!
//! If you have operations that can fail, it is better to encode them in services and reserve
//! Actors for operations that cannot fail. For example: database connections, network connections, etc.
//!
//! It is perfectly OK to encode a failure from the point of view of the business rules in an actor. For
//! example, in a videogame, a character may fail to attack another character because the second one is
//! invulnerable.
//!
//! So, keep anything that can fail because of external circumstances (network, hard drive, etc) in services
//! and let actors request the services for whatever they need.
//!
//! If you have an error that should stop the application startup, like a failing database connection, put it in
//! a service's construction and use the `preload_service` method to try to start the service at
//! app startup, letting the app crash if something goes wrong.
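//!
//! A small sketch of that pattern (assuming a `DatabaseService` implemented along the lines of the
//! Service sketch above, whose `initialize` panics if the connection cannot be established):
//!
//! ```rust,ignore
//! let acteur = Acteur::new();
//!
//! // Construct the service eagerly so a broken database configuration crashes the
//! // process at startup instead of on the first message.
//! acteur.preload_service::<DatabaseService>();
//! ```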
//!
//! ## Safe Rust
//!
//! No unsafe code is directly used in this crate. You can check the `#![deny(unsafe_code)]` line in lib.rs.
//!
//! ## Contributing
//!
//! First of all, I would be really happy if you decide to check out the framework and contribute to it! Just open
//! an issue / pull request and we can check what you would like to implement. Read more about contributing
//! here: <https://github.com/DavidBM/acteur-rs/blob/master/CONTRIBUTING.md>
//!
//! ## License
//!
//! <sup>
//! Licensed under either of <a href="LICENSE-APACHE">Apache License, Version
//! 2.0</a> or <a href="LICENSE-MIT">MIT license</a> at your option.
//! </sup>
//!
//! <br/>
//!
//! <sub>
//! Unless you explicitly state otherwise, any contribution intentionally submitted
//! for inclusion in this crate by you, as defined in the Apache-2.0 license, shall
//! be dual licensed as above, without any additional terms or conditions.
//! </sub>
//!
#![deny(unsafe_code)]
#[macro_use]
mod utils;
mod actors;
mod facade;
mod services;
mod system_director;
pub use facade::Acteur;
pub use actors::actor::Actor;
pub use actors::assistant::ActorAssistant;
pub use actors::handle::{Receive, Respond};
pub use services::handle::{Listen, Serve};
pub use services::service::{Service, ServiceConcurrency, ServiceConfiguration};
pub use services::system_facade::ServiceAssistant; | //! calculations of some sort that doesn't belong to any Actor, etc) and for subscribing to messages (Pub/Sub) | random_line_split |
preprocessing_poland.py | import logging
from contextlib import closing
from pathlib import Path
import numpy as np
import pandas as pd
from tqdm import tqdm
from openpyxl import load_workbook
from xlrd import XLRDError
from src.data.datasets import *
def prepare_family_structure_from_voivodship(data_folder: Path) -> pd.DataFrame:
"""
Preprocesses the family structure excel for a voivodship from pivoted to melted table for easier further processing.
"""
if (data_folder / household_family_structure_xlsx.file_name).is_file():
return pd.read_excel(str(data_folder / household_family_structure_xlsx.file_name))
headcount_columns = [1, 2, 3, 4, 5, 6, 7]
try:
df = pd.read_excel(str(data_folder / voivodship_cities_household_family_structure_xlsx.file_name),
sheet_name=voivodship_cities_household_family_structure_xlsx.sheet_name)
except XLRDError: # sheet not found
df_q = pd.read_excel(str(data_folder / voivodship_cities_household_family_structure_xlsx.file_name),
sheet_name='quantities')
df = df_q.copy()
for column in headcount_columns:
df[column] = df[column] / df[column].sum()
# FIXME: append a sheet instead of replacing it
with closing(pd.ExcelWriter(str(data_folder / voivodship_cities_household_family_structure_xlsx.file_name),
engine='openpyxl')) as writer:
df.to_excel(writer, sheet_name=voivodship_cities_household_family_structure_xlsx.sheet_name, index=False)
df2 = pd.melt(df,
id_vars=['family_type', 'relationship', 'house master'],
value_vars=[1, 2, 3, 4, 5, 6, 7], var_name='household_headcount',
value_name='probability_within_headcount').rename(columns={'house master': 'house_master'})
df2.to_excel(str(data_folder / household_family_structure_xlsx.file_name), index=False)
return df2
def temporary_hack(fcn):
# FIXME: introducing a hack that is actually going to destroy the probability distribution
# since it is not possible to have 3 generations in a household where only 2 people live
# but there is no distribution given for that
# the input data has to be replaced with a proper distribution that also depends on headcount to have these values
# correct
def inner(df, headcount, family_type, relationship, house_master):
out_df = fcn(df, headcount, family_type, relationship, house_master)
if len(out_df[out_df['nb_generations'] <= headcount]) > 0:
out_df = out_df[out_df['nb_generations'] <= headcount]
out_df['probability'] /= out_df['probability'].sum()
return out_df
return inner
@temporary_hack
def | (df, headcount, family_type, relationship, house_master):
"""
Given a headcount, family type (0,1,2,3), relationship between families
(if applicable) and who the housemaster is (in multi-family households), this method returns all matching
households in the df dataframe.
"""
if house_master not in (np.nan, '', None) and isinstance(house_master, str):
house_master = house_master.strip()
if relationship not in (np.nan, '', None) and isinstance(relationship, str):
relationship = relationship.strip()
if family_type == 1:
if house_master not in (np.nan, '', None):
return df[(df.family_type == family_type) & (df.relationship == relationship)
& (df.house_master == house_master)]
return df[(df.family_type == family_type) & (df.relationship == relationship)]
if family_type == 2:
if house_master not in (np.nan, '', None):
out_df = df[(df.family_type == family_type) & (df.relationship == relationship)
& (df.house_master == house_master)]
if len(out_df) > 0:
return out_df
if relationship not in (np.nan, '', None):
out_df = df[(df.family_type == family_type) & (df.relationship == relationship)]
if len(out_df) > 0:
return out_df
return df[(df.family_type == family_type)]
if family_type == 3:
return df[(df.family_type == family_type)]
if family_type == 0:
if headcount == 1:
return df[(df.family_type == family_type) & (df.relationship == 'Jednoosobowe')]
return df[(df.family_type == family_type) & (df.relationship == 'Wieloosobowe')]
raise ValueError(f'Unknown family type {family_type}')
def _sanitize_households_count(households_count_df, population_size):
old_population = (households_count_df['nb_of_people_in_household'] * households_count_df['nb_of_households']).sum()
households_count_df['nb_of_households'] *= (population_size / old_population)
households_count_df['nb_of_households'] = households_count_df['nb_of_households'].apply(np.ceil).astype(int)
assert (households_count_df['nb_of_people_in_household'] * households_count_df['nb_of_households']).sum() \
>= population_size
def _filter_family_structures_for_household(family_structure_df, hc_row):
fs_df = family_structure_df[family_structure_df.household_headcount == hc_row.nb_of_people_in_household].copy()
fs_df['total'] = (np.round(fs_df.probability_within_headcount * hc_row.nb_of_households)).astype(int)
difference_due_to_rounding = hc_row.nb_of_households - fs_df['total'].sum()
if difference_due_to_rounding != 0:
logging.info(f'Difference due to numeric error {difference_due_to_rounding} - assigning randomly')
try:
fs_df.loc[np.random.choice(fs_df[fs_df.total > 0].index.tolist()), 'total'] += difference_due_to_rounding
except ValueError:
fs_df.loc[np.random.choice(fs_df.index.tolist()), 'total'] += difference_due_to_rounding
return fs_df
def get_generations_configuration_df(data_folder: Path, xlsx_file: XlsxFile) -> pd.DataFrame:
generations_configuration_df = pd.read_excel(str(data_folder / xlsx_file.file_name),
sheet_name=xlsx_file.sheet_name)
generations_configuration_df['nb_generations'] = generations_configuration_df['young'] \
+ generations_configuration_df['middle'] \
+ generations_configuration_df['elderly']
return generations_configuration_df
def generate_household_indices(data_folder: Path, output_folder: Path, population_size: int, start_index: int = 0) \
-> pd.DataFrame:
"""Generates and saves to an excel file a dataframe of households. Each household consists of:
* an index,
* headcount,
* family_type (0,1,2,3 - number of families in the household)
* relationship - between families, if more than one lives in the household
* house_master - in 2 and 3 families households, which family does the housemaster belong to
* family_structure_regex - auxiliary description of a household
* young - flag whether people younger than 30 years old live in a household
* middle - flag, whether people between 30 and 59 inclusive live in a household
* elderly - flag, whether people older than 59 live in a household
:param data_folder: data folder
:param output_folder: where to save an output file
:param population_size: size of a population needing accommodation
:param start_index: index to start numbering households with (default 0)
:return: dataframe of households
"""
household_headcount = []
family_type = []
relationship = []
house_master = []
young = []
middle = []
elderly = []
family_structure_df = pd.read_excel(str(data_folder / household_family_structure_xlsx.file_name),
sheet_name=household_family_structure_xlsx.sheet_name)
households_count_df = pd.read_excel(str(data_folder / households_count_xlsx.file_name),
sheet_name=households_count_xlsx.sheet_name)
generations_configuration_df = get_generations_configuration_df(data_folder, generations_configuration_xlsx)
_sanitize_households_count(households_count_df, population_size)
for i, hc_row in tqdm(households_count_df.iterrows(), total=len(households_count_df.index)):
# family structure given this headcount
fs_df = _filter_family_structures_for_household(family_structure_df, hc_row)
for j, row in fs_df[fs_df.total > 0].iterrows():
household_headcount.extend([row.household_headcount] * row.total)
family_type.extend([row.family_type] * row.total)
relationship.extend([row.relationship] * row.total)
house_master.extend([row.house_master] * row.total)
gc_df = draw_generation_configuration_for_household(generations_configuration_df,
row.household_headcount,
row.family_type,
row.relationship,
row.house_master)
gc_idx = np.random.choice(gc_df.index.tolist(), p=gc_df.probability, size=row.total)
young.extend(gc_df.loc[gc_idx, 'young'])
middle.extend(gc_df.loc[gc_idx, 'middle'])
elderly.extend(gc_df.loc[gc_idx, 'elderly'])
household_indices = list(range(start_index, len(household_headcount) + start_index))
household_df = pd.DataFrame(data=dict(household_index=household_indices,
household_headcount=household_headcount,
family_type=family_type,
relationship=relationship,
house_master=house_master,
young=young, middle=middle, elderly=elderly))
household_df.to_feather(str(output_folder / output_households_basic_feather.file_name))
return household_df
def generate_generations_configuration(data_folder: Path) -> pd.DataFrame:
"""
This function does the preprocessing of Census data for age generations living together in households:
Generations - a table that contains probability of living together. In the original table there are seven columns:
* young alone -> cat1
* middle-aged alone -> cat2
* elderly alone -> cat3
* young and middle-aged together -> cat4
* young and elderly together -> cat5
* middle-aged and elderly together -> cat6
* young, middle-aged and elderly together -> cat7
The function takes occurrences of each category and models them as three boolean columns: young, middle, elderly.
Additionally, family_type field is changed from descriptive, string form into a number (0, 1, 2, 3) that represents
the number of families living in a household.
"""
output_file = data_folder / generations_configuration_xlsx.file_name
if output_file.is_file():
return pd.read_excel(str(output_file), sheet_name=generations_configuration_xlsx.sheet_name)
voivodship_workbook_path = str(data_folder / voivodship_cities_generations_configuration_xlsx.file_name)
v_config_df = pd.read_excel(voivodship_workbook_path, sheet_name='preprocessed', header=[0, 1])
melted = pd.melt(v_config_df, id_vars=[('Unnamed: 0_level_0', 'family_type'),
('Unnamed: 1_level_0', 'relationship'),
('Unnamed: 2_level_0', 'house_master')],
var_name=['unit', 'category'],
value_name='total')
melted = melted.rename(columns={('Unnamed: 0_level_0', 'family_type'): 'family_type',
('Unnamed: 1_level_0', 'relationship'): 'relationship',
('Unnamed: 2_level_0', 'house_master'): 'house_master'})
melted['young'] = melted.category.isin(['cat1', 'cat4', 'cat5', 'cat7']).astype(int)
melted['middle'] = melted.category.isin(['cat2', 'cat4', 'cat6', 'cat7']).astype(int)
melted['elderly'] = melted.category.isin(['cat3', 'cat5', 'cat6', 'cat7']).astype(int)
melted = melted[melted.category != 'total']
melted = melted.drop(columns=['category'])
melted['relationship'] = melted['relationship'].fillna('N/A')
melted['house_master'] = melted['house_master'].fillna('N/A')
pivoted = pd.pivot_table(melted, columns=['unit'], values='total',
index=['family_type', 'relationship', 'house_master', 'young', 'middle', 'elderly'],
aggfunc='first').reset_index()
pivoted.households = pd.to_numeric(pivoted.households, errors='coerce')
pivoted.people = pd.to_numeric(pivoted.people, errors='coerce')
pivoted = pivoted.fillna(0)
pivoted.loc[pivoted['family_type'] == 'Jednorodzinne', 'family_type'] = 1
pivoted.loc[pivoted['family_type'] == 'Dwurodzinne', 'family_type'] = 2
pivoted.loc[pivoted['family_type'] == 'Trzy i więcej rodzinne', 'family_type'] = 3
pivoted.loc[pivoted['family_type'] == 'Nierodzinne', 'family_type'] = 0
book = load_workbook(voivodship_workbook_path)
if voivodship_cities_generations_configuration_xlsx.sheet_name in book.sheetnames:
del book[voivodship_cities_generations_configuration_xlsx.sheet_name]
with closing(pd.ExcelWriter(voivodship_workbook_path, engine='openpyxl')) as writer:
writer.book = book
pivoted.to_excel(writer, sheet_name=voivodship_cities_generations_configuration_xlsx.sheet_name, index=False)
writer.save()
# update with probabilities
df = pivoted.groupby(by=['family_type', 'relationship', 'house_master'])['households'].sum().reset_index() \
.rename(columns={'households': 'total'})
pivoted = pivoted.merge(df, how='left', on=['family_type', 'relationship', 'house_master'])
pivoted['probability'] = pivoted['households'] / pivoted['total']
pivoted.to_excel(str(output_file), sheet_name=generations_configuration_xlsx.sheet_name, index=False)
return pivoted
def voivodship_to_symbol(voivodship: str):
voivodship = voivodship.lower()
voivodship_dict = {
'podlaskie': 'B',
'kujawsko-pomorskie': 'C',
'dolnośląskie': 'D',
'łódzkie': 'E',
'lubuskie': 'F',
'pomorskie': 'G',
'małopolskie': 'K',
'lubelskie': 'L',
'warmińsko-mazurskie': 'N',
'opolskie': 'O',
'wielkopolskie': 'P',
'podkarpackie': 'R',
'śląskie': 'S',
'świętokrzyskie': 'T',
'mazowieckie': 'W',
'zachodniopomorskie': 'Z'}
return voivodship_dict.get(voivodship)
if __name__ == '__main__':
project_dir = Path(__file__).resolve().parents[2]
city_folder = project_dir / 'data' / 'processed' / 'poland' / 'WW'
prepare_family_structure_from_voivodship(city_folder)
generate_generations_configuration(city_folder)
| draw_generation_configuration_for_household | identifier_name |
preprocessing_poland.py | import logging
from contextlib import closing
from pathlib import Path
import numpy as np
import pandas as pd
from tqdm import tqdm
from openpyxl import load_workbook
from xlrd import XLRDError
from src.data.datasets import *
def prepare_family_structure_from_voivodship(data_folder: Path) -> pd.DataFrame:
|
def temporary_hack(fcn):
# FIXME: introducing a hack that is actually going to destroy the probability distribution
# since it is not possible to have 3 generations in a household where only 2 people live
# but there is no distribution given for that
# the input data has to be replaced with a proper distribution that also depends on headcount to have these values
# correct
def inner(df, headcount, family_type, relationship, house_master):
out_df = fcn(df, headcount, family_type, relationship, house_master)
if len(out_df[out_df['nb_generations'] <= headcount]) > 0:
out_df = out_df[out_df['nb_generations'] <= headcount]
out_df['probability'] /= out_df['probability'].sum()
return out_df
return inner
@temporary_hack
def draw_generation_configuration_for_household(df, headcount, family_type, relationship, house_master):
"""
Given a headcount, family type (0,1,2,3), relationship between families
(if applicable) and who the housemaster is (in multi-family households), this method returns all matching
households in the df dataframe.
"""
if house_master not in (np.nan, '', None) and isinstance(house_master, str):
house_master = house_master.strip()
if relationship not in (np.nan, '', None) and isinstance(relationship, str):
relationship = relationship.strip()
if family_type == 1:
if house_master not in (np.nan, '', None):
return df[(df.family_type == family_type) & (df.relationship == relationship)
& (df.house_master == house_master)]
return df[(df.family_type == family_type) & (df.relationship == relationship)]
if family_type == 2:
if house_master not in (np.nan, '', None):
out_df = df[(df.family_type == family_type) & (df.relationship == relationship)
& (df.house_master == house_master)]
if len(out_df) > 0:
return out_df
if relationship not in (np.nan, '', None):
out_df = df[(df.family_type == family_type) & (df.relationship == relationship)]
if len(out_df) > 0:
return out_df
return df[(df.family_type == family_type)]
if family_type == 3:
return df[(df.family_type == family_type)]
if family_type == 0:
if headcount == 1:
return df[(df.family_type == family_type) & (df.relationship == 'Jednoosobowe')]
return df[(df.family_type == family_type) & (df.relationship == 'Wieloosobowe')]
raise ValueError(f'Unknown family type {family_type}')
def _sanitize_households_count(households_count_df, population_size):
old_population = (households_count_df['nb_of_people_in_household'] * households_count_df['nb_of_households']).sum()
households_count_df['nb_of_households'] *= (population_size / old_population)
households_count_df['nb_of_households'] = households_count_df['nb_of_households'].apply(np.ceil).astype(int)
assert (households_count_df['nb_of_people_in_household'] * households_count_df['nb_of_households']).sum() \
>= population_size
def _filter_family_structures_for_household(family_structure_df, hc_row):
fs_df = family_structure_df[family_structure_df.household_headcount == hc_row.nb_of_people_in_household].copy()
fs_df['total'] = (np.round(fs_df.probability_within_headcount * hc_row.nb_of_households)).astype(int)
difference_due_to_rounding = hc_row.nb_of_households - fs_df['total'].sum()
if difference_due_to_rounding != 0:
logging.info(f'Difference due to numeric error {difference_due_to_rounding} - assigning randomly')
try:
fs_df.loc[np.random.choice(fs_df[fs_df.total > 0].index.tolist()), 'total'] += difference_due_to_rounding
except ValueError:
fs_df.loc[np.random.choice(fs_df.index.tolist()), 'total'] += difference_due_to_rounding
return fs_df
def get_generations_configuration_df(data_folder: Path, xlsx_file: XlsxFile) -> pd.DataFrame:
generations_configuration_df = pd.read_excel(str(data_folder / xlsx_file.file_name),
sheet_name=xlsx_file.sheet_name)
generations_configuration_df['nb_generations'] = generations_configuration_df['young'] \
+ generations_configuration_df['middle'] \
+ generations_configuration_df['elderly']
return generations_configuration_df
def generate_household_indices(data_folder: Path, output_folder: Path, population_size: int, start_index: int = 0) \
-> pd.DataFrame:
"""Generates and saves to an excel file a dataframe of households. Each household consists of:
* an index,
* headcount,
* family_type (0,1,2,3 - number of families in the household)
* relationship - between families, if more than one lives in the household
* house_master - in 2 and 3 families households, which family does the housemaster belong to
* family_structure_regex - auxiliary description of a household
* young - flag whether people younger than 30 years old live in a household
* middle - flag, whether people between 30 and 59 inclusive live in a household
* elderly - flag, whether people older than 59 live in a household
:param data_folder: data folder
:param output_folder: where to save an output file
:param population_size: size of a population needing accommodation
:param start_index: index to start numbering households with (default 0)
:return: dataframe of households
"""
household_headcount = []
family_type = []
relationship = []
house_master = []
young = []
middle = []
elderly = []
family_structure_df = pd.read_excel(str(data_folder / household_family_structure_xlsx.file_name),
sheet_name=household_family_structure_xlsx.sheet_name)
households_count_df = pd.read_excel(str(data_folder / households_count_xlsx.file_name),
sheet_name=households_count_xlsx.sheet_name)
generations_configuration_df = get_generations_configuration_df(data_folder, generations_configuration_xlsx)
_sanitize_households_count(households_count_df, population_size)
for i, hc_row in tqdm(households_count_df.iterrows(), total=len(households_count_df.index)):
# family structure given this headcount
fs_df = _filter_family_structures_for_household(family_structure_df, hc_row)
for j, row in fs_df[fs_df.total > 0].iterrows():
household_headcount.extend([row.household_headcount] * row.total)
family_type.extend([row.family_type] * row.total)
relationship.extend([row.relationship] * row.total)
house_master.extend([row.house_master] * row.total)
gc_df = draw_generation_configuration_for_household(generations_configuration_df,
row.household_headcount,
row.family_type,
row.relationship,
row.house_master)
gc_idx = np.random.choice(gc_df.index.tolist(), p=gc_df.probability, size=row.total)
young.extend(gc_df.loc[gc_idx, 'young'])
middle.extend(gc_df.loc[gc_idx, 'middle'])
elderly.extend(gc_df.loc[gc_idx, 'elderly'])
household_indices = list(range(start_index, len(household_headcount) + start_index))
household_df = pd.DataFrame(data=dict(household_index=household_indices,
household_headcount=household_headcount,
family_type=family_type,
relationship=relationship,
house_master=house_master,
young=young, middle=middle, elderly=elderly))
household_df.to_feather(str(output_folder / output_households_basic_feather.file_name))
return household_df
def generate_generations_configuration(data_folder: Path) -> pd.DataFrame:
"""
This function does the preprocessing of Census data for age generations living together in households:
Generations - a table that contains probability of living together. In the original table there are seven columns:
* young alone -> cat1
* middle-aged alone -> cat2
* elderly alone -> cat3
* young and middle-aged together -> cat4
* young and elderly together -> cat5
* middle-aged and elderly together -> cat6
* young, middle-aged and elderly together -> cat7
The function takes occurrences of each category and models them as three boolean columns: young, middle, elderly.
Additionally, family_type field is changed from descriptive, string form into a number (0, 1, 2, 3) that represents
the number of families living in a household.
"""
output_file = data_folder / generations_configuration_xlsx.file_name
if output_file.is_file():
return pd.read_excel(str(output_file), sheet_name=generations_configuration_xlsx.sheet_name)
voivodship_workbook_path = str(data_folder / voivodship_cities_generations_configuration_xlsx.file_name)
v_config_df = pd.read_excel(voivodship_workbook_path, sheet_name='preprocessed', header=[0, 1])
melted = pd.melt(v_config_df, id_vars=[('Unnamed: 0_level_0', 'family_type'),
('Unnamed: 1_level_0', 'relationship'),
('Unnamed: 2_level_0', 'house_master')],
var_name=['unit', 'category'],
value_name='total')
melted = melted.rename(columns={('Unnamed: 0_level_0', 'family_type'): 'family_type',
('Unnamed: 1_level_0', 'relationship'): 'relationship',
('Unnamed: 2_level_0', 'house_master'): 'house_master'})
melted['young'] = melted.category.isin(['cat1', 'cat4', 'cat5', 'cat7']).astype(int)
melted['middle'] = melted.category.isin(['cat2', 'cat4', 'cat6', 'cat7']).astype(int)
melted['elderly'] = melted.category.isin(['cat3', 'cat5', 'cat6', 'cat7']).astype(int)
melted = melted[melted.category != 'total']
melted = melted.drop(columns=['category'])
melted['relationship'] = melted['relationship'].fillna('N/A')
melted['house_master'] = melted['house_master'].fillna('N/A')
pivoted = pd.pivot_table(melted, columns=['unit'], values='total',
index=['family_type', 'relationship', 'house_master', 'young', 'middle', 'elderly'],
aggfunc='first').reset_index()
pivoted.households = pd.to_numeric(pivoted.households, errors='coerce')
pivoted.people = pd.to_numeric(pivoted.people, errors='coerce')
pivoted = pivoted.fillna(0)
pivoted.loc[pivoted['family_type'] == 'Jednorodzinne', 'family_type'] = 1
pivoted.loc[pivoted['family_type'] == 'Dwurodzinne', 'family_type'] = 2
pivoted.loc[pivoted['family_type'] == 'Trzy i więcej rodzinne', 'family_type'] = 3
pivoted.loc[pivoted['family_type'] == 'Nierodzinne', 'family_type'] = 0
book = load_workbook(voivodship_workbook_path)
if voivodship_cities_generations_configuration_xlsx.sheet_name in book.sheetnames:
del book[voivodship_cities_generations_configuration_xlsx.sheet_name]
with closing(pd.ExcelWriter(voivodship_workbook_path, engine='openpyxl')) as writer:
writer.book = book
pivoted.to_excel(writer, sheet_name=voivodship_cities_generations_configuration_xlsx.sheet_name, index=False)
writer.save()
# update with probabilities
df = pivoted.groupby(by=['family_type', 'relationship', 'house_master'])['households'].sum().reset_index() \
.rename(columns={'households': 'total'})
pivoted = pivoted.merge(df, how='left', on=['family_type', 'relationship', 'house_master'])
pivoted['probability'] = pivoted['households'] / pivoted['total']
pivoted.to_excel(str(output_file), sheet_name=generations_configuration_xlsx.sheet_name, index=False)
return pivoted
def voivodship_to_symbol(voivodship: str):
voivodship = voivodship.lower()
voivodship_dict = {
'podlaskie': 'B',
'kujawsko-pomorskie': 'C',
'dolnośląskie': 'D',
'łódzkie': 'E',
'lubuskie': 'F',
'pomorskie': 'G',
'małopolskie': 'K',
'lubelskie': 'L',
'warmińsko-mazurskie': 'N',
'opolskie': 'O',
'wielkopolskie': 'P',
'podkarpackie': 'R',
'śląskie': 'S',
'świętokrzyskie': 'T',
'mazowieckie': 'W',
'zachodniopomorskie': 'Z'}
return voivodship_dict.get(voivodship)
if __name__ == '__main__':
project_dir = Path(__file__).resolve().parents[2]
city_folder = project_dir / 'data' / 'processed' / 'poland' / 'WW'
prepare_family_structure_from_voivodship(city_folder)
generate_generations_configuration(city_folder)
| """
Preprocesses the family structure excel for a voivodship from pivoted to melted table for easier further processing.
"""
if (data_folder / household_family_structure_xlsx.file_name).is_file():
return pd.read_excel(str(data_folder / household_family_structure_xlsx.file_name))
headcount_columns = [1, 2, 3, 4, 5, 6, 7]
try:
df = pd.read_excel(str(data_folder / voivodship_cities_household_family_structure_xlsx.file_name),
sheet_name=voivodship_cities_household_family_structure_xlsx.sheet_name)
except XLRDError: # sheet not found
df_q = pd.read_excel(str(data_folder / voivodship_cities_household_family_structure_xlsx.file_name),
sheet_name='quantities')
df = df_q.copy()
for column in headcount_columns:
df[column] = df[column] / df[column].sum()
# FIXME: append a sheet instead of replacing it
with closing(pd.ExcelWriter(str(data_folder / voivodship_cities_household_family_structure_xlsx.file_name),
engine='openpyxl')) as writer:
df.to_excel(writer, sheet_name=voivodship_cities_household_family_structure_xlsx.sheet_name, index=False)
df2 = pd.melt(df,
id_vars=['family_type', 'relationship', 'house master'],
value_vars=[1, 2, 3, 4, 5, 6, 7], var_name='household_headcount',
value_name='probability_within_headcount').rename(columns={'house master': 'house_master'})
df2.to_excel(str(data_folder / household_family_structure_xlsx.file_name), index=False)
return df2 | identifier_body |
preprocessing_poland.py | import logging
from contextlib import closing
from pathlib import Path
import numpy as np
import pandas as pd
from tqdm import tqdm
from openpyxl import load_workbook
from xlrd import XLRDError
from src.data.datasets import *
def prepare_family_structure_from_voivodship(data_folder: Path) -> pd.DataFrame:
"""
Preprocesses the family structure excel for a voivodship from pivoted to melted table for easier further processing.
"""
if (data_folder / household_family_structure_xlsx.file_name).is_file():
return pd.read_excel(str(data_folder / household_family_structure_xlsx.file_name))
headcount_columns = [1, 2, 3, 4, 5, 6, 7]
try:
df = pd.read_excel(str(data_folder / voivodship_cities_household_family_structure_xlsx.file_name),
sheet_name=voivodship_cities_household_family_structure_xlsx.sheet_name)
except XLRDError: # sheet not found
df_q = pd.read_excel(str(data_folder / voivodship_cities_household_family_structure_xlsx.file_name),
sheet_name='quantities')
df = df_q.copy()
for column in headcount_columns:
df[column] = df[column] / df[column].sum()
# FIXME: append a sheet instead of replacing it
with closing(pd.ExcelWriter(str(data_folder / voivodship_cities_household_family_structure_xlsx.file_name),
engine='openpyxl')) as writer:
df.to_excel(writer, sheet_name=voivodship_cities_household_family_structure_xlsx.sheet_name, index=False)
df2 = pd.melt(df,
id_vars=['family_type', 'relationship', 'house master'],
value_vars=[1, 2, 3, 4, 5, 6, 7], var_name='household_headcount',
value_name='probability_within_headcount').rename(columns={'house master': 'house_master'})
df2.to_excel(str(data_folder / household_family_structure_xlsx.file_name), index=False)
return df2
def temporary_hack(fcn):
# FIXME: introducing a hack that is actually going to destroy the probability distribution
# since it is not possible to have 3 generations in a household where only 2 people live
# but there is no distribution given for that
# the input data has to be replaced with a proper distribution that also depends on headcount to have these values
# correct
def inner(df, headcount, family_type, relationship, house_master):
out_df = fcn(df, headcount, family_type, relationship, house_master)
if len(out_df[out_df['nb_generations'] <= headcount]) > 0:
out_df = out_df[out_df['nb_generations'] <= headcount]
out_df['probability'] /= out_df['probability'].sum()
return out_df
return inner
@temporary_hack
def draw_generation_configuration_for_household(df, headcount, family_type, relationship, house_master):
"""
Given a headcount, family type (0,1,2,3), relationship between families
(if applicable) and who the housemaster is (in multi-family households), this method returns all matching
households in the df dataframe.
"""
if house_master not in (np.nan, '', None) and isinstance(house_master, str):
house_master = house_master.strip()
if relationship not in (np.nan, '', None) and isinstance(relationship, str):
relationship = relationship.strip()
if family_type == 1:
if house_master not in (np.nan, '', None):
return df[(df.family_type == family_type) & (df.relationship == relationship)
& (df.house_master == house_master)]
return df[(df.family_type == family_type) & (df.relationship == relationship)]
if family_type == 2:
if house_master not in (np.nan, '', None):
out_df = df[(df.family_type == family_type) & (df.relationship == relationship)
& (df.house_master == house_master)]
if len(out_df) > 0:
return out_df
if relationship not in (np.nan, '', None):
out_df = df[(df.family_type == family_type) & (df.relationship == relationship)]
if len(out_df) > 0:
return out_df
return df[(df.family_type == family_type)]
if family_type == 3:
return df[(df.family_type == family_type)]
if family_type == 0:
if headcount == 1:
return df[(df.family_type == family_type) & (df.relationship == 'Jednoosobowe')]
return df[(df.family_type == family_type) & (df.relationship == 'Wieloosobowe')]
raise ValueError(f'Unknown family type {family_type}')
def _sanitize_households_count(households_count_df, population_size):
old_population = (households_count_df['nb_of_people_in_household'] * households_count_df['nb_of_households']).sum()
households_count_df['nb_of_households'] *= (population_size / old_population)
households_count_df['nb_of_households'] = households_count_df['nb_of_households'].apply(np.ceil).astype(int)
assert (households_count_df['nb_of_people_in_household'] * households_count_df['nb_of_households']).sum() \
>= population_size
def _filter_family_structures_for_household(family_structure_df, hc_row):
fs_df = family_structure_df[family_structure_df.household_headcount == hc_row.nb_of_people_in_household].copy()
fs_df['total'] = (np.round(fs_df.probability_within_headcount * hc_row.nb_of_households)).astype(int)
difference_due_to_rounding = hc_row.nb_of_households - fs_df['total'].sum()
if difference_due_to_rounding != 0:
logging.info(f'Difference due to numeric error {difference_due_to_rounding} - assigning randomly')
try:
fs_df.loc[np.random.choice(fs_df[fs_df.total > 0].index.tolist()), 'total'] += difference_due_to_rounding
except ValueError:
fs_df.loc[np.random.choice(fs_df.index.tolist()), 'total'] += difference_due_to_rounding
return fs_df
def get_generations_configuration_df(data_folder: Path, xlsx_file: XlsxFile) -> pd.DataFrame:
generations_configuration_df = pd.read_excel(str(data_folder / xlsx_file.file_name),
sheet_name=xlsx_file.sheet_name)
generations_configuration_df['nb_generations'] = generations_configuration_df['young'] \
+ generations_configuration_df['middle'] \
+ generations_configuration_df['elderly']
return generations_configuration_df
def generate_household_indices(data_folder: Path, output_folder: Path, population_size: int, start_index: int = 0) \
-> pd.DataFrame:
"""Generates and saves to an excel file a dataframe of households. Each household consists of:
* an index,
* headcount,
* family_type (0,1,2,3 - number of families in the household)
* relationship - between families, if more than one lives in the household
* house_master - in 2 and 3 families households, which family does the housemaster belong to | * young - flag whether people younger than 30 years old live in a household
* middle - flag, whether people between 30 and 59 inclusive live in a household
* elderly - flag, whether people older than 59 live in a household
:param data_folder: data folder
:param output_folder: where to save an output file
:param population_size: size of a population needing accommodation
:param start_index: index to start numbering households with (default 0)
:return: dataframe of households
"""
household_headcount = []
family_type = []
relationship = []
house_master = []
young = []
middle = []
elderly = []
family_structure_df = pd.read_excel(str(data_folder / household_family_structure_xlsx.file_name),
sheet_name=household_family_structure_xlsx.sheet_name)
households_count_df = pd.read_excel(str(data_folder / households_count_xlsx.file_name),
sheet_name=households_count_xlsx.sheet_name)
generations_configuration_df = get_generations_configuration_df(data_folder, generations_configuration_xlsx)
_sanitize_households_count(households_count_df, population_size)
for i, hc_row in tqdm(households_count_df.iterrows(), total=len(households_count_df.index)):
# family structure given this headcount
fs_df = _filter_family_structures_for_household(family_structure_df, hc_row)
for j, row in fs_df[fs_df.total > 0].iterrows():
household_headcount.extend([row.household_headcount] * row.total)
family_type.extend([row.family_type] * row.total)
relationship.extend([row.relationship] * row.total)
house_master.extend([row.house_master] * row.total)
gc_df = draw_generation_configuration_for_household(generations_configuration_df,
row.household_headcount,
row.family_type,
row.relationship,
row.house_master)
gc_idx = np.random.choice(gc_df.index.tolist(), p=gc_df.probability, size=row.total)
young.extend(gc_df.loc[gc_idx, 'young'])
middle.extend(gc_df.loc[gc_idx, 'middle'])
elderly.extend(gc_df.loc[gc_idx, 'elderly'])
household_indices = list(range(start_index, len(household_headcount) + start_index))
household_df = pd.DataFrame(data=dict(household_index=household_indices,
household_headcount=household_headcount,
family_type=family_type,
relationship=relationship,
house_master=house_master,
young=young, middle=middle, elderly=elderly))
household_df.to_feather(str(output_folder / output_households_basic_feather.file_name))
return household_df
def generate_generations_configuration(data_folder: Path) -> pd.DataFrame:
"""
This function does the preprocessing of Census data for age generations living together in households:
Generations - a table that contains probability of living together. In the original table there are seven columns:
* young alone -> cat1
* middle-aged alone -> cat2
* elderly alone -> cat3
* young and middle-aged together -> cat4
* young and elderly together -> cat5
* middle-aged and elderly together -> cat6
* young, middle-aged and elderly together -> cat7
The function takes occurrences of each category and models them as three boolean columns: young, middle, elderly.
Additionally, family_type field is changed from descriptive, string form into a number (0, 1, 2, 3) that represents
the number of families living in a household.
"""
output_file = data_folder / generations_configuration_xlsx.file_name
if output_file.is_file():
return pd.read_excel(str(output_file), sheet_name=generations_configuration_xlsx.sheet_name)
voivodship_workbook_path = str(data_folder / voivodship_cities_generations_configuration_xlsx.file_name)
v_config_df = pd.read_excel(voivodship_workbook_path, sheet_name='preprocessed', header=[0, 1])
melted = pd.melt(v_config_df, id_vars=[('Unnamed: 0_level_0', 'family_type'),
('Unnamed: 1_level_0', 'relationship'),
('Unnamed: 2_level_0', 'house_master')],
var_name=['unit', 'category'],
value_name='total')
melted = melted.rename(columns={('Unnamed: 0_level_0', 'family_type'): 'family_type',
('Unnamed: 1_level_0', 'relationship'): 'relationship',
('Unnamed: 2_level_0', 'house_master'): 'house_master'})
melted['young'] = melted.category.isin(['cat1', 'cat4', 'cat5', 'cat7']).astype(int)
melted['middle'] = melted.category.isin(['cat2', 'cat4', 'cat6', 'cat7']).astype(int)
melted['elderly'] = melted.category.isin(['cat3', 'cat5', 'cat6', 'cat7']).astype(int)
melted = melted[melted.category != 'total']
melted = melted.drop(columns=['category'])
melted['relationship'] = melted['relationship'].fillna('N/A')
melted['house_master'] = melted['house_master'].fillna('N/A')
pivoted = pd.pivot_table(melted, columns=['unit'], values='total',
index=['family_type', 'relationship', 'house_master', 'young', 'middle', 'elderly'],
aggfunc='first').reset_index()
pivoted.households = pd.to_numeric(pivoted.households, errors='coerce')
pivoted.people = pd.to_numeric(pivoted.people, errors='coerce')
pivoted = pivoted.fillna(0)
pivoted.loc[pivoted['family_type'] == 'Jednorodzinne', 'family_type'] = 1
pivoted.loc[pivoted['family_type'] == 'Dwurodzinne', 'family_type'] = 2
pivoted.loc[pivoted['family_type'] == 'Trzy i więcej rodzinne', 'family_type'] = 3
pivoted.loc[pivoted['family_type'] == 'Nierodzinne', 'family_type'] = 0
book = load_workbook(voivodship_workbook_path)
if voivodship_cities_generations_configuration_xlsx.sheet_name in book.sheetnames:
del book[voivodship_cities_generations_configuration_xlsx.sheet_name]
with closing(pd.ExcelWriter(voivodship_workbook_path, engine='openpyxl')) as writer:
writer.book = book
pivoted.to_excel(writer, sheet_name=voivodship_cities_generations_configuration_xlsx.sheet_name, index=False)
writer.save()
# update with probabilities
df = pivoted.groupby(by=['family_type', 'relationship', 'house_master'])['households'].sum().reset_index() \
.rename(columns={'households': 'total'})
pivoted = pivoted.merge(df, how='left', on=['family_type', 'relationship', 'house_master'])
pivoted['probability'] = pivoted['households'] / pivoted['total']
pivoted.to_excel(str(output_file), sheet_name=generations_configuration_xlsx.sheet_name, index=False)
return pivoted
def voivodship_to_symbol(voivodship: str):
voivodship = voivodship.lower()
voivodship_dict = {
'podlaskie': 'B',
'kujawsko-pomorskie': 'C',
'dolnośląskie': 'D',
'łódzkie': 'E',
'lubuskie': 'F',
'pomorskie': 'G',
'małopolskie': 'K',
'lubelskie': 'L',
'warmińsko-mazurskie': 'N',
'opolskie': 'O',
'wielkopolskie': 'P',
'podkarpackie': 'R',
'śląskie': 'S',
'świętokrzyskie': 'T',
'mazowieckie': 'W',
'zachodniopomorskie': 'Z'}
return voivodship_dict.get(voivodship)
if __name__ == '__main__':
project_dir = Path(__file__).resolve().parents[2]
city_folder = project_dir / 'data' / 'processed' / 'poland' / 'WW'
prepare_family_structure_from_voivodship(city_folder)
generate_generations_configuration(city_folder) | * family_structure_regex - auxiliary description of a household | random_line_split |
preprocessing_poland.py | import logging
from contextlib import closing
from pathlib import Path
import numpy as np
import pandas as pd
from tqdm import tqdm
from openpyxl import load_workbook
from xlrd import XLRDError
from src.data.datasets import *
def prepare_family_structure_from_voivodship(data_folder: Path) -> pd.DataFrame:
"""
Preprocesses the family structure excel for a voivodship from pivoted to melted table for easier further processing.
"""
if (data_folder / household_family_structure_xlsx.file_name).is_file():
return pd.read_excel(str(data_folder / household_family_structure_xlsx.file_name))
headcount_columns = [1, 2, 3, 4, 5, 6, 7]
try:
df = pd.read_excel(str(data_folder / voivodship_cities_household_family_structure_xlsx.file_name),
sheet_name=voivodship_cities_household_family_structure_xlsx.sheet_name)
except XLRDError: # sheet not found
df_q = pd.read_excel(str(data_folder / voivodship_cities_household_family_structure_xlsx.file_name),
sheet_name='quantities')
df = df_q.copy()
for column in headcount_columns:
df[column] = df[column] / df[column].sum()
# FIXME: append a sheet instead of replacing it
with closing(pd.ExcelWriter(str(data_folder / voivodship_cities_household_family_structure_xlsx.file_name),
engine='openpyxl')) as writer:
df.to_excel(writer, sheet_name=voivodship_cities_household_family_structure_xlsx.sheet_name, index=False)
df2 = pd.melt(df,
id_vars=['family_type', 'relationship', 'house master'],
value_vars=[1, 2, 3, 4, 5, 6, 7], var_name='household_headcount',
value_name='probability_within_headcount').rename(columns={'house master': 'house_master'})
df2.to_excel(str(data_folder / household_family_structure_xlsx.file_name), index=False)
return df2
def temporary_hack(fcn):
# FIXME: introducing a hack that is actually going to destroy the probability distribution
# since it is not possible to have 3 generations in a household where only 2 people live
# but there is no distribution given for that
# the input data has to be replaced with a proper distribution that also depends on headcount to have these values
# correct
def inner(df, headcount, family_type, relationship, house_master):
out_df = fcn(df, headcount, family_type, relationship, house_master)
if len(out_df[out_df['nb_generations'] <= headcount]) > 0:
out_df = out_df[out_df['nb_generations'] <= headcount]
out_df['probability'] /= out_df['probability'].sum()
return out_df
return inner
@temporary_hack
def draw_generation_configuration_for_household(df, headcount, family_type, relationship, house_master):
"""
Given a headcount, family type (0,1,2,3), relationship between families
(if applicable) and who the housemaster is (in multi-family households), this method returns all matching
households in the df dataframe.
"""
if house_master not in (np.nan, '', None) and isinstance(house_master, str):
house_master = house_master.strip()
if relationship not in (np.nan, '', None) and isinstance(relationship, str):
relationship = relationship.strip()
if family_type == 1:
if house_master not in (np.nan, '', None):
return df[(df.family_type == family_type) & (df.relationship == relationship)
& (df.house_master == house_master)]
return df[(df.family_type == family_type) & (df.relationship == relationship)]
if family_type == 2:
|
if family_type == 3:
return df[(df.family_type == family_type)]
if family_type == 0:
if headcount == 1:
return df[(df.family_type == family_type) & (df.relationship == 'Jednoosobowe')]
return df[(df.family_type == family_type) & (df.relationship == 'Wieloosobowe')]
raise ValueError(f'Unknown family type {family_type}')
def _sanitize_households_count(households_count_df, population_size):
old_population = (households_count_df['nb_of_people_in_household'] * households_count_df['nb_of_households']).sum()
households_count_df['nb_of_households'] *= (population_size / old_population)
households_count_df['nb_of_households'] = households_count_df['nb_of_households'].apply(np.ceil).astype(int)
assert (households_count_df['nb_of_people_in_household'] * households_count_df['nb_of_households']).sum() \
>= population_size
def _filter_family_structures_for_household(family_structure_df, hc_row):
fs_df = family_structure_df[family_structure_df.household_headcount == hc_row.nb_of_people_in_household].copy()
fs_df['total'] = (np.round(fs_df.probability_within_headcount * hc_row.nb_of_households)).astype(int)
difference_due_to_rounding = hc_row.nb_of_households - fs_df['total'].sum()
if difference_due_to_rounding != 0:
logging.info(f'Difference due to numeric error {difference_due_to_rounding} - assigning randomly')
try:
fs_df.loc[np.random.choice(fs_df[fs_df.total > 0].index.tolist()), 'total'] += difference_due_to_rounding
except ValueError:
fs_df.loc[np.random.choice(fs_df.index.tolist()), 'total'] += difference_due_to_rounding
return fs_df
def get_generations_configuration_df(data_folder: Path, xlsx_file: XlsxFile) -> pd.DataFrame:
generations_configuration_df = pd.read_excel(str(data_folder / xlsx_file.file_name),
sheet_name=xlsx_file.sheet_name)
generations_configuration_df['nb_generations'] = generations_configuration_df['young'] \
+ generations_configuration_df['middle'] \
+ generations_configuration_df['elderly']
return generations_configuration_df
def generate_household_indices(data_folder: Path, output_folder: Path, population_size: int, start_index: int = 0) \
-> pd.DataFrame:
"""Generates and saves to an excel file a dataframe of households. Each household consists of:
* an index,
* headcount,
* family_type (0,1,2,3 - number of families in the household)
* relationship - between families, if more than one lives in the household
* house_master - in 2 and 3 families households, which family does the housemaster belong to
* family_structure_regex - auxiliary description of a household
* young - flag whether people younger than 30 years old live in a household
* middle - flag, whether people between 30 and 59 inclusive live in a household
* elderly - flag, whether people older than 59 live in a household
:param data_folder: data folder
:param output_folder: where to save an output file
:param population_size: size of a population needing accommodation
:param start_index: index to start numbering households with (default 0)
:return: dataframe of households
"""
household_headcount = []
family_type = []
relationship = []
house_master = []
young = []
middle = []
elderly = []
family_structure_df = pd.read_excel(str(data_folder / household_family_structure_xlsx.file_name),
sheet_name=household_family_structure_xlsx.sheet_name)
households_count_df = pd.read_excel(str(data_folder / households_count_xlsx.file_name),
sheet_name=households_count_xlsx.sheet_name)
generations_configuration_df = get_generations_configuration_df(data_folder, generations_configuration_xlsx)
_sanitize_households_count(households_count_df, population_size)
for i, hc_row in tqdm(households_count_df.iterrows(), total=len(households_count_df.index)):
# family structure given this headcount
fs_df = _filter_family_structures_for_household(family_structure_df, hc_row)
for j, row in fs_df[fs_df.total > 0].iterrows():
household_headcount.extend([row.household_headcount] * row.total)
family_type.extend([row.family_type] * row.total)
relationship.extend([row.relationship] * row.total)
house_master.extend([row.house_master] * row.total)
gc_df = draw_generation_configuration_for_household(generations_configuration_df,
row.household_headcount,
row.family_type,
row.relationship,
row.house_master)
gc_idx = np.random.choice(gc_df.index.tolist(), p=gc_df.probability, size=row.total)
young.extend(gc_df.loc[gc_idx, 'young'])
middle.extend(gc_df.loc[gc_idx, 'middle'])
elderly.extend(gc_df.loc[gc_idx, 'elderly'])
household_indices = list(range(start_index, len(household_headcount) + start_index))
household_df = pd.DataFrame(data=dict(household_index=household_indices,
household_headcount=household_headcount,
family_type=family_type,
relationship=relationship,
house_master=house_master,
young=young, middle=middle, elderly=elderly))
household_df.to_feather(str(output_folder / output_households_basic_feather.file_name))
return household_df
def generate_generations_configuration(data_folder: Path) -> pd.DataFrame:
"""
This function does the preprocessing of Census data for age generations living together in households:
Generations - a table that contains probability of living together. In the original table there are seven columns:
* young alone -> cat1
* middle-aged alone -> cat2
* elderly alone -> cat3
* young and middle-aged together -> cat4
* young and elderly together -> cat5
* middle-aged and elderly together -> cat6
* young, middle-aged and elderly together -> cat7
The function takes occurrences of each category and models them as three boolean columns: young, middle, elderly.
Additionally, family_type field is changed from descriptive, string form into a number (0, 1, 2, 3) that represents
the number of families living in a household.
"""
output_file = data_folder / generations_configuration_xlsx.file_name
if output_file.is_file():
return pd.read_excel(str(output_file), sheet_name=generations_configuration_xlsx.sheet_name)
voivodship_workbook_path = str(data_folder / voivodship_cities_generations_configuration_xlsx.file_name)
v_config_df = pd.read_excel(voivodship_workbook_path, sheet_name='preprocessed', header=[0, 1])
melted = pd.melt(v_config_df, id_vars=[('Unnamed: 0_level_0', 'family_type'),
('Unnamed: 1_level_0', 'relationship'),
('Unnamed: 2_level_0', 'house_master')],
var_name=['unit', 'category'],
value_name='total')
melted = melted.rename(columns={('Unnamed: 0_level_0', 'family_type'): 'family_type',
('Unnamed: 1_level_0', 'relationship'): 'relationship',
('Unnamed: 2_level_0', 'house_master'): 'house_master'})
melted['young'] = melted.category.isin(['cat1', 'cat4', 'cat5', 'cat7']).astype(int)
melted['middle'] = melted.category.isin(['cat2', 'cat4', 'cat6', 'cat7']).astype(int)
melted['elderly'] = melted.category.isin(['cat3', 'cat5', 'cat6', 'cat7']).astype(int)
melted = melted[melted.category != 'total']
melted = melted.drop(columns=['category'])
melted['relationship'] = melted['relationship'].fillna('N/A')
melted['house_master'] = melted['house_master'].fillna('N/A')
pivoted = pd.pivot_table(melted, columns=['unit'], values='total',
index=['family_type', 'relationship', 'house_master', 'young', 'middle', 'elderly'],
aggfunc='first').reset_index()
pivoted.households = pd.to_numeric(pivoted.households, errors='coerce')
pivoted.people = pd.to_numeric(pivoted.people, errors='coerce')
pivoted = pivoted.fillna(0)
pivoted.loc[pivoted['family_type'] == 'Jednorodzinne', 'family_type'] = 1
pivoted.loc[pivoted['family_type'] == 'Dwurodzinne', 'family_type'] = 2
pivoted.loc[pivoted['family_type'] == 'Trzy i więcej rodzinne', 'family_type'] = 3
pivoted.loc[pivoted['family_type'] == 'Nierodzinne', 'family_type'] = 0
book = load_workbook(voivodship_workbook_path)
if voivodship_cities_generations_configuration_xlsx.sheet_name in book.sheetnames:
del book[voivodship_cities_generations_configuration_xlsx.sheet_name]
with closing(pd.ExcelWriter(voivodship_workbook_path, engine='openpyxl')) as writer:
writer.book = book
pivoted.to_excel(writer, sheet_name=voivodship_cities_generations_configuration_xlsx.sheet_name, index=False)
writer.save()
# update with probabilities
df = pivoted.groupby(by=['family_type', 'relationship', 'house_master'])['households'].sum().reset_index() \
.rename(columns={'households': 'total'})
pivoted = pivoted.merge(df, how='left', on=['family_type', 'relationship', 'house_master'])
pivoted['probability'] = pivoted['households'] / pivoted['total']
pivoted.to_excel(str(output_file), sheet_name=generations_configuration_xlsx.sheet_name, index=False)
return pivoted
def voivodship_to_symbol(voivodship: str):
voivodship = voivodship.lower()
voivodship_dict = {
'podlaskie': 'B',
'kujawsko-pomorskie': 'C',
'dolnośląskie': 'D',
'łódzkie': 'E',
'lubuskie': 'F',
'pomorskie': 'G',
'małopolskie': 'K',
'lubelskie': 'L',
'warmińsko-mazurskie': 'N',
'opolskie': 'O',
'wielkopolskie': 'P',
'podkarpackie': 'R',
'śląskie': 'S',
'świętokrzyskie': 'T',
'mazowieckie': 'W',
'zachodniopomorskie': 'Z'}
return voivodship_dict.get(voivodship)
if __name__ == '__main__':
project_dir = Path(__file__).resolve().parents[2]
city_folder = project_dir / 'data' / 'processed' / 'poland' / 'WW'
prepare_family_structure_from_voivodship(city_folder)
generate_generations_configuration(city_folder)
| if house_master not in (np.nan, '', None):
out_df = df[(df.family_type == family_type) & (df.relationship == relationship)
& (df.house_master == house_master)]
if len(out_df) > 0:
return out_df
if relationship not in (np.nan, '', None):
out_df = df[(df.family_type == family_type) & (df.relationship == relationship)]
if len(out_df) > 0:
return out_df
return df[(df.family_type == family_type)] | conditional_block |
progress.rs | use num_enum::IntoPrimitive;
use once_cell::sync::Lazy;
use std::sync::mpsc::Sender;
use std::{mem, pin::Pin, ptr};
use wchar::*;
use widestring::*;
use winapi::shared::basetsd;
use winapi::shared::minwindef as win;
use winapi::shared::windef::*;
use winapi::um::commctrl;
use winapi::um::errhandlingapi;
use winapi::um::libloaderapi;
use winapi::um::wingdi;
use winapi::um::winuser;
use wslscript_common::error::*;
use wslscript_common::font::Font;
use wslscript_common::wcstring;
use wslscript_common::win32;
pub struct ProgressWindow {
/// Maximum value for progress.
high_limit: usize,
/// Sender to signal for cancellation.
cancel_sender: Option<Sender<()>>,
/// Window handle.
hwnd: HWND,
/// Default font.
font: Font,
}
impl Default for ProgressWindow {
fn default() -> Self {
Self {
high_limit: 0,
cancel_sender: None,
hwnd: ptr::null_mut(),
font: Font::default(),
}
}
}
/// Progress window class name.
static WND_CLASS: Lazy<WideCString> = Lazy::new(|| wcstring("WSLScriptProgress"));
/// Window message for progress update.
pub const WM_PROGRESS: win::UINT = winuser::WM_USER + 1;
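// Illustrative sketch (added comment, not part of the original file): a worker thread
// holding the window handle could report progress by posting this message, e.g.
// `unsafe { winuser::PostMessageW(wnd.handle(), WM_PROGRESS, current, total as _) };`
// `window_proc` below routes WPARAM/LPARAM into `update_progress(current, max)`.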
/// Child window identifiers.
#[derive(IntoPrimitive, PartialEq)]
#[repr(u16)]
enum Control {
ProgressBar = 100,
Message,
Title,
}
/// Minimum and initial main window size as a (width, height) tuple.
const MIN_WINDOW_SIZE: (i32, i32) = (300, 150);
impl ProgressWindow {
pub fn new(high_limit: usize, cancel_sender: Sender<()>) -> Result<Pin<Box<Self>>, Error> {
use winuser::*;
// register window class
if !Self::is_window_class_registered() {
Self::register_window_class()?;
}
let mut wnd = Pin::new(Box::new(Self::default()));
wnd.high_limit = high_limit;
wnd.cancel_sender = Some(cancel_sender);
let instance = unsafe { libloaderapi::GetModuleHandleW(ptr::null_mut()) };
let title = wchz!("WSL Script");
// create window
#[rustfmt::skip]
let hwnd = unsafe { CreateWindowExW(
WS_EX_TOOLWINDOW | WS_EX_TOPMOST, WND_CLASS.as_ptr(), title.as_ptr(),
WS_OVERLAPPEDWINDOW & !WS_MAXIMIZEBOX | WS_VISIBLE,
CW_USEDEFAULT, CW_USEDEFAULT, MIN_WINDOW_SIZE.0, MIN_WINDOW_SIZE.1,
ptr::null_mut(), ptr::null_mut(), instance,
// self as a `CREATESTRUCT`'s `lpCreateParams`
&*wnd as *const Self as win::LPVOID)
};
if hwnd.is_null() {
return Err(win32::last_error());
}
Ok(wnd)
}
/// Get handle to main window.
pub fn handle(&self) -> HWND {
self.hwnd
}
/// Run message loop.
pub fn run(&self) -> Result<(), Error> {
log::debug!("Starting message loop");
loop {
let mut msg: winuser::MSG = unsafe { mem::zeroed() };
match unsafe { winuser::GetMessageW(&mut msg, ptr::null_mut(), 0, 0) } {
1..=std::i32::MAX => unsafe {
winuser::TranslateMessage(&msg);
winuser::DispatchMessageW(&msg);
},
std::i32::MIN..=-1 => return Err(win32::last_error()),
0 => {
log::debug!("Received WM_QUIT");
return Ok(());
}
}
}
}
| /// Signal that progress should be cancelled.
pub fn cancel(&self) {
if let Some(tx) = &self.cancel_sender {
tx.send(()).unwrap_or_else(|_| {
log::error!("Failed to send cancel signal");
});
}
}
/// Close main window.
pub fn close(&self) {
unsafe { winuser::PostMessageW(self.hwnd, winuser::WM_CLOSE, 0, 0) };
}
/// Create child control windows.
fn create_window_controls(&mut self) -> Result<(), Error> {
use winuser::*;
let instance = unsafe { GetWindowLongPtrW(self.hwnd, GWLP_HINSTANCE) as win::HINSTANCE };
self.font = Font::new_caption(20)?;
// init common controls
let icex = commctrl::INITCOMMONCONTROLSEX {
dwSize: mem::size_of::<commctrl::INITCOMMONCONTROLSEX>() as u32,
dwICC: commctrl::ICC_PROGRESS_CLASS,
};
unsafe { commctrl::InitCommonControlsEx(&icex) };
// progress bar
#[rustfmt::skip]
let hwnd = unsafe { CreateWindowExW(
0, wcstring(commctrl::PROGRESS_CLASS).as_ptr(), ptr::null_mut(),
WS_CHILD | WS_VISIBLE | commctrl::PBS_MARQUEE,
0, 0, 0, 0, self.hwnd,
Control::ProgressBar as u16 as _, instance, ptr::null_mut(),
) };
unsafe { SendMessageW(hwnd, commctrl::PBM_SETRANGE32, 0, self.high_limit as _) };
unsafe { SendMessageW(hwnd, commctrl::PBM_SETMARQUEE, 1, 0) };
// static message area
#[rustfmt::skip]
let hwnd = unsafe { CreateWindowExW(
0, wchz!("STATIC").as_ptr(), ptr::null_mut(),
SS_CENTER | WS_CHILD | WS_VISIBLE,
0, 0, 0, 0, self.hwnd,
Control::Message as u16 as _, instance, ptr::null_mut(),
) };
Self::set_window_font(hwnd, &self.font);
// static title
#[rustfmt::skip]
let hwnd = unsafe { CreateWindowExW(
0, wchz!("STATIC").as_ptr(), ptr::null_mut(),
SS_CENTER | WS_CHILD | WS_VISIBLE,
0, 0, 0, 0, self.hwnd,
Control::Title as u16 as _, instance, ptr::null_mut(),
) };
Self::set_window_font(hwnd, &self.font);
unsafe { SetWindowTextW(hwnd, wchz!("Converting paths...").as_ptr()) };
Ok(())
}
/// Called when client was resized.
fn on_resize(&self, width: i32, _height: i32) {
self.move_control(Control::Title, 10, 10, width - 20, 20);
self.move_control(Control::ProgressBar, 10, 40, width - 20, 30);
self.move_control(Control::Message, 10, 80, width - 20, 20);
}
/// Move control relative to main window.
fn move_control(&self, control: Control, x: i32, y: i32, width: i32, height: i32) {
let hwnd = self.get_control_handle(control);
unsafe { winuser::MoveWindow(hwnd, x, y, width, height, win::TRUE) };
}
/// Get window handle of given control.
fn get_control_handle(&self, control: Control) -> HWND {
unsafe { winuser::GetDlgItem(self.hwnd, control as i32) }
}
/// Set font to given window.
fn set_window_font(hwnd: HWND, font: &Font) {
unsafe {
winuser::SendMessageW(hwnd, winuser::WM_SETFONT, font.handle as _, win::TRUE as _)
};
}
/// Update controls to display given progress.
fn update_progress(&mut self, current: usize, max: usize) {
use commctrl::*;
use winuser::*;
log::debug!("Progress update: {}/{}", current, max);
let msg = format!("{} / {}", current, max);
unsafe {
SetWindowTextW(
self.get_control_handle(Control::Message),
wcstring(msg).as_ptr(),
)
};
if self.is_marquee_progress() {
self.set_progress_to_range_mode();
}
let hwnd = self.get_control_handle(Control::ProgressBar);
unsafe { SendMessageW(hwnd, PBM_SETPOS, current, 0) };
// if done, close cancellation channel
if current == max {
self.cancel_sender.take();
}
}
/// Check whether progress bar is in marquee mode.
fn is_marquee_progress(&self) -> bool {
let style = unsafe {
winuser::GetWindowLongW(
self.get_control_handle(Control::ProgressBar),
winuser::GWL_STYLE,
)
} as u32;
style & commctrl::PBS_MARQUEE != 0
}
/// Set progress bar to range mode.
fn set_progress_to_range_mode(&self) {
use commctrl::*;
use winuser::*;
let hwnd = self.get_control_handle(Control::ProgressBar);
let mut style = unsafe { GetWindowLongW(hwnd, GWL_STYLE) } as u32;
style &= !PBS_MARQUEE;
style |= PBS_SMOOTH;
unsafe { SetWindowLongW(hwnd, GWL_STYLE, style as _) };
unsafe { SendMessageW(hwnd, PBM_SETMARQUEE, 0, 0) };
}
}
impl ProgressWindow {
/// Check whether window class is registered.
pub fn is_window_class_registered() -> bool {
unsafe {
let instance = libloaderapi::GetModuleHandleW(ptr::null_mut());
let mut wc: winuser::WNDCLASSEXW = mem::zeroed();
winuser::GetClassInfoExW(instance, WND_CLASS.as_ptr(), &mut wc) != 0
}
}
/// Register window class.
pub fn register_window_class() -> Result<(), Error> {
use winuser::*;
log::debug!("Registering {} window class", WND_CLASS.to_string_lossy());
let instance = unsafe { libloaderapi::GetModuleHandleW(ptr::null_mut()) };
let wc = WNDCLASSEXW {
cbSize: mem::size_of::<WNDCLASSEXW>() as u32,
style: CS_OWNDC | CS_HREDRAW | CS_VREDRAW,
hbrBackground: (COLOR_WINDOW + 1) as HBRUSH,
lpfnWndProc: Some(window_proc_wrapper::<ProgressWindow>),
hInstance: instance,
lpszClassName: WND_CLASS.as_ptr(),
hIcon: ptr::null_mut(),
hCursor: unsafe { LoadCursorW(ptr::null_mut(), IDC_ARROW) },
..unsafe { mem::zeroed() }
};
if 0 == unsafe { RegisterClassExW(&wc) } {
Err(win32::last_error())
} else {
Ok(())
}
}
/// Unregister window class.
pub fn unregister_window_class() {
log::debug!("Unregistering {} window class", WND_CLASS.to_string_lossy());
unsafe {
let instance = libloaderapi::GetModuleHandleW(ptr::null_mut());
winuser::UnregisterClassW(WND_CLASS.as_ptr(), instance);
}
}
}
trait WindowProc {
/// Window procedure callback.
///
/// If None is returned, underlying wrapper calls `DefWindowProcW`.
fn window_proc(
&mut self,
hwnd: HWND,
msg: win::UINT,
wparam: win::WPARAM,
lparam: win::LPARAM,
) -> Option<win::LRESULT>;
}
/// Window proc wrapper that manages the `&self` pointer to `ProgressWindow` object.
///
/// Must be `extern "system"` because the function is called by Windows.
extern "system" fn window_proc_wrapper<T: WindowProc>(
hwnd: HWND,
msg: win::UINT,
wparam: win::WPARAM,
lparam: win::LPARAM,
) -> win::LRESULT {
use winuser::*;
// get pointer to T from userdata
let mut ptr = unsafe { GetWindowLongPtrW(hwnd, GWLP_USERDATA) } as *mut T;
// not yet set, initialize from CREATESTRUCT
if ptr.is_null() && msg == WM_NCCREATE {
let cs = unsafe { &*(lparam as LPCREATESTRUCTW) };
ptr = cs.lpCreateParams as *mut T;
log::debug!("Initialize window pointer {:p}", ptr);
unsafe { errhandlingapi::SetLastError(0) };
if 0 == unsafe {
SetWindowLongPtrW(hwnd, GWLP_USERDATA, ptr as *const _ as basetsd::LONG_PTR)
} && unsafe { errhandlingapi::GetLastError() } != 0
{
return win::FALSE as win::LRESULT;
}
}
// call wrapped window proc
if !ptr.is_null() {
let this = unsafe { &mut *(ptr as *mut T) };
if let Some(result) = this.window_proc(hwnd, msg, wparam, lparam) {
return result;
}
}
unsafe { DefWindowProcW(hwnd, msg, wparam, lparam) }
}
impl WindowProc for ProgressWindow {
fn window_proc(
&mut self,
hwnd: HWND,
msg: win::UINT,
wparam: win::WPARAM,
lparam: win::LPARAM,
) -> Option<win::LRESULT> {
use winuser::*;
match msg {
// https://docs.microsoft.com/en-us/windows/win32/winmsg/wm-nccreate
WM_NCCREATE => {
// store main window handle
self.hwnd = hwnd;
// WM_NCCREATE must be passed to DefWindowProc
None
}
// https://docs.microsoft.com/en-us/windows/win32/winmsg/wm-create
WM_CREATE => match self.create_window_controls() {
Err(e) => {
log::error!("Failed to create window controls: {}", e);
Some(-1)
}
Ok(()) => Some(0),
},
// https://docs.microsoft.com/en-us/windows/win32/winmsg/wm-size
WM_SIZE => {
self.on_resize(
i32::from(win::LOWORD(lparam as u32)),
i32::from(win::HIWORD(lparam as u32)),
);
Some(0)
}
// https://docs.microsoft.com/en-us/windows/win32/winmsg/wm-getminmaxinfo
WM_GETMINMAXINFO => {
let mmi = unsafe { &mut *(lparam as LPMINMAXINFO) };
mmi.ptMinTrackSize.x = MIN_WINDOW_SIZE.0;
mmi.ptMinTrackSize.y = MIN_WINDOW_SIZE.1;
Some(0)
}
// https://docs.microsoft.com/en-us/windows/win32/controls/wm-ctlcolorstatic
WM_CTLCOLORSTATIC => {
Some(unsafe { wingdi::GetStockObject(COLOR_WINDOW + 1) } as win::LPARAM)
}
// https://docs.microsoft.com/en-us/windows/win32/winmsg/wm-close
WM_CLOSE => {
self.cancel();
unsafe { DestroyWindow(hwnd) };
Some(0)
}
// https://docs.microsoft.com/en-us/windows/win32/winmsg/wm-destroy
WM_DESTROY => {
unsafe { PostQuitMessage(0) };
Some(0)
}
WM_PROGRESS => {
self.update_progress(wparam, lparam as _);
Some(0)
}
_ => None,
}
}
} | random_line_split | |
progress.rs | use num_enum::IntoPrimitive;
use once_cell::sync::Lazy;
use std::sync::mpsc::Sender;
use std::{mem, pin::Pin, ptr};
use wchar::*;
use widestring::*;
use winapi::shared::basetsd;
use winapi::shared::minwindef as win;
use winapi::shared::windef::*;
use winapi::um::commctrl;
use winapi::um::errhandlingapi;
use winapi::um::libloaderapi;
use winapi::um::wingdi;
use winapi::um::winuser;
use wslscript_common::error::*;
use wslscript_common::font::Font;
use wslscript_common::wcstring;
use wslscript_common::win32;
pub struct ProgressWindow {
/// Maximum value for progress.
high_limit: usize,
/// Sender to signal for cancellation.
cancel_sender: Option<Sender<()>>,
/// Window handle.
hwnd: HWND,
/// Default font.
font: Font,
}
impl Default for ProgressWindow {
fn default() -> Self {
Self {
high_limit: 0,
cancel_sender: None,
hwnd: ptr::null_mut(),
font: Font::default(),
}
}
}
/// Progress window class name.
static WND_CLASS: Lazy<WideCString> = Lazy::new(|| wcstring("WSLScriptProgress"));
/// Window message for progress update.
pub const WM_PROGRESS: win::UINT = winuser::WM_USER + 1;
/// Child window identifiers.
#[derive(IntoPrimitive, PartialEq)]
#[repr(u16)]
enum Control {
ProgressBar = 100,
Message,
Title,
}
/// Minimum and initial main window size as a (width, height) tuple.
const MIN_WINDOW_SIZE: (i32, i32) = (300, 150);
impl ProgressWindow {
pub fn new(high_limit: usize, cancel_sender: Sender<()>) -> Result<Pin<Box<Self>>, Error> {
use winuser::*;
// register window class
if !Self::is_window_class_registered() {
Self::register_window_class()?;
}
let mut wnd = Pin::new(Box::new(Self::default()));
wnd.high_limit = high_limit;
wnd.cancel_sender = Some(cancel_sender);
let instance = unsafe { libloaderapi::GetModuleHandleW(ptr::null_mut()) };
let title = wchz!("WSL Script");
// create window
#[rustfmt::skip]
let hwnd = unsafe { CreateWindowExW(
WS_EX_TOOLWINDOW | WS_EX_TOPMOST, WND_CLASS.as_ptr(), title.as_ptr(),
WS_OVERLAPPEDWINDOW & !WS_MAXIMIZEBOX | WS_VISIBLE,
CW_USEDEFAULT, CW_USEDEFAULT, MIN_WINDOW_SIZE.0, MIN_WINDOW_SIZE.1,
ptr::null_mut(), ptr::null_mut(), instance,
// self as a `CREATESTRUCT`'s `lpCreateParams`
&*wnd as *const Self as win::LPVOID)
};
if hwnd.is_null() {
return Err(win32::last_error());
}
Ok(wnd)
}
/// Get handle to main window.
pub fn handle(&self) -> HWND {
self.hwnd
}
/// Run message loop.
pub fn run(&self) -> Result<(), Error> {
log::debug!("Starting message loop");
loop {
let mut msg: winuser::MSG = unsafe { mem::zeroed() };
match unsafe { winuser::GetMessageW(&mut msg, ptr::null_mut(), 0, 0) } {
1..=std::i32::MAX => unsafe {
winuser::TranslateMessage(&msg);
winuser::DispatchMessageW(&msg);
},
std::i32::MIN..=-1 => return Err(win32::last_error()),
0 => {
log::debug!("Received WM_QUIT");
return Ok(());
}
}
}
}
/// Signal that progress should be cancelled.
pub fn cancel(&self) {
if let Some(tx) = &self.cancel_sender {
tx.send(()).unwrap_or_else(|_| {
log::error!("Failed to send cancel signal");
});
}
}
/// Close main window.
pub fn close(&self) {
unsafe { winuser::PostMessageW(self.hwnd, winuser::WM_CLOSE, 0, 0) };
}
/// Create child control windows.
fn create_window_controls(&mut self) -> Result<(), Error> {
use winuser::*;
let instance = unsafe { GetWindowLongPtrW(self.hwnd, GWLP_HINSTANCE) as win::HINSTANCE };
self.font = Font::new_caption(20)?;
// init common controls
let icex = commctrl::INITCOMMONCONTROLSEX {
dwSize: mem::size_of::<commctrl::INITCOMMONCONTROLSEX>() as u32,
dwICC: commctrl::ICC_PROGRESS_CLASS,
};
unsafe { commctrl::InitCommonControlsEx(&icex) };
// progress bar
#[rustfmt::skip]
let hwnd = unsafe { CreateWindowExW(
0, wcstring(commctrl::PROGRESS_CLASS).as_ptr(), ptr::null_mut(),
WS_CHILD | WS_VISIBLE | commctrl::PBS_MARQUEE,
0, 0, 0, 0, self.hwnd,
Control::ProgressBar as u16 as _, instance, ptr::null_mut(),
) };
unsafe { SendMessageW(hwnd, commctrl::PBM_SETRANGE32, 0, self.high_limit as _) };
unsafe { SendMessageW(hwnd, commctrl::PBM_SETMARQUEE, 1, 0) };
// static message area
#[rustfmt::skip]
let hwnd = unsafe { CreateWindowExW(
0, wchz!("STATIC").as_ptr(), ptr::null_mut(),
SS_CENTER | WS_CHILD | WS_VISIBLE,
0, 0, 0, 0, self.hwnd,
Control::Message as u16 as _, instance, ptr::null_mut(),
) };
Self::set_window_font(hwnd, &self.font);
// static title
#[rustfmt::skip]
let hwnd = unsafe { CreateWindowExW(
0, wchz!("STATIC").as_ptr(), ptr::null_mut(),
SS_CENTER | WS_CHILD | WS_VISIBLE,
0, 0, 0, 0, self.hwnd,
Control::Title as u16 as _, instance, ptr::null_mut(),
) };
Self::set_window_font(hwnd, &self.font);
unsafe { SetWindowTextW(hwnd, wchz!("Converting paths...").as_ptr()) };
Ok(())
}
/// Called when client was resized.
fn on_resize(&self, width: i32, _height: i32) {
self.move_control(Control::Title, 10, 10, width - 20, 20);
self.move_control(Control::ProgressBar, 10, 40, width - 20, 30);
self.move_control(Control::Message, 10, 80, width - 20, 20);
}
/// Move control relative to main window.
fn move_control(&self, control: Control, x: i32, y: i32, width: i32, height: i32) {
let hwnd = self.get_control_handle(control);
unsafe { winuser::MoveWindow(hwnd, x, y, width, height, win::TRUE) };
}
/// Get window handle of given control.
fn get_control_handle(&self, control: Control) -> HWND {
unsafe { winuser::GetDlgItem(self.hwnd, control as i32) }
}
/// Set font to given window.
fn set_window_font(hwnd: HWND, font: &Font) {
unsafe {
winuser::SendMessageW(hwnd, winuser::WM_SETFONT, font.handle as _, win::TRUE as _)
};
}
/// Update controls to display given progress.
fn update_progress(&mut self, current: usize, max: usize) {
use commctrl::*;
use winuser::*;
log::debug!("Progress update: {}/{}", current, max);
let msg = format!("{} / {}", current, max);
unsafe {
SetWindowTextW(
self.get_control_handle(Control::Message),
wcstring(msg).as_ptr(),
)
};
if self.is_marquee_progress() {
self.set_progress_to_range_mode();
}
let hwnd = self.get_control_handle(Control::ProgressBar);
unsafe { SendMessageW(hwnd, PBM_SETPOS, current, 0) };
// if done, close cancellation channel
if current == max {
self.cancel_sender.take();
}
}
/// Check whether progress bar is in marquee mode.
fn is_marquee_progress(&self) -> bool {
let style = unsafe {
winuser::GetWindowLongW(
self.get_control_handle(Control::ProgressBar),
winuser::GWL_STYLE,
)
} as u32;
style & commctrl::PBS_MARQUEE != 0
}
/// Set progress bar to range mode.
fn set_progress_to_range_mode(&self) {
use commctrl::*;
use winuser::*;
let hwnd = self.get_control_handle(Control::ProgressBar);
let mut style = unsafe { GetWindowLongW(hwnd, GWL_STYLE) } as u32;
style &= !PBS_MARQUEE;
style |= PBS_SMOOTH;
unsafe { SetWindowLongW(hwnd, GWL_STYLE, style as _) };
unsafe { SendMessageW(hwnd, PBM_SETMARQUEE, 0, 0) };
}
}
impl ProgressWindow {
/// Check whether window class is registered.
pub fn | () -> bool {
unsafe {
let instance = libloaderapi::GetModuleHandleW(ptr::null_mut());
let mut wc: winuser::WNDCLASSEXW = mem::zeroed();
winuser::GetClassInfoExW(instance, WND_CLASS.as_ptr(), &mut wc) != 0
}
}
/// Register window class.
pub fn register_window_class() -> Result<(), Error> {
use winuser::*;
log::debug!("Registering {} window class", WND_CLASS.to_string_lossy());
let instance = unsafe { libloaderapi::GetModuleHandleW(ptr::null_mut()) };
let wc = WNDCLASSEXW {
cbSize: mem::size_of::<WNDCLASSEXW>() as u32,
style: CS_OWNDC | CS_HREDRAW | CS_VREDRAW,
hbrBackground: (COLOR_WINDOW + 1) as HBRUSH,
lpfnWndProc: Some(window_proc_wrapper::<ProgressWindow>),
hInstance: instance,
lpszClassName: WND_CLASS.as_ptr(),
hIcon: ptr::null_mut(),
hCursor: unsafe { LoadCursorW(ptr::null_mut(), IDC_ARROW) },
..unsafe { mem::zeroed() }
};
if 0 == unsafe { RegisterClassExW(&wc) } {
Err(win32::last_error())
} else {
Ok(())
}
}
/// Unregister window class.
pub fn unregister_window_class() {
log::debug!("Unregistering {} window class", WND_CLASS.to_string_lossy());
unsafe {
let instance = libloaderapi::GetModuleHandleW(ptr::null_mut());
winuser::UnregisterClassW(WND_CLASS.as_ptr(), instance);
}
}
}
trait WindowProc {
/// Window procedure callback.
///
/// If None is returned, underlying wrapper calls `DefWindowProcW`.
fn window_proc(
&mut self,
hwnd: HWND,
msg: win::UINT,
wparam: win::WPARAM,
lparam: win::LPARAM,
) -> Option<win::LRESULT>;
}
/// Window proc wrapper that manages the `&self` pointer to `ProgressWindow` object.
///
/// Must be `extern "system"` because the function is called by Windows.
extern "system" fn window_proc_wrapper<T: WindowProc>(
hwnd: HWND,
msg: win::UINT,
wparam: win::WPARAM,
lparam: win::LPARAM,
) -> win::LRESULT {
use winuser::*;
// get pointer to T from userdata
let mut ptr = unsafe { GetWindowLongPtrW(hwnd, GWLP_USERDATA) } as *mut T;
// not yet set, initialize from CREATESTRUCT
if ptr.is_null() && msg == WM_NCCREATE {
let cs = unsafe { &*(lparam as LPCREATESTRUCTW) };
ptr = cs.lpCreateParams as *mut T;
log::debug!("Initialize window pointer {:p}", ptr);
unsafe { errhandlingapi::SetLastError(0) };
if 0 == unsafe {
SetWindowLongPtrW(hwnd, GWLP_USERDATA, ptr as *const _ as basetsd::LONG_PTR)
} && unsafe { errhandlingapi::GetLastError() } != 0
{
return win::FALSE as win::LRESULT;
}
}
// call wrapped window proc
if !ptr.is_null() {
let this = unsafe { &mut *(ptr as *mut T) };
if let Some(result) = this.window_proc(hwnd, msg, wparam, lparam) {
return result;
}
}
unsafe { DefWindowProcW(hwnd, msg, wparam, lparam) }
}
impl WindowProc for ProgressWindow {
fn window_proc(
&mut self,
hwnd: HWND,
msg: win::UINT,
wparam: win::WPARAM,
lparam: win::LPARAM,
) -> Option<win::LRESULT> {
use winuser::*;
match msg {
// https://docs.microsoft.com/en-us/windows/win32/winmsg/wm-nccreate
WM_NCCREATE => {
// store main window handle
self.hwnd = hwnd;
// WM_NCCREATE must be passed to DefWindowProc
None
}
// https://docs.microsoft.com/en-us/windows/win32/winmsg/wm-create
WM_CREATE => match self.create_window_controls() {
Err(e) => {
log::error!("Failed to create window controls: {}", e);
Some(-1)
}
Ok(()) => Some(0),
},
// https://docs.microsoft.com/en-us/windows/win32/winmsg/wm-size
WM_SIZE => {
self.on_resize(
i32::from(win::LOWORD(lparam as u32)),
i32::from(win::HIWORD(lparam as u32)),
);
Some(0)
}
// https://docs.microsoft.com/en-us/windows/win32/winmsg/wm-getminmaxinfo
WM_GETMINMAXINFO => {
let mmi = unsafe { &mut *(lparam as LPMINMAXINFO) };
mmi.ptMinTrackSize.x = MIN_WINDOW_SIZE.0;
mmi.ptMinTrackSize.y = MIN_WINDOW_SIZE.1;
Some(0)
}
// https://docs.microsoft.com/en-us/windows/win32/controls/wm-ctlcolorstatic
WM_CTLCOLORSTATIC => {
Some(unsafe { wingdi::GetStockObject(COLOR_WINDOW + 1) } as win::LPARAM)
}
// https://docs.microsoft.com/en-us/windows/win32/winmsg/wm-close
WM_CLOSE => {
self.cancel();
unsafe { DestroyWindow(hwnd) };
Some(0)
}
// https://docs.microsoft.com/en-us/windows/win32/winmsg/wm-destroy
WM_DESTROY => {
unsafe { PostQuitMessage(0) };
Some(0)
}
WM_PROGRESS => {
self.update_progress(wparam, lparam as _);
Some(0)
}
_ => None,
}
}
}
| is_window_class_registered | identifier_name |
progress.rs | use num_enum::IntoPrimitive;
use once_cell::sync::Lazy;
use std::sync::mpsc::Sender;
use std::{mem, pin::Pin, ptr};
use wchar::*;
use widestring::*;
use winapi::shared::basetsd;
use winapi::shared::minwindef as win;
use winapi::shared::windef::*;
use winapi::um::commctrl;
use winapi::um::errhandlingapi;
use winapi::um::libloaderapi;
use winapi::um::wingdi;
use winapi::um::winuser;
use wslscript_common::error::*;
use wslscript_common::font::Font;
use wslscript_common::wcstring;
use wslscript_common::win32;
pub struct ProgressWindow {
/// Maximum value for progress.
high_limit: usize,
/// Sender to signal for cancellation.
cancel_sender: Option<Sender<()>>,
/// Window handle.
hwnd: HWND,
/// Default font.
font: Font,
}
impl Default for ProgressWindow {
fn default() -> Self {
Self {
high_limit: 0,
cancel_sender: None,
hwnd: ptr::null_mut(),
font: Font::default(),
}
}
}
/// Progress window class name.
static WND_CLASS: Lazy<WideCString> = Lazy::new(|| wcstring("WSLScriptProgress"));
/// Window message for progress update.
pub const WM_PROGRESS: win::UINT = winuser::WM_USER + 1;
/// Child window identifiers.
#[derive(IntoPrimitive, PartialEq)]
#[repr(u16)]
enum Control {
ProgressBar = 100,
Message,
Title,
}
/// Minimum and initial main window size as a (width, height) tuple.
const MIN_WINDOW_SIZE: (i32, i32) = (300, 150);
impl ProgressWindow {
pub fn new(high_limit: usize, cancel_sender: Sender<()>) -> Result<Pin<Box<Self>>, Error> {
use winuser::*;
// register window class
if !Self::is_window_class_registered() {
Self::register_window_class()?;
}
let mut wnd = Pin::new(Box::new(Self::default()));
wnd.high_limit = high_limit;
wnd.cancel_sender = Some(cancel_sender);
let instance = unsafe { libloaderapi::GetModuleHandleW(ptr::null_mut()) };
let title = wchz!("WSL Script");
// create window
#[rustfmt::skip]
let hwnd = unsafe { CreateWindowExW(
WS_EX_TOOLWINDOW | WS_EX_TOPMOST, WND_CLASS.as_ptr(), title.as_ptr(),
WS_OVERLAPPEDWINDOW & !WS_MAXIMIZEBOX | WS_VISIBLE,
CW_USEDEFAULT, CW_USEDEFAULT, MIN_WINDOW_SIZE.0, MIN_WINDOW_SIZE.1,
ptr::null_mut(), ptr::null_mut(), instance,
// self as a `CREATESTRUCT`'s `lpCreateParams`
&*wnd as *const Self as win::LPVOID)
};
if hwnd.is_null() {
return Err(win32::last_error());
}
Ok(wnd)
}
/// Get handle to main window.
pub fn handle(&self) -> HWND {
self.hwnd
}
/// Run message loop.
pub fn run(&self) -> Result<(), Error> {
log::debug!("Starting message loop");
loop {
let mut msg: winuser::MSG = unsafe { mem::zeroed() };
match unsafe { winuser::GetMessageW(&mut msg, ptr::null_mut(), 0, 0) } {
1..=std::i32::MAX => unsafe {
winuser::TranslateMessage(&msg);
winuser::DispatchMessageW(&msg);
},
std::i32::MIN..=-1 => return Err(win32::last_error()),
0 => {
log::debug!("Received WM_QUIT");
return Ok(());
}
}
}
}
/// Signal that progress should be cancelled.
pub fn cancel(&self) {
if let Some(tx) = &self.cancel_sender {
tx.send(()).unwrap_or_else(|_| {
log::error!("Failed to send cancel signal");
});
}
}
/// Close main window.
pub fn close(&self) {
unsafe { winuser::PostMessageW(self.hwnd, winuser::WM_CLOSE, 0, 0) };
}
/// Create child control windows.
fn create_window_controls(&mut self) -> Result<(), Error> {
use winuser::*;
let instance = unsafe { GetWindowLongPtrW(self.hwnd, GWLP_HINSTANCE) as win::HINSTANCE };
self.font = Font::new_caption(20)?;
// init common controls
let icex = commctrl::INITCOMMONCONTROLSEX {
dwSize: mem::size_of::<commctrl::INITCOMMONCONTROLSEX>() as u32,
dwICC: commctrl::ICC_PROGRESS_CLASS,
};
unsafe { commctrl::InitCommonControlsEx(&icex) };
// progress bar
#[rustfmt::skip]
let hwnd = unsafe { CreateWindowExW(
0, wcstring(commctrl::PROGRESS_CLASS).as_ptr(), ptr::null_mut(),
WS_CHILD | WS_VISIBLE | commctrl::PBS_MARQUEE,
0, 0, 0, 0, self.hwnd,
Control::ProgressBar as u16 as _, instance, ptr::null_mut(),
) };
unsafe { SendMessageW(hwnd, commctrl::PBM_SETRANGE32, 0, self.high_limit as _) };
unsafe { SendMessageW(hwnd, commctrl::PBM_SETMARQUEE, 1, 0) };
// static message area
#[rustfmt::skip]
let hwnd = unsafe { CreateWindowExW(
0, wchz!("STATIC").as_ptr(), ptr::null_mut(),
SS_CENTER | WS_CHILD | WS_VISIBLE,
0, 0, 0, 0, self.hwnd,
Control::Message as u16 as _, instance, ptr::null_mut(),
) };
Self::set_window_font(hwnd, &self.font);
// static title
#[rustfmt::skip]
let hwnd = unsafe { CreateWindowExW(
0, wchz!("STATIC").as_ptr(), ptr::null_mut(),
SS_CENTER | WS_CHILD | WS_VISIBLE,
0, 0, 0, 0, self.hwnd,
Control::Title as u16 as _, instance, ptr::null_mut(),
) };
Self::set_window_font(hwnd, &self.font);
unsafe { SetWindowTextW(hwnd, wchz!("Converting paths...").as_ptr()) };
Ok(())
}
/// Called when client was resized.
fn on_resize(&self, width: i32, _height: i32) {
self.move_control(Control::Title, 10, 10, width - 20, 20);
self.move_control(Control::ProgressBar, 10, 40, width - 20, 30);
self.move_control(Control::Message, 10, 80, width - 20, 20);
}
/// Move control relative to main window.
fn move_control(&self, control: Control, x: i32, y: i32, width: i32, height: i32) {
let hwnd = self.get_control_handle(control);
unsafe { winuser::MoveWindow(hwnd, x, y, width, height, win::TRUE) };
}
/// Get window handle of given control.
fn get_control_handle(&self, control: Control) -> HWND {
unsafe { winuser::GetDlgItem(self.hwnd, control as i32) }
}
/// Set font to given window.
fn set_window_font(hwnd: HWND, font: &Font) {
unsafe {
winuser::SendMessageW(hwnd, winuser::WM_SETFONT, font.handle as _, win::TRUE as _)
};
}
/// Update controls to display given progress.
fn update_progress(&mut self, current: usize, max: usize) {
use commctrl::*;
use winuser::*;
log::debug!("Progress update: {}/{}", current, max);
let msg = format!("{} / {}", current, max);
unsafe {
SetWindowTextW(
self.get_control_handle(Control::Message),
wcstring(msg).as_ptr(),
)
};
if self.is_marquee_progress() |
let hwnd = self.get_control_handle(Control::ProgressBar);
unsafe { SendMessageW(hwnd, PBM_SETPOS, current, 0) };
// if done, close cancellation channel
if current == max {
self.cancel_sender.take();
}
}
/// Check whether progress bar is in marquee mode.
fn is_marquee_progress(&self) -> bool {
let style = unsafe {
winuser::GetWindowLongW(
self.get_control_handle(Control::ProgressBar),
winuser::GWL_STYLE,
)
} as u32;
style & commctrl::PBS_MARQUEE != 0
}
/// Set progress bar to range mode.
fn set_progress_to_range_mode(&self) {
use commctrl::*;
use winuser::*;
let hwnd = self.get_control_handle(Control::ProgressBar);
let mut style = unsafe { GetWindowLongW(hwnd, GWL_STYLE) } as u32;
style &= !PBS_MARQUEE;
style |= PBS_SMOOTH;
unsafe { SetWindowLongW(hwnd, GWL_STYLE, style as _) };
unsafe { SendMessageW(hwnd, PBM_SETMARQUEE, 0, 0) };
}
}
impl ProgressWindow {
/// Check whether window class is registered.
pub fn is_window_class_registered() -> bool {
unsafe {
let instance = libloaderapi::GetModuleHandleW(ptr::null_mut());
let mut wc: winuser::WNDCLASSEXW = mem::zeroed();
winuser::GetClassInfoExW(instance, WND_CLASS.as_ptr(), &mut wc) != 0
}
}
/// Register window class.
pub fn register_window_class() -> Result<(), Error> {
use winuser::*;
log::debug!("Registering {} window class", WND_CLASS.to_string_lossy());
let instance = unsafe { libloaderapi::GetModuleHandleW(ptr::null_mut()) };
let wc = WNDCLASSEXW {
cbSize: mem::size_of::<WNDCLASSEXW>() as u32,
style: CS_OWNDC | CS_HREDRAW | CS_VREDRAW,
hbrBackground: (COLOR_WINDOW + 1) as HBRUSH,
lpfnWndProc: Some(window_proc_wrapper::<ProgressWindow>),
hInstance: instance,
lpszClassName: WND_CLASS.as_ptr(),
hIcon: ptr::null_mut(),
hCursor: unsafe { LoadCursorW(ptr::null_mut(), IDC_ARROW) },
..unsafe { mem::zeroed() }
};
if 0 == unsafe { RegisterClassExW(&wc) } {
Err(win32::last_error())
} else {
Ok(())
}
}
/// Unregister window class.
pub fn unregister_window_class() {
log::debug!("Unregistering {} window class", WND_CLASS.to_string_lossy());
unsafe {
let instance = libloaderapi::GetModuleHandleW(ptr::null_mut());
winuser::UnregisterClassW(WND_CLASS.as_ptr(), instance);
}
}
}
trait WindowProc {
/// Window procedure callback.
///
/// If None is returned, underlying wrapper calls `DefWindowProcW`.
fn window_proc(
&mut self,
hwnd: HWND,
msg: win::UINT,
wparam: win::WPARAM,
lparam: win::LPARAM,
) -> Option<win::LRESULT>;
}
/// Window proc wrapper that manages the `&self` pointer to `ProgressWindow` object.
///
/// Must be `extern "system"` because the function is called by Windows.
extern "system" fn window_proc_wrapper<T: WindowProc>(
hwnd: HWND,
msg: win::UINT,
wparam: win::WPARAM,
lparam: win::LPARAM,
) -> win::LRESULT {
use winuser::*;
// get pointer to T from userdata
let mut ptr = unsafe { GetWindowLongPtrW(hwnd, GWLP_USERDATA) } as *mut T;
// not yet set, initialize from CREATESTRUCT
if ptr.is_null() && msg == WM_NCCREATE {
let cs = unsafe { &*(lparam as LPCREATESTRUCTW) };
ptr = cs.lpCreateParams as *mut T;
log::debug!("Initialize window pointer {:p}", ptr);
unsafe { errhandlingapi::SetLastError(0) };
if 0 == unsafe {
SetWindowLongPtrW(hwnd, GWLP_USERDATA, ptr as *const _ as basetsd::LONG_PTR)
} && unsafe { errhandlingapi::GetLastError() } != 0
{
return win::FALSE as win::LRESULT;
}
}
// call wrapped window proc
if !ptr.is_null() {
let this = unsafe { &mut *(ptr as *mut T) };
if let Some(result) = this.window_proc(hwnd, msg, wparam, lparam) {
return result;
}
}
unsafe { DefWindowProcW(hwnd, msg, wparam, lparam) }
}
impl WindowProc for ProgressWindow {
fn window_proc(
&mut self,
hwnd: HWND,
msg: win::UINT,
wparam: win::WPARAM,
lparam: win::LPARAM,
) -> Option<win::LRESULT> {
use winuser::*;
match msg {
// https://docs.microsoft.com/en-us/windows/win32/winmsg/wm-nccreate
WM_NCCREATE => {
// store main window handle
self.hwnd = hwnd;
// WM_NCCREATE must be passed to DefWindowProc
None
}
// https://docs.microsoft.com/en-us/windows/win32/winmsg/wm-create
WM_CREATE => match self.create_window_controls() {
Err(e) => {
log::error!("Failed to create window controls: {}", e);
Some(-1)
}
Ok(()) => Some(0),
},
// https://docs.microsoft.com/en-us/windows/win32/winmsg/wm-size
WM_SIZE => {
self.on_resize(
i32::from(win::LOWORD(lparam as u32)),
i32::from(win::HIWORD(lparam as u32)),
);
Some(0)
}
// https://docs.microsoft.com/en-us/windows/win32/winmsg/wm-getminmaxinfo
WM_GETMINMAXINFO => {
let mmi = unsafe { &mut *(lparam as LPMINMAXINFO) };
mmi.ptMinTrackSize.x = MIN_WINDOW_SIZE.0;
mmi.ptMinTrackSize.y = MIN_WINDOW_SIZE.1;
Some(0)
}
// https://docs.microsoft.com/en-us/windows/win32/controls/wm-ctlcolorstatic
WM_CTLCOLORSTATIC => {
Some(unsafe { wingdi::GetStockObject(COLOR_WINDOW + 1) } as win::LPARAM)
}
// https://docs.microsoft.com/en-us/windows/win32/winmsg/wm-close
WM_CLOSE => {
self.cancel();
unsafe { DestroyWindow(hwnd) };
Some(0)
}
// https://docs.microsoft.com/en-us/windows/win32/winmsg/wm-destroy
WM_DESTROY => {
unsafe { PostQuitMessage(0) };
Some(0)
}
WM_PROGRESS => {
self.update_progress(wparam, lparam as _);
Some(0)
}
_ => None,
}
}
}
| {
self.set_progress_to_range_mode();
} | conditional_block |
typescript.rs | //! Generation of Typescript types from Stencila Schema
use std::{
collections::HashSet,
fs::read_dir,
path::{Path, PathBuf},
};
use common::{
async_recursion::async_recursion,
eyre::{bail, Context, Report, Result},
futures::future::try_join_all,
inflector::Inflector,
itertools::Itertools,
tokio::fs::{create_dir_all, remove_file, write},
};
use crate::schemas::{Items, Schema, Schemas, Type, Value};
/// Comment to place at the top of a file to indicate it is generated
const GENERATED_COMMENT: &str = "// Generated file; do not edit. See `../rust/schema-gen` crate.";
/// Modules that should not be generated
///
/// These modules are manually written, usually because they are
/// an alias for a native JavaScript type.
const NO_GENERATE_MODULE: &[&str] = &[
"Array",
"Boolean",
"Integer",
"Null",
"Number",
"Object",
"Primitive",
"String",
"TextValue",
"UnsignedInteger",
];
/// Types for which native TypeScript types are used directly.
/// Note that this excludes `Integer`, `UnsignedInteger` and `Object` which,
/// although implemented as native types, have different semantics.
const NATIVE_TYPES: &[&str] = &["null", "boolean", "number", "string"];
impl Schemas {
/// Generate a TypeScript module for each schema
pub async fn typescript(&self) -> Result<()> {
eprintln!("Generating TypeScript types");
// The top level destination
let dest = PathBuf::from(env!("CARGO_MANIFEST_DIR")).join("../../typescript/src");
let dest = dest
.canonicalize()
.context(format!("can not find directory `{}`", dest.display()))?;
// The types directory that modules get generated into
let types = dest.join("types");
if types.exists() {
// Already exists, so clean up existing files, except for those that are not generated
for file in read_dir(&types)?.flatten() {
let path = file.path();
if NO_GENERATE_MODULE.contains(
&path
.file_name()
.unwrap()
.to_string_lossy()
.strip_suffix(".ts")
.unwrap(),
) {
continue;
}
remove_file(&path).await?
}
} else {
// Doesn't exist, so create it
create_dir_all(&types).await?;
}
// Create a module for each schema
let futures = self
.schemas
.values()
.map(|schema| Self::typescript_module(&types, schema));
try_join_all(futures).await?;
// Create an index.ts which export types from all modules (including those
// that are not generated)
let exports = read_dir(&types)
.wrap_err(format!("unable to read directory `{}`", types.display()))?
.flatten()
.map(|entry| {
entry
.path()
.file_name()
.unwrap()
.to_string_lossy()
.strip_suffix(".ts")
.unwrap()
.to_string()
})
.sorted()
.map(|module| format!("export * from './types/{module}';"))
.join("\n");
write(
dest.join("index.ts"),
format!(
r"{GENERATED_COMMENT}
{exports}
"
),
)
.await?;
Ok(())
}
/// Generate a TypeScript module for a schema
async fn typescript_module(dest: &Path, schema: &Schema) -> Result<()> {
let Some(title) = &schema.title else {
bail!("Schema has no title");
};
if NO_GENERATE_MODULE.contains(&title.as_str()) || schema.r#abstract {
return Ok(());
}
if schema.any_of.is_some() {
Self::typescript_any_of(dest, schema).await?;
} else if schema.r#type.is_none() {
Self::typescript_object(dest, title, schema).await?;
}
Ok(())
}
/// Generate a TypeScript type for a schema
///
/// Returns the name of the type and whether:
/// - it is an array
/// - it is a type (rather than an enum variant)
#[async_recursion]
async fn typescript_type(dest: &Path, schema: &Schema) -> Result<(String, bool, bool)> {
use Type::*;
// If the Stencila Schema type name corresponds to a TypeScript
// native type then return the name of the native type, otherwise
// return the pascal cased name (e.g. `integer` -> `Integer`)
let maybe_native_type = |type_name: &str| {
let lower = type_name.to_lowercase();
if NATIVE_TYPES.contains(&lower.as_str()) {
lower
} else {
type_name.to_pascal_case()
}
};
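// For example (illustrative): maybe_native_type("string") yields the native `string`,
// while "integer" is not in NATIVE_TYPES and is pascal-cased to `Integer`.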
let result = if let Some(r#type) = &schema.r#type {
match r#type {
Array => {
let items = match &schema.items {
Some(Items::Ref(inner)) => maybe_native_type(&inner.r#ref),
Some(Items::Type(inner)) => maybe_native_type(&inner.r#type),
Some(Items::AnyOf(inner)) => {
let schema = Schema {
any_of: Some(inner.any_of.clone()),
..Default::default()
};
Self::typescript_type(dest, &schema).await?.0
}
Some(Items::List(inner)) => {
let schema = Schema {
any_of: Some(inner.clone()),
..Default::default()
};
Self::typescript_type(dest, &schema).await?.0
}
None => "Unhandled".to_string(),
};
(items, true, true)
}
_ => (maybe_native_type(r#type.as_ref()), false, true),
}
} else if let Some(r#ref) = &schema.r#ref {
(maybe_native_type(r#ref), false, true)
} else if schema.any_of.is_some() {
(Self::typescript_any_of(dest, schema).await?, false, true)
} else if let Some(title) = &schema.title {
(title.to_string(), false, true)
} else if let Some(r#const) = &schema.r#const {
(Self::typescript_value(r#const), false, false)
} else {
("Unhandled".to_string(), false, true)
};
Ok(result)
}
/// Generate a TypeScript `class` for an object schema with `properties`
///
/// Returns the name of the generated `class`.
async fn | (dest: &Path, title: &String, schema: &Schema) -> Result<String> {
let path = dest.join(format!("{}.ts", title));
if path.exists() {
return Ok(title.to_string());
}
let description = schema
.description
.as_ref()
.unwrap_or(title)
.trim_end_matches('\n')
.replace('\n', "\n // ");
let mut props = Vec::new();
let mut required_props = Vec::new();
let mut used_types = HashSet::new();
for (name, property) in schema.properties.iter().flatten() {
let description = property
.description
.as_ref()
.unwrap_or(name)
.trim_end_matches('\n')
.replace('\n', "\n // ");
let name = name.to_camel_case();
// Early return for "type" property
if name == "type" {
props.push(format!(" type = \"{title}\";"));
continue;
}
let mut prop = name.clone();
// Determine Typescript type of the property
let (mut prop_type, is_array, ..) = Self::typescript_type(dest, property).await?;
used_types.insert(prop_type.clone());
// Is the property optional?
if !property.is_required {
prop.push('?');
}
prop.push_str(": ");
// Is the property an array?
if is_array {
prop_type.push_str("[]");
};
prop.push_str(&prop_type);
// If the property is required, add it to the constructor args.
if property.is_required {
// An argument can not be named `arguments` so deal with that
// special case here.
required_props.push(if name == "arguments" {
(
format!("this.{name} = args;"),
format!("args: {prop_type}, "),
)
} else {
(
format!("this.{name} = {name};"),
format!("{name}: {prop_type}, "),
)
});
}
// Does the property have a default?
if let Some(default) = property.default.as_ref() {
let default = Self::typescript_value(default);
prop.push_str(&format!(" = {default}"));
};
props.push(format!(" // {description}\n {prop};"));
}
let props = props.join("\n\n");
let required_args = required_props.iter().map(|(.., arg)| arg).join("");
let required_assignments = required_props
.iter()
.map(|(assignment, ..)| assignment)
.join("\n ");
let mut imports = used_types
.into_iter()
.filter(|used_type| {
used_type != title && !NATIVE_TYPES.contains(&used_type.to_lowercase().as_str())
})
.sorted()
.map(|used_type| format!("import {{ {used_type} }} from './{used_type}';"))
.join("\n");
if !imports.is_empty() {
imports.push_str("\n\n");
}
write(
path,
&format!(
r#"{GENERATED_COMMENT}
{imports}// {description}
export class {title} {{
{props}
constructor({required_args}options?: {title}) {{
if (options) Object.assign(this, options)
{required_assignments}
}}
}}
"#
),
)
.await?;
Ok(title.to_string())
}
/// Generate a TypeScript discriminated union `type` for an `anyOf` root schema or property schema
///
/// Returns the name of the generated union type.
async fn typescript_any_of(dest: &Path, schema: &Schema) -> Result<String> {
let Some(any_of) = &schema.any_of else {
bail!("Schema has no anyOf");
};
let (alternatives, are_types): (Vec<_>, Vec<_>) =
try_join_all(any_of.iter().map(|schema| async {
let (typ, is_array, is_type) = Self::typescript_type(dest, schema).await?;
let typ = if is_array {
Self::typescript_array_of(dest, &typ).await?
} else {
typ
};
Ok::<_, Report>((typ, is_type))
}))
.await?
.into_iter()
.unzip();
let name = schema.title.clone().unwrap_or_else(|| {
alternatives
.iter()
.map(|name| name.to_pascal_case())
.join("Or")
});
let path = dest.join(format!("{}.ts", name));
if path.exists() {
return Ok(name);
}
let description = if let Some(title) = &schema.title {
schema
.description
.clone()
.unwrap_or(title.clone())
.trim_end_matches('\n')
.replace('\n', "\n // ")
} else {
alternatives
.iter()
.map(|variant| format!("`{variant}`"))
.join(" or ")
};
let alternatives = alternatives
.into_iter()
.zip(are_types.into_iter())
.collect_vec();
let mut imports = alternatives
.iter()
.sorted()
.filter_map(|(name, is_type)| {
(*is_type && !NATIVE_TYPES.contains(&name.to_lowercase().as_str()))
.then_some(format!("import {{ {name} }} from './{name}'",))
})
.join("\n");
if !imports.is_empty() {
imports.push_str("\n\n");
}
let variants = alternatives
.into_iter()
.map(|(variant, is_type)| {
if is_type {
variant
} else {
format!("'{variant}'")
}
})
.join(" |\n ");
write(
path,
format!(
r#"{GENERATED_COMMENT}
{imports}// {description}
export type {name} =
{variants};
"#
),
)
.await?;
Ok(name)
}
/// Generate a TypeScript `type` for an "array of" type
///
/// Returns the name of the generated type which will be the plural
/// of the type of the array items.
async fn typescript_array_of(dest: &Path, item_type: &str) -> Result<String> {
let name = item_type.to_plural();
let path = dest.join(format!("{}.ts", name));
if path.exists() {
return Ok(name);
}
write(
path,
format!(
r#"{GENERATED_COMMENT}
import {{ {item_type} }} from './{item_type}';
export type {name} = {item_type}[];
"#
),
)
.await?;
Ok(name)
}
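// Illustrative output (assuming an item type named `Block`): this writes `types/Blocks.ts`
// containing `export type Blocks = Block[];`, with the plural name coming from `to_plural`.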
/// Generate a TypeScript representation of a JSON schema value
///
/// Returns a literal representation of the value.
fn typescript_value(value: &Value) -> String {
match value {
Value::Null => "null".to_string(),
Value::Boolean(inner) => inner.to_string(),
Value::Integer(inner) => inner.to_string(),
Value::Number(inner) => inner.to_string(),
Value::String(inner) => inner.to_string(),
_ => "Unhandled value type".to_string(),
}
}
}
| typescript_object | identifier_name |
typescript.rs | //! Generation of Typescript types from Stencila Schema
use std::{
collections::HashSet,
fs::read_dir,
path::{Path, PathBuf},
};
use common::{
async_recursion::async_recursion,
eyre::{bail, Context, Report, Result},
futures::future::try_join_all,
inflector::Inflector,
itertools::Itertools,
tokio::fs::{create_dir_all, remove_file, write},
};
use crate::schemas::{Items, Schema, Schemas, Type, Value};
/// Comment to place at the top of a file to indicate it is generated
const GENERATED_COMMENT: &str = "// Generated file; do not edit. See `../rust/schema-gen` crate.";
/// Modules that should not be generated
///
/// These modules are manually written, usually because they are
/// an alias for a native JavaScript type.
const NO_GENERATE_MODULE: &[&str] = &[
"Array",
"Boolean",
"Integer",
"Null",
"Number",
"Object",
"Primitive",
"String",
"TextValue",
"UnsignedInteger",
];
/// Types for which native TypeScript types are used directly.
/// Note that this excludes `Integer`, `UnsignedInteger` and `Object` which,
/// although implemented as native types, have different semantics.
const NATIVE_TYPES: &[&str] = &["null", "boolean", "number", "string"];
impl Schemas {
/// Generate a TypeScript module for each schema
pub async fn typescript(&self) -> Result<()> {
eprintln!("Generating TypeScript types");
// The top level destination
let dest = PathBuf::from(env!("CARGO_MANIFEST_DIR")).join("../../typescript/src");
let dest = dest
.canonicalize()
.context(format!("can not find directory `{}`", dest.display()))?;
// The types directory that modules get generated into
let types = dest.join("types");
if types.exists() {
// Already exists, so clean up existing files, except for those that are not generated
for file in read_dir(&types)?.flatten() {
let path = file.path();
if NO_GENERATE_MODULE.contains(
&path
.file_name()
.unwrap()
.to_string_lossy()
.strip_suffix(".ts")
.unwrap(),
) {
continue;
}
remove_file(&path).await?
}
} else {
// Doesn't exist, so create it
create_dir_all(&types).await?;
}
// Create a module for each schema
let futures = self
.schemas
.values()
.map(|schema| Self::typescript_module(&types, schema));
try_join_all(futures).await?;
// Create an index.ts which export types from all modules (including those
// that are not generated)
let exports = read_dir(&types)
.wrap_err(format!("unable to read directory `{}`", types.display()))?
.flatten()
.map(|entry| {
entry
.path()
.file_name()
.unwrap()
.to_string_lossy()
.strip_suffix(".ts")
.unwrap()
.to_string()
})
.sorted()
.map(|module| format!("export * from './types/{module}';"))
.join("\n");
write(
dest.join("index.ts"),
format!(
r"{GENERATED_COMMENT}
{exports}
"
),
)
.await?;
Ok(())
}
/// Generate a TypeScript module for a schema
async fn typescript_module(dest: &Path, schema: &Schema) -> Result<()> |
/// Generate a TypeScript type for a schema
///
/// Returns the name of the type and whether:
/// - it is an array
/// - it is a type (rather than an enum variant)
#[async_recursion]
async fn typescript_type(dest: &Path, schema: &Schema) -> Result<(String, bool, bool)> {
use Type::*;
// If the Stencila Schema type name corresponds to a TypeScript
// native type then return the name of the native type, otherwise
// return the pascal cased name (e.g. `integer` -> `Integer`)
let maybe_native_type = |type_name: &str| {
let lower = type_name.to_lowercase();
if NATIVE_TYPES.contains(&lower.as_str()) {
lower
} else {
type_name.to_pascal_case()
}
};
let result = if let Some(r#type) = &schema.r#type {
match r#type {
Array => {
let items = match &schema.items {
Some(Items::Ref(inner)) => maybe_native_type(&inner.r#ref),
Some(Items::Type(inner)) => maybe_native_type(&inner.r#type),
Some(Items::AnyOf(inner)) => {
let schema = Schema {
any_of: Some(inner.any_of.clone()),
..Default::default()
};
Self::typescript_type(dest, &schema).await?.0
}
Some(Items::List(inner)) => {
let schema = Schema {
any_of: Some(inner.clone()),
..Default::default()
};
Self::typescript_type(dest, &schema).await?.0
}
None => "Unhandled".to_string(),
};
(items, true, true)
}
_ => (maybe_native_type(r#type.as_ref()), false, true),
}
} else if let Some(r#ref) = &schema.r#ref {
(maybe_native_type(r#ref), false, true)
} else if schema.any_of.is_some() {
(Self::typescript_any_of(dest, schema).await?, false, true)
} else if let Some(title) = &schema.title {
(title.to_string(), false, true)
} else if let Some(r#const) = &schema.r#const {
(Self::typescript_value(r#const), false, false)
} else {
("Unhandled".to_string(), false, true)
};
Ok(result)
}
/// Generate a TypeScript `class` for an object schema with `properties`
///
/// Returns the name of the generated `class`.
async fn typescript_object(dest: &Path, title: &String, schema: &Schema) -> Result<String> {
let path = dest.join(format!("{}.ts", title));
if path.exists() {
return Ok(title.to_string());
}
let description = schema
.description
.as_ref()
.unwrap_or(title)
.trim_end_matches('\n')
.replace('\n', "\n // ");
let mut props = Vec::new();
let mut required_props = Vec::new();
let mut used_types = HashSet::new();
for (name, property) in schema.properties.iter().flatten() {
let description = property
.description
.as_ref()
.unwrap_or(name)
.trim_end_matches('\n')
.replace('\n', "\n // ");
let name = name.to_camel_case();
// Early return for "type" property
if name == "type" {
props.push(format!(" type = \"{title}\";"));
continue;
}
let mut prop = name.clone();
// Determine Typescript type of the property
let (mut prop_type, is_array, ..) = Self::typescript_type(dest, property).await?;
used_types.insert(prop_type.clone());
// Is the property optional?
if !property.is_required {
prop.push('?');
}
prop.push_str(": ");
// Is the property an array?
if is_array {
prop_type.push_str("[]");
};
prop.push_str(&prop_type);
// If the property is required, add it to the constructor args.
if property.is_required {
// An argument can not be named `arguments` so deal with that
// special case here.
required_props.push(if name == "arguments" {
(
format!("this.{name} = args;"),
format!("args: {prop_type}, "),
)
} else {
(
format!("this.{name} = {name};"),
format!("{name}: {prop_type}, "),
)
});
}
// Does the property have a default?
if let Some(default) = property.default.as_ref() {
let default = Self::typescript_value(default);
prop.push_str(&format!(" = {default}"));
};
props.push(format!(" // {description}\n {prop};"));
}
let props = props.join("\n\n");
let required_args = required_props.iter().map(|(.., arg)| arg).join("");
let required_assignments = required_props
.iter()
.map(|(assignment, ..)| assignment)
.join("\n ");
let mut imports = used_types
.into_iter()
.filter(|used_type| {
used_type != title && !NATIVE_TYPES.contains(&used_type.to_lowercase().as_str())
})
.sorted()
.map(|used_type| format!("import {{ {used_type} }} from './{used_type}';"))
.join("\n");
if !imports.is_empty() {
imports.push_str("\n\n");
}
write(
path,
&format!(
r#"{GENERATED_COMMENT}
{imports}// {description}
export class {title} {{
{props}
constructor({required_args}options?: {title}) {{
if (options) Object.assign(this, options)
{required_assignments}
}}
}}
"#
),
)
.await?;
Ok(title.to_string())
}
/// Generate a TypeScript discriminated union `type` for an `anyOf` root schema or property schema
///
/// Returns the name of the generated union type.
async fn typescript_any_of(dest: &Path, schema: &Schema) -> Result<String> {
let Some(any_of) = &schema.any_of else {
bail!("Schema has no anyOf");
};
let (alternatives, are_types): (Vec<_>, Vec<_>) =
try_join_all(any_of.iter().map(|schema| async {
let (typ, is_array, is_type) = Self::typescript_type(dest, schema).await?;
let typ = if is_array {
Self::typescript_array_of(dest, &typ).await?
} else {
typ
};
Ok::<_, Report>((typ, is_type))
}))
.await?
.into_iter()
.unzip();
let name = schema.title.clone().unwrap_or_else(|| {
alternatives
.iter()
.map(|name| name.to_pascal_case())
.join("Or")
});
let path = dest.join(format!("{}.ts", name));
if path.exists() {
return Ok(name);
}
let description = if let Some(title) = &schema.title {
schema
.description
.clone()
.unwrap_or(title.clone())
.trim_end_matches('\n')
.replace('\n', "\n // ")
} else {
alternatives
.iter()
.map(|variant| format!("`{variant}`"))
.join(" or ")
};
let alternatives = alternatives
.into_iter()
.zip(are_types.into_iter())
.collect_vec();
let mut imports = alternatives
.iter()
.sorted()
.filter_map(|(name, is_type)| {
(*is_type && !NATIVE_TYPES.contains(&name.to_lowercase().as_str()))
.then_some(format!("import {{ {name} }} from './{name}'",))
})
.join("\n");
if !imports.is_empty() {
imports.push_str("\n\n");
}
let variants = alternatives
.into_iter()
.map(|(variant, is_type)| {
if is_type {
variant
} else {
format!("'{variant}'")
}
})
.join(" |\n ");
write(
path,
format!(
r#"{GENERATED_COMMENT}
{imports}// {description}
export type {name} =
{variants};
"#
),
)
.await?;
Ok(name)
}
/// Generate a TypeScript `type` for an "array of" type
///
/// Returns the name of the generated type which will be the plural
/// of the type of the array items.
async fn typescript_array_of(dest: &Path, item_type: &str) -> Result<String> {
let name = item_type.to_plural();
let path = dest.join(format!("{}.ts", name));
if path.exists() {
return Ok(name);
}
write(
path,
format!(
r#"{GENERATED_COMMENT}
import {{ {item_type} }} from './{item_type}';
export type {name} = {item_type}[];
"#
),
)
.await?;
Ok(name)
}
/// Generate a TypeScript representation of a JSON schema value
///
/// Returns a literal representation of the value.
fn typescript_value(value: &Value) -> String {
match value {
Value::Null => "null".to_string(),
Value::Boolean(inner) => inner.to_string(),
Value::Integer(inner) => inner.to_string(),
Value::Number(inner) => inner.to_string(),
Value::String(inner) => inner.to_string(),
_ => "Unhandled value type".to_string(),
}
}
}
| {
let Some(title) = &schema.title else {
bail!("Schema has no title");
};
if NO_GENERATE_MODULE.contains(&title.as_str()) || schema.r#abstract {
return Ok(());
}
if schema.any_of.is_some() {
Self::typescript_any_of(dest, schema).await?;
} else if schema.r#type.is_none() {
Self::typescript_object(dest, title, schema).await?;
}
Ok(())
} | identifier_body |
typescript.rs | //! Generation of Typescript types from Stencila Schema
use std::{
collections::HashSet,
fs::read_dir,
path::{Path, PathBuf},
};
use common::{
async_recursion::async_recursion,
eyre::{bail, Context, Report, Result},
futures::future::try_join_all,
inflector::Inflector,
itertools::Itertools,
tokio::fs::{create_dir_all, remove_file, write},
};
use crate::schemas::{Items, Schema, Schemas, Type, Value};
/// Comment to place at the top of a file to indicate it is generated
const GENERATED_COMMENT: &str = "// Generated file; do not edit. See `../rust/schema-gen` crate.";
/// Modules that should not be generated
///
/// These modules are manually written, usually because they are
/// an alias for a native JavaScript type.
const NO_GENERATE_MODULE: &[&str] = &[
"Array",
"Boolean",
"Integer", | "Object",
"Primitive",
"String",
"TextValue",
"UnsignedInteger",
];
/// Types for which native TypeScript types are used directly.
/// Note that this excludes `Integer`, `UnsignedInteger` and `Object` which,
/// although implemented as native types, have different semantics.
const NATIVE_TYPES: &[&str] = &["null", "boolean", "number", "string"];
impl Schemas {
/// Generate a TypeScript module for each schema
pub async fn typescript(&self) -> Result<()> {
eprintln!("Generating TypeScript types");
// The top level destination
let dest = PathBuf::from(env!("CARGO_MANIFEST_DIR")).join("../../typescript/src");
let dest = dest
.canonicalize()
.context(format!("can not find directory `{}`", dest.display()))?;
// The types directory that modules get generated into
let types = dest.join("types");
if types.exists() {
// Already exists, so clean up existing files, except for those that are not generated
for file in read_dir(&types)?.flatten() {
let path = file.path();
if NO_GENERATE_MODULE.contains(
&path
.file_name()
.unwrap()
.to_string_lossy()
.strip_suffix(".ts")
.unwrap(),
) {
continue;
}
remove_file(&path).await?
}
} else {
// Doesn't exist, so create it
create_dir_all(&types).await?;
}
// Create a module for each schema
let futures = self
.schemas
.values()
.map(|schema| Self::typescript_module(&types, schema));
try_join_all(futures).await?;
// Create an index.ts which export types from all modules (including those
// that are not generated)
let exports = read_dir(&types)
.wrap_err(format!("unable to read directory `{}`", types.display()))?
.flatten()
.map(|entry| {
entry
.path()
.file_name()
.unwrap()
.to_string_lossy()
.strip_suffix(".ts")
.unwrap()
.to_string()
})
.sorted()
.map(|module| format!("export * from './types/{module}';"))
.join("\n");
write(
dest.join("index.ts"),
format!(
r"{GENERATED_COMMENT}
{exports}
"
),
)
.await?;
Ok(())
}
/// Generate a TypeScript module for a schema
async fn typescript_module(dest: &Path, schema: &Schema) -> Result<()> {
let Some(title) = &schema.title else {
bail!("Schema has no title");
};
if NO_GENERATE_MODULE.contains(&title.as_str()) || schema.r#abstract {
return Ok(());
}
if schema.any_of.is_some() {
Self::typescript_any_of(dest, schema).await?;
} else if schema.r#type.is_none() {
Self::typescript_object(dest, title, schema).await?;
}
Ok(())
}
/// Generate a TypeScript type for a schema
///
/// Returns the name of the type and whether:
/// - it is an array
/// - it is a type (rather than an enum variant)
#[async_recursion]
async fn typescript_type(dest: &Path, schema: &Schema) -> Result<(String, bool, bool)> {
use Type::*;
// If the Stencila Schema type name corresponds to a TypeScript
// native type then return the name of the native type, otherwise
// return the pascal cased name (e.g. `integer` -> `Integer`)
let maybe_native_type = |type_name: &str| {
let lower = type_name.to_lowercase();
if NATIVE_TYPES.contains(&lower.as_str()) {
lower
} else {
type_name.to_pascal_case()
}
};
let result = if let Some(r#type) = &schema.r#type {
match r#type {
Array => {
let items = match &schema.items {
Some(Items::Ref(inner)) => maybe_native_type(&inner.r#ref),
Some(Items::Type(inner)) => maybe_native_type(&inner.r#type),
Some(Items::AnyOf(inner)) => {
let schema = Schema {
any_of: Some(inner.any_of.clone()),
..Default::default()
};
Self::typescript_type(dest, &schema).await?.0
}
Some(Items::List(inner)) => {
let schema = Schema {
any_of: Some(inner.clone()),
..Default::default()
};
Self::typescript_type(dest, &schema).await?.0
}
None => "Unhandled".to_string(),
};
(items, true, true)
}
_ => (maybe_native_type(r#type.as_ref()), false, true),
}
} else if let Some(r#ref) = &schema.r#ref {
(maybe_native_type(r#ref), false, true)
} else if schema.any_of.is_some() {
(Self::typescript_any_of(dest, schema).await?, false, true)
} else if let Some(title) = &schema.title {
(title.to_string(), false, true)
} else if let Some(r#const) = &schema.r#const {
(Self::typescript_value(r#const), false, false)
} else {
("Unhandled".to_string(), false, true)
};
Ok(result)
}
/// Generate a TypeScript `class` for an object schema with `properties`
///
/// Returns the name of the generated `class`.
async fn typescript_object(dest: &Path, title: &String, schema: &Schema) -> Result<String> {
let path = dest.join(format!("{}.ts", title));
if path.exists() {
return Ok(title.to_string());
}
let description = schema
.description
.as_ref()
.unwrap_or(title)
.trim_end_matches('\n')
.replace('\n', "\n // ");
let mut props = Vec::new();
let mut required_props = Vec::new();
let mut used_types = HashSet::new();
for (name, property) in schema.properties.iter().flatten() {
let description = property
.description
.as_ref()
.unwrap_or(name)
.trim_end_matches('\n')
.replace('\n', "\n // ");
let name = name.to_camel_case();
// Early return for "type" property
if name == "type" {
props.push(format!(" type = \"{title}\";"));
continue;
}
let mut prop = name.clone();
// Determine Typescript type of the property
let (mut prop_type, is_array, ..) = Self::typescript_type(dest, property).await?;
used_types.insert(prop_type.clone());
// Is the property optional?
if !property.is_required {
prop.push('?');
}
prop.push_str(": ");
// Is the property an array?
if is_array {
prop_type.push_str("[]");
};
prop.push_str(&prop_type);
// If the property is required, add it to the constructor args.
if property.is_required {
// An argument can not be named `arguments` so deal with that
// special case here.
required_props.push(if name == "arguments" {
(
format!("this.{name} = args;"),
format!("args: {prop_type}, "),
)
} else {
(
format!("this.{name} = {name};"),
format!("{name}: {prop_type}, "),
)
});
}
// Does the property have a default?
if let Some(default) = property.default.as_ref() {
let default = Self::typescript_value(default);
prop.push_str(&format!(" = {default}"));
};
props.push(format!(" // {description}\n {prop};"));
}
let props = props.join("\n\n");
let required_args = required_props.iter().map(|(.., arg)| arg).join("");
let required_assignments = required_props
.iter()
.map(|(assignment, ..)| assignment)
.join("\n ");
let mut imports = used_types
.into_iter()
.filter(|used_type| {
used_type != title && !NATIVE_TYPES.contains(&used_type.to_lowercase().as_str())
})
.sorted()
.map(|used_type| format!("import {{ {used_type} }} from './{used_type}';"))
.join("\n");
if !imports.is_empty() {
imports.push_str("\n\n");
}
write(
path,
&format!(
r#"{GENERATED_COMMENT}
{imports}// {description}
export class {title} {{
{props}
constructor({required_args}options?: {title}) {{
if (options) Object.assign(this, options)
{required_assignments}
}}
}}
"#
),
)
.await?;
Ok(title.to_string())
}
/// Generate a TypeScript discriminated union `type` for an `anyOf` root schema or property schema
///
/// Returns the name of the generated enum.
async fn typescript_any_of(dest: &Path, schema: &Schema) -> Result<String> {
let Some(any_of) = &schema.any_of else {
bail!("Schema has no anyOf");
};
let (alternatives, are_types): (Vec<_>, Vec<_>) =
try_join_all(any_of.iter().map(|schema| async {
let (typ, is_array, is_type) = Self::typescript_type(dest, schema).await?;
let typ = if is_array {
Self::typescript_array_of(dest, &typ).await?
} else {
typ
};
Ok::<_, Report>((typ, is_type))
}))
.await?
.into_iter()
.unzip();
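        // For untitled (property-level) unions, derive a name by pascal-casing
        // each alternative and joining them with "Or", e.g. `StringOrNumber`.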
let name = schema.title.clone().unwrap_or_else(|| {
alternatives
.iter()
.map(|name| name.to_pascal_case())
.join("Or")
});
let path = dest.join(format!("{}.ts", name));
if path.exists() {
return Ok(name);
}
let description = if let Some(title) = &schema.title {
schema
.description
.clone()
.unwrap_or(title.clone())
.trim_end_matches('\n')
.replace('\n', "\n // ")
} else {
alternatives
.iter()
.map(|variant| format!("`{variant}`"))
.join(" or ")
};
let alternatives = alternatives
.into_iter()
.zip(are_types.into_iter())
.collect_vec();
let mut imports = alternatives
.iter()
.sorted()
.filter_map(|(name, is_type)| {
(*is_type && !NATIVE_TYPES.contains(&name.to_lowercase().as_str()))
.then_some(format!("import {{ {name} }} from './{name}'",))
})
.join("\n");
if !imports.is_empty() {
imports.push_str("\n\n");
}
let variants = alternatives
.into_iter()
.map(|(variant, is_type)| {
if is_type {
variant
} else {
format!("'{variant}'")
}
})
.join(" |\n ");
write(
path,
format!(
r#"{GENERATED_COMMENT}
{imports}// {description}
export type {name} =
{variants};
"#
),
)
.await?;
Ok(name)
}
/// Generate a TypeScript `type` for an "array of" type
///
/// Returns the name of the generated type which will be the plural
/// of the type of the array items.
async fn typescript_array_of(dest: &Path, item_type: &str) -> Result<String> {
let name = item_type.to_plural();
let path = dest.join(format!("{}.ts", name));
if path.exists() {
return Ok(name);
}
write(
path,
format!(
r#"{GENERATED_COMMENT}
import {{ {item_type} }} from './{item_type}';
export type {name} = {item_type}[];
"#
),
)
.await?;
Ok(name)
}
/// Generate a TypeScript representation of a JSON schema value
///
/// Returns a literal to the type of value.
fn typescript_value(value: &Value) -> String {
match value {
Value::Null => "null".to_string(),
Value::Boolean(inner) => inner.to_string(),
Value::Integer(inner) => inner.to_string(),
Value::Number(inner) => inner.to_string(),
Value::String(inner) => inner.to_string(),
_ => "Unhandled value type".to_string(),
}
}
} | "Null",
"Number", | random_line_split |
server.go | package shardmaster
import "net"
import "fmt"
import "net/rpc"
import "log"
import "paxos"
import "sync"
import "os"
import "syscall"
import "encoding/gob"
import "math/rand"
import "time"
import "strconv"
import "sort"
import "reflect"
const (
Debug = false
InitTimeout = 10 * time.Millisecond
)
type ShardAlloc struct {
GID int64
NumShards int
Shards []int
}
// implements sort.Interface for []ShardAlloc
type ByNumShardsInc []ShardAlloc
func (a ByNumShardsInc) Len() int { return len(a) }
func (a ByNumShardsInc) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
func (a ByNumShardsInc) Less(i, j int) bool { return a[i].NumShards < a[j].NumShards }
type ByNumShardsDec []ShardAlloc
func (a ByNumShardsDec) Len() int { return len(a) }
func (a ByNumShardsDec) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
func (a ByNumShardsDec) Less(i, j int) bool { return a[i].NumShards > a[j].NumShards }
type ShardMaster struct {
mu sync.Mutex
l net.Listener
me int
dead bool // for testing
unreliable bool // for testing
px *paxos.Paxos
configs []Config // indexed by config num
lastAppliedSeq int
}
const (
OK = "OK"
ErrNoop = "ErrNoop"
ErrInvalid = "ErrInvalid"
ErrGIDAlreadyJoined = "ErrGIDAlreadyJoined"
ErrGIDAlreadyLeft = "ErrGIDAlreadyLeft"
)
type Err string
const (
Join = "Join"
Leave = "Leave"
Move = "Move"
Query = "Query"
Noop = "Noop"
)
type Operation string
type Op struct {
Operation Operation
Args interface{}
}
func RandMTime() time.Duration {
return time.Duration(rand.Int()%100) * time.Millisecond
}
func MakeConfig(num int, shards [NShards]int64, groups map[int64][]string) *Config {
config := &Config{}
config.Num = num
config.Shards = shards
config.Groups = groups
return config
}
func CopyGroups(src map[int64][]string) map[int64][]string {
dst := make(map[int64][]string)
for k, v := range src {
dst[k] = v
}
return dst
}
func CopyShards(src [NShards]int64) [NShards]int64 {
var dst [NShards]int64
for shard, gid := range src {
dst[shard] = gid
}
return dst
}
func MakeShardAllocs(groups map[int64][]string, shards [NShards]int64) map[int64]*ShardAlloc {
availGids := make(map[int64]*ShardAlloc)
for gid, _ := range groups {
availGids[gid] = &ShardAlloc{GID: gid, NumShards: 0, Shards: []int{}}
}
for shard, gid := range shards {
alloc, exists := availGids[gid]
		// A gid taken from shards should exist in groups; gid 0 (unassigned) will not, so skip it
if exists {
alloc.NumShards += 1
alloc.Shards = append(alloc.Shards, shard)
}
}
return availGids
}
func SortShardAllocs(availGids map[int64]*ShardAlloc, order string) []ShardAlloc {
var shardAllocs []ShardAlloc
for _, v := range availGids {
shardAllocs = append(shardAllocs, *v)
}
if order == "dec" {
sort.Sort(ByNumShardsDec(shardAllocs))
} else {
sort.Sort(ByNumShardsInc(shardAllocs))
}
return shardAllocs
}
func (sm *ShardMaster) Join(args *JoinArgs, reply *JoinReply) error {
sm.log("Join RPC: gid=%d, servers=%s", args.GID, args.Servers)
op := Op{Operation: Join, Args: *args}
sm.resolveOp(op)
return nil
}
func (sm *ShardMaster) Leave(args *LeaveArgs, reply *LeaveReply) error {
sm.log("Leave RPC: gid=%d", args.GID)
op := Op{Operation: Leave, Args: *args}
sm.resolveOp(op)
return nil
}
func (sm *ShardMaster) Move(args *MoveArgs, reply *MoveReply) error {
sm.log("Move RPC: shard=%d, to_gid=%d", args.Shard, args.GID)
op := Op{Operation: Move, Args: *args}
sm.resolveOp(op)
return nil
}
func (sm *ShardMaster) Query(args *QueryArgs, reply *QueryReply) error {
sm.log("Query RPC: config_num=%d", args.Num)
op := Op{Operation: Query, Args: *args}
config := sm.resolveOp(op)
reply.Config = config
return nil
}
func (sm *ShardMaster) resolveOp(op Op) Config {
seq := sm.px.Max() + 1
sm.log("Resolve init seq=%d", seq)
sm.px.Start(seq, op)
timeout := InitTimeout
time.Sleep(timeout)
decided, val := sm.px.Status(seq)
var valOp Op
if val != nil {
valOp = val.(Op)
}
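	// Keep proposing until this exact op is the decided value for some slot.
	// If a different op won the slot, or the slot was already applied, move to
	// a fresh slot and retry with a randomised, growing backoff.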
for !decided || !reflect.DeepEqual(valOp, op) {
if (decided && !reflect.DeepEqual(valOp, op)) || (seq <= sm.lastAppliedSeq) {
sm.log("Seq=%d already decided", seq)
seq = sm.px.Max() + 1
sm.px.Start(seq, op)
}
sm.log("Retry w/ seq=%d", seq)
time.Sleep(timeout + RandMTime())
if timeout < 100*time.Millisecond {
timeout *= 2
}
decided, val = sm.px.Status(seq)
if val != nil {
valOp = val.(Op)
}
}
sm.log("Seq=%d decided!", seq)
// block until seq op has been applied
for sm.lastAppliedSeq < seq {
time.Sleep(InitTimeout)
}
sm.px.Done(seq)
if op.Operation == Query {
num := op.Args.(QueryArgs).Num
config, _ := sm.applyQuery(num)
return *config
}
return Config{}
}
func (sm *ShardMaster) applyJoin(joinGid int64, servers []string) (*Config, Err) {
// Get newest config
config, num := sm.newestConfig()
newConfigNum := num + 1
_, exists := config.Groups[joinGid]
if exists {
return &Config{}, ErrGIDAlreadyJoined
}
// Add new gid into groups mapping
groups := CopyGroups(config.Groups)
groups[joinGid] = servers
shards := CopyShards(config.Shards)
if len(groups) == 1 {
for i, _ := range shards {
shards[i] = joinGid
}
} else {
// Redistribute shards
shardAllocs := MakeShardAllocs(config.Groups, config.Shards)
sorted := SortShardAllocs(shardAllocs, "dec")
shardsPerGroup := NShards / len(groups)
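		// Pull shards one at a time from the most heavily loaded groups
		// (sorted in decreasing order) and hand them to the joining group.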
for i := 0; i < shardsPerGroup; i += 1 {
index := i % len(sorted)
shardAlloc := sorted[index]
s := shardAlloc.Shards
sorted[index].Shards = s[1:len(s)]
shard := s[0]
shards[shard] = joinGid
}
}
return MakeConfig(newConfigNum, shards, groups), OK
}
func (sm *ShardMaster) applyLeave(leaveGid int64) (*Config, Err) {
// Get newest config
config, num := sm.newestConfig()
newConfigNum := num + 1
_, exists := config.Groups[leaveGid]
if !exists {
return &Config{}, ErrGIDAlreadyLeft
}
// Remove gid from groups mapping
groups := CopyGroups(config.Groups)
delete(groups, leaveGid)
shards := CopyShards(config.Shards)
if len(groups) == 0 {
for i, _ := range shards {
shards[i] = 0
}
} else {
shardAllocs := MakeShardAllocs(config.Groups, config.Shards)
sorted := SortShardAllocs(shardAllocs, "inc")
freeShards := shardAllocs[leaveGid].Shards
// Assign shard to new gid
rotate := 0
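		// Walk the remaining groups in increasing-load order, skipping the
		// leaving group itself, and reassign each orphaned shard in turn.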
for _, shard := range freeShards {
gid := leaveGid
for gid == leaveGid {
gid = sorted[rotate%len(sorted)].GID
rotate += 1
}
shards[shard] = gid
}
}
return MakeConfig(newConfigNum, shards, groups), OK
}
func (sm *ShardMaster) applyMove(movingShard int, newGid int64) (*Config, Err) {
// Get newest config
config, num := sm.newestConfig()
newConfigNum := num + 1
_, exists := config.Groups[newGid]
if !exists {
return &Config{}, ErrGIDAlreadyLeft
}
groups := CopyGroups(config.Groups)
shards := CopyShards(config.Shards)
// Move shard into new gid
shards[movingShard] = newGid
return MakeConfig(newConfigNum, shards, groups), OK
}
func (sm *ShardMaster) applyQuery(num int) (*Config, Err) {
// Get newest config if requested config num out of range
if num < 0 || num >= len(sm.configs) {
config, _ := sm.newestConfig()
return &config, OK
}
return &sm.configs[num], OK
}
func (sm *ShardMaster) applyOp(op *Op) (*Config, Err) {
// Return early for a noop
if op.Operation == Noop {
return &Config{}, ErrNoop
}
switch op.Operation {
case Join:
args := op.Args.(JoinArgs)
return sm.applyJoin(args.GID, args.Servers)
case Leave:
args := op.Args.(LeaveArgs)
return sm.applyLeave(args.GID)
case Move:
args := op.Args.(MoveArgs)
return sm.applyMove(args.Shard, args.GID)
case Query:
args := op.Args.(QueryArgs)
return sm.applyQuery(args.Num)
}
// Should not reach this point
return &Config{}, ErrInvalid
}
func (sm *ShardMaster) newestConfig() (Config, int) {
i := len(sm.configs) - 1
return sm.configs[i], i
}
func (sm *ShardMaster) tick() {
timeout := InitTimeout
for sm.dead == false {
seq := sm.lastAppliedSeq + 1
decided, result := sm.px.Status(seq)
if decided {
// apply the operation
op, _ := result.(Op)
sm.log("Applying %s from seq=%d", op.Operation, seq)
config, err := sm.applyOp(&op)
sm.lastAppliedSeq += 1
sm.mu.Lock()
if op.Operation != Query && err == OK {
sm.configs = append(sm.configs, *config)
}
sm.mu.Unlock()
// reset timeout
timeout = InitTimeout
} else {
// sm.log("Retry for seq=%d", seq)
if timeout >= 1*time.Second {
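			// The slot has stayed undecided for too long; propose a no-op so
			// Paxos fills the gap and the apply loop can make progress.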
sm.log("Try noop for seq=%d", seq)
sm.px.Start(seq, Op{Operation: Noop})
// wait for noop to return
noopDone := false
for !noopDone {
noopDone, _ = sm.px.Status(seq)
time.Sleep(100 * time.Millisecond)
}
} else {
// wait before retrying
time.Sleep(timeout)
if timeout < 1*time.Second {
				// exponential backoff
timeout *= 2
}
}
}
}
}
func (sm *ShardMaster) log(format string, a ...interface{}) (n int, err error) {
if Debug {
addr := "Srv#" + strconv.Itoa(sm.me)
n, err = fmt.Printf(addr+": "+format+"\n", a...)
}
return
}
// please don't change this function.
func (sm *ShardMaster) Kill() {
sm.dead = true
sm.l.Close()
sm.px.Kill()
}
//
// servers[] contains the ports of the set of
// servers that will cooperate via Paxos to
// form the fault-tolerant shardmaster service.
// me is the index of the current server in servers[].
//
func StartServer(servers []string, me int) *ShardMaster {
gob.Register(Op{})
gob.Register(JoinArgs{})
gob.Register(LeaveArgs{})
gob.Register(MoveArgs{})
gob.Register(QueryArgs{})
| sm := new(ShardMaster)
sm.me = me
sm.configs = make([]Config, 1)
sm.configs[0].Groups = make(map[int64][]string)
sm.lastAppliedSeq = -1
rpcs := rpc.NewServer()
rpcs.Register(sm)
sm.px = paxos.Make(servers, me, rpcs)
os.Remove(servers[me])
l, e := net.Listen("unix", servers[me])
if e != nil {
log.Fatal("listen error: ", e)
}
sm.l = l
// please do not change any of the following code,
// or do anything to subvert it.
go func() {
for sm.dead == false {
conn, err := sm.l.Accept()
if err == nil && sm.dead == false {
if sm.unreliable && (rand.Int63()%1000) < 100 {
// discard the request.
conn.Close()
} else if sm.unreliable && (rand.Int63()%1000) < 200 {
// process the request but force discard of reply.
c1 := conn.(*net.UnixConn)
f, _ := c1.File()
err := syscall.Shutdown(int(f.Fd()), syscall.SHUT_WR)
if err != nil {
fmt.Printf("shutdown: %v\n", err)
}
go rpcs.ServeConn(conn)
} else {
go rpcs.ServeConn(conn)
}
} else if err == nil {
conn.Close()
}
if err != nil && sm.dead == false {
fmt.Printf("ShardMaster(%v) accept: %v\n", me, err.Error())
sm.Kill()
}
}
}()
go sm.tick()
return sm
} | random_line_split | |
server.go | package shardmaster
import "net"
import "fmt"
import "net/rpc"
import "log"
import "paxos"
import "sync"
import "os"
import "syscall"
import "encoding/gob"
import "math/rand"
import "time"
import "strconv"
import "sort"
import "reflect"
const (
Debug = false
InitTimeout = 10 * time.Millisecond
)
type ShardAlloc struct {
GID int64
NumShards int
Shards []int
}
// implements sort.Interface for []ShardAlloc
type ByNumShardsInc []ShardAlloc
func (a ByNumShardsInc) Len() int { return len(a) }
func (a ByNumShardsInc) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
func (a ByNumShardsInc) Less(i, j int) bool { return a[i].NumShards < a[j].NumShards }
type ByNumShardsDec []ShardAlloc
func (a ByNumShardsDec) Len() int { return len(a) }
func (a ByNumShardsDec) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
func (a ByNumShardsDec) Less(i, j int) bool { return a[i].NumShards > a[j].NumShards }
type ShardMaster struct {
mu sync.Mutex
l net.Listener
me int
dead bool // for testing
unreliable bool // for testing
px *paxos.Paxos
configs []Config // indexed by config num
lastAppliedSeq int
}
const (
OK = "OK"
ErrNoop = "ErrNoop"
ErrInvalid = "ErrInvalid"
ErrGIDAlreadyJoined = "ErrGIDAlreadyJoined"
ErrGIDAlreadyLeft = "ErrGIDAlreadyLeft"
)
type Err string
const (
Join = "Join"
Leave = "Leave"
Move = "Move"
Query = "Query"
Noop = "Noop"
)
type Operation string
type Op struct {
Operation Operation
Args interface{}
}
func RandMTime() time.Duration {
return time.Duration(rand.Int()%100) * time.Millisecond
}
func MakeConfig(num int, shards [NShards]int64, groups map[int64][]string) *Config {
config := &Config{}
config.Num = num
config.Shards = shards
config.Groups = groups
return config
}
func CopyGroups(src map[int64][]string) map[int64][]string {
dst := make(map[int64][]string)
for k, v := range src {
dst[k] = v
}
return dst
}
func CopyShards(src [NShards]int64) [NShards]int64 {
var dst [NShards]int64
for shard, gid := range src {
dst[shard] = gid
}
return dst
}
func MakeShardAllocs(groups map[int64][]string, shards [NShards]int64) map[int64]*ShardAlloc {
availGids := make(map[int64]*ShardAlloc)
for gid, _ := range groups {
availGids[gid] = &ShardAlloc{GID: gid, NumShards: 0, Shards: []int{}}
}
for shard, gid := range shards {
alloc, exists := availGids[gid]
// Invariant: gid should always exist in shard
if exists {
alloc.NumShards += 1
alloc.Shards = append(alloc.Shards, shard)
}
}
return availGids
}
func SortShardAllocs(availGids map[int64]*ShardAlloc, order string) []ShardAlloc {
var shardAllocs []ShardAlloc
for _, v := range availGids {
shardAllocs = append(shardAllocs, *v)
}
if order == "dec" {
sort.Sort(ByNumShardsDec(shardAllocs))
} else {
sort.Sort(ByNumShardsInc(shardAllocs))
}
return shardAllocs
}
func (sm *ShardMaster) Join(args *JoinArgs, reply *JoinReply) error {
sm.log("Join RPC: gid=%d, servers=%s", args.GID, args.Servers)
op := Op{Operation: Join, Args: *args}
sm.resolveOp(op)
return nil
}
func (sm *ShardMaster) Leave(args *LeaveArgs, reply *LeaveReply) error {
sm.log("Leave RPC: gid=%d", args.GID)
op := Op{Operation: Leave, Args: *args}
sm.resolveOp(op)
return nil
}
func (sm *ShardMaster) Move(args *MoveArgs, reply *MoveReply) error {
sm.log("Move RPC: shard=%d, to_gid=%d", args.Shard, args.GID)
op := Op{Operation: Move, Args: *args}
sm.resolveOp(op)
return nil
}
func (sm *ShardMaster) Query(args *QueryArgs, reply *QueryReply) error {
sm.log("Query RPC: config_num=%d", args.Num)
op := Op{Operation: Query, Args: *args}
config := sm.resolveOp(op)
reply.Config = config
return nil
}
func (sm *ShardMaster) resolveOp(op Op) Config {
seq := sm.px.Max() + 1
sm.log("Resolve init seq=%d", seq)
sm.px.Start(seq, op)
timeout := InitTimeout
time.Sleep(timeout)
decided, val := sm.px.Status(seq)
var valOp Op
if val != nil {
valOp = val.(Op)
}
for !decided || !reflect.DeepEqual(valOp, op) {
if (decided && !reflect.DeepEqual(valOp, op)) || (seq <= sm.lastAppliedSeq) {
sm.log("Seq=%d already decided", seq)
seq = sm.px.Max() + 1
sm.px.Start(seq, op)
}
sm.log("Retry w/ seq=%d", seq)
time.Sleep(timeout + RandMTime())
if timeout < 100*time.Millisecond {
timeout *= 2
}
decided, val = sm.px.Status(seq)
if val != nil {
valOp = val.(Op)
}
}
sm.log("Seq=%d decided!", seq)
// block until seq op has been applied
for sm.lastAppliedSeq < seq {
time.Sleep(InitTimeout)
}
sm.px.Done(seq)
if op.Operation == Query {
num := op.Args.(QueryArgs).Num
config, _ := sm.applyQuery(num)
return *config
}
return Config{}
}
func (sm *ShardMaster) applyJoin(joinGid int64, servers []string) (*Config, Err) {
// Get newest config
config, num := sm.newestConfig()
newConfigNum := num + 1
_, exists := config.Groups[joinGid]
if exists {
return &Config{}, ErrGIDAlreadyJoined
}
// Add new gid into groups mapping
groups := CopyGroups(config.Groups)
groups[joinGid] = servers
shards := CopyShards(config.Shards)
if len(groups) == 1 {
for i, _ := range shards {
shards[i] = joinGid
}
} else {
// Redistribute shards
shardAllocs := MakeShardAllocs(config.Groups, config.Shards)
sorted := SortShardAllocs(shardAllocs, "dec")
shardsPerGroup := NShards / len(groups)
for i := 0; i < shardsPerGroup; i += 1 {
index := i % len(sorted)
shardAlloc := sorted[index]
s := shardAlloc.Shards
sorted[index].Shards = s[1:len(s)]
shard := s[0]
shards[shard] = joinGid
}
}
return MakeConfig(newConfigNum, shards, groups), OK
}
func (sm *ShardMaster) applyLeave(leaveGid int64) (*Config, Err) {
// Get newest config
config, num := sm.newestConfig()
newConfigNum := num + 1
_, exists := config.Groups[leaveGid]
if !exists {
return &Config{}, ErrGIDAlreadyLeft
}
// Remove gid from groups mapping
groups := CopyGroups(config.Groups)
delete(groups, leaveGid)
shards := CopyShards(config.Shards)
if len(groups) == 0 {
for i, _ := range shards {
shards[i] = 0
}
} else {
shardAllocs := MakeShardAllocs(config.Groups, config.Shards)
sorted := SortShardAllocs(shardAllocs, "inc")
freeShards := shardAllocs[leaveGid].Shards
// Assign shard to new gid
rotate := 0
for _, shard := range freeShards {
gid := leaveGid
for gid == leaveGid {
gid = sorted[rotate%len(sorted)].GID
rotate += 1
}
shards[shard] = gid
}
}
return MakeConfig(newConfigNum, shards, groups), OK
}
func (sm *ShardMaster) applyMove(movingShard int, newGid int64) (*Config, Err) {
// Get newest config
config, num := sm.newestConfig()
newConfigNum := num + 1
_, exists := config.Groups[newGid]
if !exists {
return &Config{}, ErrGIDAlreadyLeft
}
groups := CopyGroups(config.Groups)
shards := CopyShards(config.Shards)
// Move shard into new gid
shards[movingShard] = newGid
return MakeConfig(newConfigNum, shards, groups), OK
}
func (sm *ShardMaster) applyQuery(num int) (*Config, Err) {
// Get newest config if requested config num out of range
if num < 0 || num >= len(sm.configs) {
config, _ := sm.newestConfig()
return &config, OK
}
return &sm.configs[num], OK
}
func (sm *ShardMaster) applyOp(op *Op) (*Config, Err) {
// Return early for a noop
if op.Operation == Noop |
switch op.Operation {
case Join:
args := op.Args.(JoinArgs)
return sm.applyJoin(args.GID, args.Servers)
case Leave:
args := op.Args.(LeaveArgs)
return sm.applyLeave(args.GID)
case Move:
args := op.Args.(MoveArgs)
return sm.applyMove(args.Shard, args.GID)
case Query:
args := op.Args.(QueryArgs)
return sm.applyQuery(args.Num)
}
// Should not reach this point
return &Config{}, ErrInvalid
}
func (sm *ShardMaster) newestConfig() (Config, int) {
i := len(sm.configs) - 1
return sm.configs[i], i
}
func (sm *ShardMaster) tick() {
timeout := InitTimeout
for sm.dead == false {
seq := sm.lastAppliedSeq + 1
decided, result := sm.px.Status(seq)
if decided {
// apply the operation
op, _ := result.(Op)
sm.log("Applying %s from seq=%d", op.Operation, seq)
config, err := sm.applyOp(&op)
sm.lastAppliedSeq += 1
sm.mu.Lock()
if op.Operation != Query && err == OK {
sm.configs = append(sm.configs, *config)
}
sm.mu.Unlock()
// reset timeout
timeout = InitTimeout
} else {
// sm.log("Retry for seq=%d", seq)
if timeout >= 1*time.Second {
sm.log("Try noop for seq=%d", seq)
sm.px.Start(seq, Op{Operation: Noop})
// wait for noop to return
noopDone := false
for !noopDone {
noopDone, _ = sm.px.Status(seq)
time.Sleep(100 * time.Millisecond)
}
} else {
// wait before retrying
time.Sleep(timeout)
if timeout < 1*time.Second {
				// exponential backoff
timeout *= 2
}
}
}
}
}
func (sm *ShardMaster) log(format string, a ...interface{}) (n int, err error) {
if Debug {
addr := "Srv#" + strconv.Itoa(sm.me)
n, err = fmt.Printf(addr+": "+format+"\n", a...)
}
return
}
// please don't change this function.
func (sm *ShardMaster) Kill() {
sm.dead = true
sm.l.Close()
sm.px.Kill()
}
//
// servers[] contains the ports of the set of
// servers that will cooperate via Paxos to
// form the fault-tolerant shardmaster service.
// me is the index of the current server in servers[].
//
func StartServer(servers []string, me int) *ShardMaster {
gob.Register(Op{})
gob.Register(JoinArgs{})
gob.Register(LeaveArgs{})
gob.Register(MoveArgs{})
gob.Register(QueryArgs{})
sm := new(ShardMaster)
sm.me = me
sm.configs = make([]Config, 1)
sm.configs[0].Groups = make(map[int64][]string)
sm.lastAppliedSeq = -1
rpcs := rpc.NewServer()
rpcs.Register(sm)
sm.px = paxos.Make(servers, me, rpcs)
os.Remove(servers[me])
l, e := net.Listen("unix", servers[me])
if e != nil {
log.Fatal("listen error: ", e)
}
sm.l = l
// please do not change any of the following code,
// or do anything to subvert it.
go func() {
for sm.dead == false {
conn, err := sm.l.Accept()
if err == nil && sm.dead == false {
if sm.unreliable && (rand.Int63()%1000) < 100 {
// discard the request.
conn.Close()
} else if sm.unreliable && (rand.Int63()%1000) < 200 {
// process the request but force discard of reply.
c1 := conn.(*net.UnixConn)
f, _ := c1.File()
err := syscall.Shutdown(int(f.Fd()), syscall.SHUT_WR)
if err != nil {
fmt.Printf("shutdown: %v\n", err)
}
go rpcs.ServeConn(conn)
} else {
go rpcs.ServeConn(conn)
}
} else if err == nil {
conn.Close()
}
if err != nil && sm.dead == false {
fmt.Printf("ShardMaster(%v) accept: %v\n", me, err.Error())
sm.Kill()
}
}
}()
go sm.tick()
return sm
}
| {
return &Config{}, ErrNoop
} | conditional_block |
server.go | package shardmaster
import "net"
import "fmt"
import "net/rpc"
import "log"
import "paxos"
import "sync"
import "os"
import "syscall"
import "encoding/gob"
import "math/rand"
import "time"
import "strconv"
import "sort"
import "reflect"
const (
Debug = false
InitTimeout = 10 * time.Millisecond
)
type ShardAlloc struct {
GID int64
NumShards int
Shards []int
}
// implements sort.Interface for []ShardAlloc
type ByNumShardsInc []ShardAlloc
func (a ByNumShardsInc) Len() int { return len(a) }
func (a ByNumShardsInc) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
func (a ByNumShardsInc) Less(i, j int) bool { return a[i].NumShards < a[j].NumShards }
type ByNumShardsDec []ShardAlloc
func (a ByNumShardsDec) Len() int { return len(a) }
func (a ByNumShardsDec) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
func (a ByNumShardsDec) | (i, j int) bool { return a[i].NumShards > a[j].NumShards }
type ShardMaster struct {
mu sync.Mutex
l net.Listener
me int
dead bool // for testing
unreliable bool // for testing
px *paxos.Paxos
configs []Config // indexed by config num
lastAppliedSeq int
}
const (
OK = "OK"
ErrNoop = "ErrNoop"
ErrInvalid = "ErrInvalid"
ErrGIDAlreadyJoined = "ErrGIDAlreadyJoined"
ErrGIDAlreadyLeft = "ErrGIDAlreadyLeft"
)
type Err string
const (
Join = "Join"
Leave = "Leave"
Move = "Move"
Query = "Query"
Noop = "Noop"
)
type Operation string
type Op struct {
Operation Operation
Args interface{}
}
func RandMTime() time.Duration {
return time.Duration(rand.Int()%100) * time.Millisecond
}
func MakeConfig(num int, shards [NShards]int64, groups map[int64][]string) *Config {
config := &Config{}
config.Num = num
config.Shards = shards
config.Groups = groups
return config
}
func CopyGroups(src map[int64][]string) map[int64][]string {
dst := make(map[int64][]string)
for k, v := range src {
dst[k] = v
}
return dst
}
func CopyShards(src [NShards]int64) [NShards]int64 {
var dst [NShards]int64
for shard, gid := range src {
dst[shard] = gid
}
return dst
}
func MakeShardAllocs(groups map[int64][]string, shards [NShards]int64) map[int64]*ShardAlloc {
availGids := make(map[int64]*ShardAlloc)
for gid, _ := range groups {
availGids[gid] = &ShardAlloc{GID: gid, NumShards: 0, Shards: []int{}}
}
for shard, gid := range shards {
alloc, exists := availGids[gid]
// Invariant: gid should always exist in shard
if exists {
alloc.NumShards += 1
alloc.Shards = append(alloc.Shards, shard)
}
}
return availGids
}
func SortShardAllocs(availGids map[int64]*ShardAlloc, order string) []ShardAlloc {
var shardAllocs []ShardAlloc
for _, v := range availGids {
shardAllocs = append(shardAllocs, *v)
}
if order == "dec" {
sort.Sort(ByNumShardsDec(shardAllocs))
} else {
sort.Sort(ByNumShardsInc(shardAllocs))
}
return shardAllocs
}
func (sm *ShardMaster) Join(args *JoinArgs, reply *JoinReply) error {
sm.log("Join RPC: gid=%d, servers=%s", args.GID, args.Servers)
op := Op{Operation: Join, Args: *args}
sm.resolveOp(op)
return nil
}
func (sm *ShardMaster) Leave(args *LeaveArgs, reply *LeaveReply) error {
sm.log("Leave RPC: gid=%d", args.GID)
op := Op{Operation: Leave, Args: *args}
sm.resolveOp(op)
return nil
}
func (sm *ShardMaster) Move(args *MoveArgs, reply *MoveReply) error {
sm.log("Move RPC: shard=%d, to_gid=%d", args.Shard, args.GID)
op := Op{Operation: Move, Args: *args}
sm.resolveOp(op)
return nil
}
func (sm *ShardMaster) Query(args *QueryArgs, reply *QueryReply) error {
sm.log("Query RPC: config_num=%d", args.Num)
op := Op{Operation: Query, Args: *args}
config := sm.resolveOp(op)
reply.Config = config
return nil
}
func (sm *ShardMaster) resolveOp(op Op) Config {
seq := sm.px.Max() + 1
sm.log("Resolve init seq=%d", seq)
sm.px.Start(seq, op)
timeout := InitTimeout
time.Sleep(timeout)
decided, val := sm.px.Status(seq)
var valOp Op
if val != nil {
valOp = val.(Op)
}
for !decided || !reflect.DeepEqual(valOp, op) {
if (decided && !reflect.DeepEqual(valOp, op)) || (seq <= sm.lastAppliedSeq) {
sm.log("Seq=%d already decided", seq)
seq = sm.px.Max() + 1
sm.px.Start(seq, op)
}
sm.log("Retry w/ seq=%d", seq)
time.Sleep(timeout + RandMTime())
if timeout < 100*time.Millisecond {
timeout *= 2
}
decided, val = sm.px.Status(seq)
if val != nil {
valOp = val.(Op)
}
}
sm.log("Seq=%d decided!", seq)
// block until seq op has been applied
for sm.lastAppliedSeq < seq {
time.Sleep(InitTimeout)
}
sm.px.Done(seq)
if op.Operation == Query {
num := op.Args.(QueryArgs).Num
config, _ := sm.applyQuery(num)
return *config
}
return Config{}
}
func (sm *ShardMaster) applyJoin(joinGid int64, servers []string) (*Config, Err) {
// Get newest config
config, num := sm.newestConfig()
newConfigNum := num + 1
_, exists := config.Groups[joinGid]
if exists {
return &Config{}, ErrGIDAlreadyJoined
}
// Add new gid into groups mapping
groups := CopyGroups(config.Groups)
groups[joinGid] = servers
shards := CopyShards(config.Shards)
if len(groups) == 1 {
for i, _ := range shards {
shards[i] = joinGid
}
} else {
// Redistribute shards
shardAllocs := MakeShardAllocs(config.Groups, config.Shards)
sorted := SortShardAllocs(shardAllocs, "dec")
shardsPerGroup := NShards / len(groups)
for i := 0; i < shardsPerGroup; i += 1 {
index := i % len(sorted)
shardAlloc := sorted[index]
s := shardAlloc.Shards
sorted[index].Shards = s[1:len(s)]
shard := s[0]
shards[shard] = joinGid
}
}
return MakeConfig(newConfigNum, shards, groups), OK
}
func (sm *ShardMaster) applyLeave(leaveGid int64) (*Config, Err) {
// Get newest config
config, num := sm.newestConfig()
newConfigNum := num + 1
_, exists := config.Groups[leaveGid]
if !exists {
return &Config{}, ErrGIDAlreadyLeft
}
// Remove gid from groups mapping
groups := CopyGroups(config.Groups)
delete(groups, leaveGid)
shards := CopyShards(config.Shards)
if len(groups) == 0 {
for i, _ := range shards {
shards[i] = 0
}
} else {
shardAllocs := MakeShardAllocs(config.Groups, config.Shards)
sorted := SortShardAllocs(shardAllocs, "inc")
freeShards := shardAllocs[leaveGid].Shards
// Assign shard to new gid
rotate := 0
for _, shard := range freeShards {
gid := leaveGid
for gid == leaveGid {
gid = sorted[rotate%len(sorted)].GID
rotate += 1
}
shards[shard] = gid
}
}
return MakeConfig(newConfigNum, shards, groups), OK
}
func (sm *ShardMaster) applyMove(movingShard int, newGid int64) (*Config, Err) {
// Get newest config
config, num := sm.newestConfig()
newConfigNum := num + 1
_, exists := config.Groups[newGid]
if !exists {
return &Config{}, ErrGIDAlreadyLeft
}
groups := CopyGroups(config.Groups)
shards := CopyShards(config.Shards)
// Move shard into new gid
shards[movingShard] = newGid
return MakeConfig(newConfigNum, shards, groups), OK
}
func (sm *ShardMaster) applyQuery(num int) (*Config, Err) {
// Get newest config if requested config num out of range
if num < 0 || num >= len(sm.configs) {
config, _ := sm.newestConfig()
return &config, OK
}
return &sm.configs[num], OK
}
func (sm *ShardMaster) applyOp(op *Op) (*Config, Err) {
// Return early for a noop
if op.Operation == Noop {
return &Config{}, ErrNoop
}
switch op.Operation {
case Join:
args := op.Args.(JoinArgs)
return sm.applyJoin(args.GID, args.Servers)
case Leave:
args := op.Args.(LeaveArgs)
return sm.applyLeave(args.GID)
case Move:
args := op.Args.(MoveArgs)
return sm.applyMove(args.Shard, args.GID)
case Query:
args := op.Args.(QueryArgs)
return sm.applyQuery(args.Num)
}
// Should not reach this point
return &Config{}, ErrInvalid
}
func (sm *ShardMaster) newestConfig() (Config, int) {
i := len(sm.configs) - 1
return sm.configs[i], i
}
func (sm *ShardMaster) tick() {
timeout := InitTimeout
for sm.dead == false {
seq := sm.lastAppliedSeq + 1
decided, result := sm.px.Status(seq)
if decided {
// apply the operation
op, _ := result.(Op)
sm.log("Applying %s from seq=%d", op.Operation, seq)
config, err := sm.applyOp(&op)
sm.lastAppliedSeq += 1
sm.mu.Lock()
if op.Operation != Query && err == OK {
sm.configs = append(sm.configs, *config)
}
sm.mu.Unlock()
// reset timeout
timeout = InitTimeout
} else {
// sm.log("Retry for seq=%d", seq)
if timeout >= 1*time.Second {
sm.log("Try noop for seq=%d", seq)
sm.px.Start(seq, Op{Operation: Noop})
// wait for noop to return
noopDone := false
for !noopDone {
noopDone, _ = sm.px.Status(seq)
time.Sleep(100 * time.Millisecond)
}
} else {
// wait before retrying
time.Sleep(timeout)
if timeout < 1*time.Second {
				// exponential backoff
timeout *= 2
}
}
}
}
}
func (sm *ShardMaster) log(format string, a ...interface{}) (n int, err error) {
if Debug {
addr := "Srv#" + strconv.Itoa(sm.me)
n, err = fmt.Printf(addr+": "+format+"\n", a...)
}
return
}
// please don't change this function.
func (sm *ShardMaster) Kill() {
sm.dead = true
sm.l.Close()
sm.px.Kill()
}
//
// servers[] contains the ports of the set of
// servers that will cooperate via Paxos to
// form the fault-tolerant shardmaster service.
// me is the index of the current server in servers[].
//
func StartServer(servers []string, me int) *ShardMaster {
gob.Register(Op{})
gob.Register(JoinArgs{})
gob.Register(LeaveArgs{})
gob.Register(MoveArgs{})
gob.Register(QueryArgs{})
sm := new(ShardMaster)
sm.me = me
sm.configs = make([]Config, 1)
sm.configs[0].Groups = make(map[int64][]string)
sm.lastAppliedSeq = -1
rpcs := rpc.NewServer()
rpcs.Register(sm)
sm.px = paxos.Make(servers, me, rpcs)
os.Remove(servers[me])
l, e := net.Listen("unix", servers[me])
if e != nil {
log.Fatal("listen error: ", e)
}
sm.l = l
// please do not change any of the following code,
// or do anything to subvert it.
go func() {
for sm.dead == false {
conn, err := sm.l.Accept()
if err == nil && sm.dead == false {
if sm.unreliable && (rand.Int63()%1000) < 100 {
// discard the request.
conn.Close()
} else if sm.unreliable && (rand.Int63()%1000) < 200 {
// process the request but force discard of reply.
c1 := conn.(*net.UnixConn)
f, _ := c1.File()
err := syscall.Shutdown(int(f.Fd()), syscall.SHUT_WR)
if err != nil {
fmt.Printf("shutdown: %v\n", err)
}
go rpcs.ServeConn(conn)
} else {
go rpcs.ServeConn(conn)
}
} else if err == nil {
conn.Close()
}
if err != nil && sm.dead == false {
fmt.Printf("ShardMaster(%v) accept: %v\n", me, err.Error())
sm.Kill()
}
}
}()
go sm.tick()
return sm
}
| Less | identifier_name |
server.go | package shardmaster
import "net"
import "fmt"
import "net/rpc"
import "log"
import "paxos"
import "sync"
import "os"
import "syscall"
import "encoding/gob"
import "math/rand"
import "time"
import "strconv"
import "sort"
import "reflect"
const (
Debug = false
InitTimeout = 10 * time.Millisecond
)
type ShardAlloc struct {
GID int64
NumShards int
Shards []int
}
// implements sort.Interface for []ShardAlloc
type ByNumShardsInc []ShardAlloc
func (a ByNumShardsInc) Len() int { return len(a) }
func (a ByNumShardsInc) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
func (a ByNumShardsInc) Less(i, j int) bool { return a[i].NumShards < a[j].NumShards }
type ByNumShardsDec []ShardAlloc
func (a ByNumShardsDec) Len() int { return len(a) }
func (a ByNumShardsDec) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
func (a ByNumShardsDec) Less(i, j int) bool { return a[i].NumShards > a[j].NumShards }
type ShardMaster struct {
mu sync.Mutex
l net.Listener
me int
dead bool // for testing
unreliable bool // for testing
px *paxos.Paxos
configs []Config // indexed by config num
lastAppliedSeq int
}
const (
OK = "OK"
ErrNoop = "ErrNoop"
ErrInvalid = "ErrInvalid"
ErrGIDAlreadyJoined = "ErrGIDAlreadyJoined"
ErrGIDAlreadyLeft = "ErrGIDAlreadyLeft"
)
type Err string
const (
Join = "Join"
Leave = "Leave"
Move = "Move"
Query = "Query"
Noop = "Noop"
)
type Operation string
type Op struct {
Operation Operation
Args interface{}
}
func RandMTime() time.Duration |
func MakeConfig(num int, shards [NShards]int64, groups map[int64][]string) *Config {
config := &Config{}
config.Num = num
config.Shards = shards
config.Groups = groups
return config
}
func CopyGroups(src map[int64][]string) map[int64][]string {
dst := make(map[int64][]string)
for k, v := range src {
dst[k] = v
}
return dst
}
func CopyShards(src [NShards]int64) [NShards]int64 {
var dst [NShards]int64
for shard, gid := range src {
dst[shard] = gid
}
return dst
}
func MakeShardAllocs(groups map[int64][]string, shards [NShards]int64) map[int64]*ShardAlloc {
availGids := make(map[int64]*ShardAlloc)
for gid, _ := range groups {
availGids[gid] = &ShardAlloc{GID: gid, NumShards: 0, Shards: []int{}}
}
for shard, gid := range shards {
alloc, exists := availGids[gid]
// Invariant: gid should always exist in shard
if exists {
alloc.NumShards += 1
alloc.Shards = append(alloc.Shards, shard)
}
}
return availGids
}
func SortShardAllocs(availGids map[int64]*ShardAlloc, order string) []ShardAlloc {
var shardAllocs []ShardAlloc
for _, v := range availGids {
shardAllocs = append(shardAllocs, *v)
}
if order == "dec" {
sort.Sort(ByNumShardsDec(shardAllocs))
} else {
sort.Sort(ByNumShardsInc(shardAllocs))
}
return shardAllocs
}
func (sm *ShardMaster) Join(args *JoinArgs, reply *JoinReply) error {
sm.log("Join RPC: gid=%d, servers=%s", args.GID, args.Servers)
op := Op{Operation: Join, Args: *args}
sm.resolveOp(op)
return nil
}
func (sm *ShardMaster) Leave(args *LeaveArgs, reply *LeaveReply) error {
sm.log("Leave RPC: gid=%d", args.GID)
op := Op{Operation: Leave, Args: *args}
sm.resolveOp(op)
return nil
}
func (sm *ShardMaster) Move(args *MoveArgs, reply *MoveReply) error {
sm.log("Move RPC: shard=%d, to_gid=%d", args.Shard, args.GID)
op := Op{Operation: Move, Args: *args}
sm.resolveOp(op)
return nil
}
func (sm *ShardMaster) Query(args *QueryArgs, reply *QueryReply) error {
sm.log("Query RPC: config_num=%d", args.Num)
op := Op{Operation: Query, Args: *args}
config := sm.resolveOp(op)
reply.Config = config
return nil
}
func (sm *ShardMaster) resolveOp(op Op) Config {
seq := sm.px.Max() + 1
sm.log("Resolve init seq=%d", seq)
sm.px.Start(seq, op)
timeout := InitTimeout
time.Sleep(timeout)
decided, val := sm.px.Status(seq)
var valOp Op
if val != nil {
valOp = val.(Op)
}
for !decided || !reflect.DeepEqual(valOp, op) {
if (decided && !reflect.DeepEqual(valOp, op)) || (seq <= sm.lastAppliedSeq) {
sm.log("Seq=%d already decided", seq)
seq = sm.px.Max() + 1
sm.px.Start(seq, op)
}
sm.log("Retry w/ seq=%d", seq)
time.Sleep(timeout + RandMTime())
if timeout < 100*time.Millisecond {
timeout *= 2
}
decided, val = sm.px.Status(seq)
if val != nil {
valOp = val.(Op)
}
}
sm.log("Seq=%d decided!", seq)
// block until seq op has been applied
for sm.lastAppliedSeq < seq {
time.Sleep(InitTimeout)
}
sm.px.Done(seq)
if op.Operation == Query {
num := op.Args.(QueryArgs).Num
config, _ := sm.applyQuery(num)
return *config
}
return Config{}
}
func (sm *ShardMaster) applyJoin(joinGid int64, servers []string) (*Config, Err) {
// Get newest config
config, num := sm.newestConfig()
newConfigNum := num + 1
_, exists := config.Groups[joinGid]
if exists {
return &Config{}, ErrGIDAlreadyJoined
}
// Add new gid into groups mapping
groups := CopyGroups(config.Groups)
groups[joinGid] = servers
shards := CopyShards(config.Shards)
if len(groups) == 1 {
for i, _ := range shards {
shards[i] = joinGid
}
} else {
// Redistribute shards
shardAllocs := MakeShardAllocs(config.Groups, config.Shards)
sorted := SortShardAllocs(shardAllocs, "dec")
shardsPerGroup := NShards / len(groups)
for i := 0; i < shardsPerGroup; i += 1 {
index := i % len(sorted)
shardAlloc := sorted[index]
s := shardAlloc.Shards
sorted[index].Shards = s[1:len(s)]
shard := s[0]
shards[shard] = joinGid
}
}
return MakeConfig(newConfigNum, shards, groups), OK
}
func (sm *ShardMaster) applyLeave(leaveGid int64) (*Config, Err) {
// Get newest config
config, num := sm.newestConfig()
newConfigNum := num + 1
_, exists := config.Groups[leaveGid]
if !exists {
return &Config{}, ErrGIDAlreadyLeft
}
// Remove gid from groups mapping
groups := CopyGroups(config.Groups)
delete(groups, leaveGid)
shards := CopyShards(config.Shards)
if len(groups) == 0 {
for i, _ := range shards {
shards[i] = 0
}
} else {
shardAllocs := MakeShardAllocs(config.Groups, config.Shards)
sorted := SortShardAllocs(shardAllocs, "inc")
freeShards := shardAllocs[leaveGid].Shards
// Assign shard to new gid
rotate := 0
for _, shard := range freeShards {
gid := leaveGid
for gid == leaveGid {
gid = sorted[rotate%len(sorted)].GID
rotate += 1
}
shards[shard] = gid
}
}
return MakeConfig(newConfigNum, shards, groups), OK
}
func (sm *ShardMaster) applyMove(movingShard int, newGid int64) (*Config, Err) {
// Get newest config
config, num := sm.newestConfig()
newConfigNum := num + 1
_, exists := config.Groups[newGid]
if !exists {
return &Config{}, ErrGIDAlreadyLeft
}
groups := CopyGroups(config.Groups)
shards := CopyShards(config.Shards)
// Move shard into new gid
shards[movingShard] = newGid
return MakeConfig(newConfigNum, shards, groups), OK
}
func (sm *ShardMaster) applyQuery(num int) (*Config, Err) {
// Get newest config if requested config num out of range
if num < 0 || num >= len(sm.configs) {
config, _ := sm.newestConfig()
return &config, OK
}
return &sm.configs[num], OK
}
func (sm *ShardMaster) applyOp(op *Op) (*Config, Err) {
// Return early for a noop
if op.Operation == Noop {
return &Config{}, ErrNoop
}
switch op.Operation {
case Join:
args := op.Args.(JoinArgs)
return sm.applyJoin(args.GID, args.Servers)
case Leave:
args := op.Args.(LeaveArgs)
return sm.applyLeave(args.GID)
case Move:
args := op.Args.(MoveArgs)
return sm.applyMove(args.Shard, args.GID)
case Query:
args := op.Args.(QueryArgs)
return sm.applyQuery(args.Num)
}
// Should not reach this point
return &Config{}, ErrInvalid
}
func (sm *ShardMaster) newestConfig() (Config, int) {
i := len(sm.configs) - 1
return sm.configs[i], i
}
func (sm *ShardMaster) tick() {
timeout := InitTimeout
for sm.dead == false {
seq := sm.lastAppliedSeq + 1
decided, result := sm.px.Status(seq)
if decided {
// apply the operation
op, _ := result.(Op)
sm.log("Applying %s from seq=%d", op.Operation, seq)
config, err := sm.applyOp(&op)
sm.lastAppliedSeq += 1
sm.mu.Lock()
if op.Operation != Query && err == OK {
sm.configs = append(sm.configs, *config)
}
sm.mu.Unlock()
// reset timeout
timeout = InitTimeout
} else {
// sm.log("Retry for seq=%d", seq)
if timeout >= 1*time.Second {
sm.log("Try noop for seq=%d", seq)
sm.px.Start(seq, Op{Operation: Noop})
// wait for noop to return
noopDone := false
for !noopDone {
noopDone, _ = sm.px.Status(seq)
time.Sleep(100 * time.Millisecond)
}
} else {
// wait before retrying
time.Sleep(timeout)
if timeout < 1*time.Second {
				// exponential backoff
timeout *= 2
}
}
}
}
}
func (sm *ShardMaster) log(format string, a ...interface{}) (n int, err error) {
if Debug {
addr := "Srv#" + strconv.Itoa(sm.me)
n, err = fmt.Printf(addr+": "+format+"\n", a...)
}
return
}
// please don't change this function.
func (sm *ShardMaster) Kill() {
sm.dead = true
sm.l.Close()
sm.px.Kill()
}
//
// servers[] contains the ports of the set of
// servers that will cooperate via Paxos to
// form the fault-tolerant shardmaster service.
// me is the index of the current server in servers[].
//
func StartServer(servers []string, me int) *ShardMaster {
gob.Register(Op{})
gob.Register(JoinArgs{})
gob.Register(LeaveArgs{})
gob.Register(MoveArgs{})
gob.Register(QueryArgs{})
sm := new(ShardMaster)
sm.me = me
sm.configs = make([]Config, 1)
sm.configs[0].Groups = make(map[int64][]string)
sm.lastAppliedSeq = -1
rpcs := rpc.NewServer()
rpcs.Register(sm)
sm.px = paxos.Make(servers, me, rpcs)
os.Remove(servers[me])
l, e := net.Listen("unix", servers[me])
if e != nil {
log.Fatal("listen error: ", e)
}
sm.l = l
// please do not change any of the following code,
// or do anything to subvert it.
go func() {
for sm.dead == false {
conn, err := sm.l.Accept()
if err == nil && sm.dead == false {
if sm.unreliable && (rand.Int63()%1000) < 100 {
// discard the request.
conn.Close()
} else if sm.unreliable && (rand.Int63()%1000) < 200 {
// process the request but force discard of reply.
c1 := conn.(*net.UnixConn)
f, _ := c1.File()
err := syscall.Shutdown(int(f.Fd()), syscall.SHUT_WR)
if err != nil {
fmt.Printf("shutdown: %v\n", err)
}
go rpcs.ServeConn(conn)
} else {
go rpcs.ServeConn(conn)
}
} else if err == nil {
conn.Close()
}
if err != nil && sm.dead == false {
fmt.Printf("ShardMaster(%v) accept: %v\n", me, err.Error())
sm.Kill()
}
}
}()
go sm.tick()
return sm
}
| {
return time.Duration(rand.Int()%100) * time.Millisecond
} | identifier_body |
main.js | /**
* ===================================================================
* Main js
*
* -------------------------------------------------------------------
*/
(function($) {
"use strict";
/* --------------------------------------------------- */
/* Preloader
------------------------------------------------------ */
$(window).load(function() {
// will first fade out the loading animation
$("#loader").fadeOut("slow", function(){
// will fade out the whole DIV that covers the website.
$("#preloader").delay(300).fadeOut("slow");
});
})
/*---------------------------------------------------- */
/* FitVids
------------------------------------------------------ */
$(".fluid-video-wrapper").fitVids();
/* --------------------------------------------------- */
/* Vegas Slideshow
------------------------------------------------------ */
$(".home-slides").vegas({
transition: 'fade',
transitionDuration: 2500,
delay: 5000,
slides: [
{ src: "images/slides/03.jpg" },
{ src: "images/slides/02.jpg" },
{ src: "images/slides/01.jpg" }
]
});
/* --------------------------------------------------- */
/* Particle JS
------------------------------------------------------ */
// $('.home-particles').particleground({
// dotColor: '#fff',
// lineColor: '#555555',
// particleRadius: 6,
// curveLines: true,
// density: 10000,
// proximity: 110
// });
/*-----------------------------------------------------*/
/* tabs
-------------------------------------------------------*/
$(".tab-content").hide();
$(".tab-content").first().show();
$("ul.tabs li").click(function () {
$("ul.tabs li").removeClass("active");
$(this).addClass("active");
$(".tab-content").hide();
var activeTab = $(this).attr("data-id");
$("#" + activeTab).fadeIn(700);
});
/*----------------------------------------------------*/
/* Smooth Scrolling
------------------------------------------------------*/
$('.smoothscroll').on('click', function (e) {
e.preventDefault();
var target = this.hash,
$target = $(target);
$('html, body').stop().animate({
'scrollTop': $target.offset().top
}, 800, 'swing', function () {
window.location.hash = target;
});
});
/* --------------------------------------------------- */
/* Placeholder Plugin Settings
------------------------------------------------------ */
$('input, textarea, select').placeholder()
/*---------------------------------------------------- */
/* ajaxchimp
------------------------------------------------------ */
// Example MailChimp url: http://xxx.xxx.list-manage.com/subscribe/post?u=xxx&id=xxx
var mailChimpURL = 'http://facebook.us8.list-manage.com/subscribe/post?u=cdb7b577e41181934ed6a6a44&id=e65110b38d'
$('#mc-form').ajaxChimp({
language: 'pl',
url: mailChimpURL
});
// Mailchimp translation
//
// Defaults:
// 'submit': 'Submitting...',
// 0: 'We have sent you a confirmation email',
// 1: 'Please enter a value',
// 2: 'An email address must contain a single @',
// 3: 'The domain portion of the email address is invalid (the portion after the @: )',
// 4: 'The username portion of the email address is invalid (the portion before the @: )',
// 5: 'This email address looks fake or invalid. Please enter a real email address'
   $.ajaxChimp.translations.pl = {
'submit': 'Submitting...',
0: '<i class="fa fa-check"></i> Wysłaliśmy Ci e-mail z potwierdzeniem',
1: '<i class="fa fa-warning"></i> Musisz wprowadzić prawidłowy adres email.',
2: '<i class="fa fa-warning"></i> Adres e-mail jest nieprawidłowy.',
3: '<i class="fa fa-warning"></i> Adres e-mail jest nieprawidłowy.',
4: '<i class="fa fa-warning"></i> Adres e-mail jest nieprawidłowy.',
5: '<i class="fa fa-warning"></i> Adres e-mail jest nieprawidłowy.'
}
/*---------------------------------------------------- */
/* contact form
------------------------------------------------------ */
/* local validation */
$('#contactForm').validate({
/* submit via ajax */
submitHandler: function(form) {
var sLoader = $('#submit-loader');
$.ajax({
type: "POST",
url: "inc/sendEmail.php",
data: $(form).serialize(),
beforeSend: function() {
sLoader.fadeIn();
},
success: function(msg) {
// Message was sent
if (msg == 'OK') {
sLoader.fadeOut();
$('#message-warning').hide();
$('#contactForm').fadeOut();
$('#message-success').fadeIn();
}
// There was an error
else {
sLoader.fadeOut();
$('#message-warning').html(msg);
$('#message-warning').fadeIn();
}
},
error: function() {
sLoader.fadeOut();
$('#message-warning').html("Coś poszło nie tak. Proszę spróbuj ponownie.");
$('#message-warning').fadeIn();
}
});
}
});
/*----------------------------------------------------*/
/* Final Countdown Settings
------------------------------------------------------ */
var finalDate = '2017/11/09';
$('div#counter').countdown(finalDate)
.on('update.countdown', function(event) {
$(this).html(event.strftime('<div class=\"half\">' +
'<span>%D <sup>dni</sup></span>' +
'<span>%H <sup>godzin</sup></span>' +
'</div>' +
'<div class=\"half\">' +
'<span>%M <sup>minut</sup></span>' +
'<span>%S <sup>sekund</sup></span>' +
'</div>'));
});
})(jQuery);
/*
* File: jquery.simplePopup.js
* Version: 1.0.0
* Description: Create a simple popup to display content
* Author: 9bit Studios
* Copyright 2012, 9bit Studios
* http://www.9bitstudios.com
* Free to use and abuse under the MIT license.
* http://www.opensource.org/licenses/mit-license.php
*/
(function ($) {
$.fn.simplePopup = function (options) {
var defaults = $.extend({
centerPopup: true,
open: function() {},
closed: function() {}
}, options);
/******************************
Private Variables
*******************************/
var object = $(this);
var settings = $.extend(defaults, options);
/******************************
Public Methods
*******************************/
var methods = {
init: function() {
return this.each(function () {
methods.appendHTML();
methods.setEventHandlers();
methods.showPopup();
});
},
/******************************
| appendHTML: function() {
// if this has already been added we don't need to add it again
if ($('.simplePopupBackground').length === 0) {
var background = '<div class="simplePopupBackground"></div>';
$('body').prepend(background);
}
if(object.find('.simplePopupClose').length === 0) {
var close = '<div class="simplePopupClose"><i class="fa fa-times fa-lg" aria-hidden="true"></i></div>';
object.prepend(close);
}
},
/******************************
Set Event Handlers
*******************************/
setEventHandlers: function() {
$(".simplePopupClose, .simplePopupBackground").on("click", function (event) {
methods.hidePopup();
});
$(window).on("resize", function(event){
if(settings.centerPopup) {
methods.positionPopup();
}
});
},
removeEventListners: function() {
$(".simplePopupClose, .simplePopupBackground").off("click");
},
showPopup: function() {
$(".simplePopupBackground").css({
"opacity": "0.8"
});
$(".simplePopupBackground").fadeIn("fast");
object.fadeIn("slow", function(){
settings.open();
});
if(settings.centerPopup) {
methods.positionPopup();
}
},
hidePopup: function() {
$(".simplePopupBackground").fadeOut("fast");
object.fadeOut("fast", function(){
methods.removeEventListners();
settings.closed();
});
},
        // Restored from the commented-out block: showPopup() and the resize
        // handler call this whenever settings.centerPopup is true.
        positionPopup: function() {
            var windowWidth = $(window).width();
            var windowHeight = $(window).height();
            var popupWidth = object.width();
            var popupHeight = object.height();

            var topPos = (windowHeight / 2) - (popupHeight / 2);
            var leftPos = (windowWidth / 2) - (popupWidth / 2);
            if(topPos < 30) topPos = 30;

            object.css({
                "position": "absolute",
                "top": topPos,
                "left": leftPos
            });
        }
};
if (methods[options]) { // $("#element").pluginName('methodName', 'arg1', 'arg2');
return methods[options].apply(this, Array.prototype.slice.call(arguments, 1));
} else if (typeof options === 'object' || !options) { // $("#element").pluginName({ option: 1, option:2 });
return methods.init.apply(this);
} else {
            $.error('Method "' + options + '" does not exist in simple popup plugin!');
}
};
})(jQuery); | Append HTML
*******************************/
| random_line_split |
main.js | /**
* ===================================================================
* Main js
*
* -------------------------------------------------------------------
*/
(function($) {
"use strict";
/* --------------------------------------------------- */
/* Preloader
------------------------------------------------------ */
$(window).load(function() {
// will first fade out the loading animation
$("#loader").fadeOut("slow", function(){
// will fade out the whole DIV that covers the website.
$("#preloader").delay(300).fadeOut("slow");
});
})
/*---------------------------------------------------- */
/* FitVids
------------------------------------------------------ */
$(".fluid-video-wrapper").fitVids();
/* --------------------------------------------------- */
/* Vegas Slideshow
------------------------------------------------------ */
$(".home-slides").vegas({
transition: 'fade',
transitionDuration: 2500,
delay: 5000,
slides: [
{ src: "images/slides/03.jpg" },
{ src: "images/slides/02.jpg" },
{ src: "images/slides/01.jpg" }
]
});
/* --------------------------------------------------- */
/* Particle JS
------------------------------------------------------ */
// $('.home-particles').particleground({
// dotColor: '#fff',
// lineColor: '#555555',
// particleRadius: 6,
// curveLines: true,
// density: 10000,
// proximity: 110
// });
/*-----------------------------------------------------*/
/* tabs
-------------------------------------------------------*/
$(".tab-content").hide();
$(".tab-content").first().show();
$("ul.tabs li").click(function () {
$("ul.tabs li").removeClass("active");
$(this).addClass("active");
$(".tab-content").hide();
var activeTab = $(this).attr("data-id");
$("#" + activeTab).fadeIn(700);
});
/*----------------------------------------------------*/
/* Smooth Scrolling
------------------------------------------------------*/
$('.smoothscroll').on('click', function (e) {
e.preventDefault();
var target = this.hash,
$target = $(target);
$('html, body').stop().animate({
'scrollTop': $target.offset().top
}, 800, 'swing', function () {
window.location.hash = target;
});
});
/* --------------------------------------------------- */
/* Placeholder Plugin Settings
------------------------------------------------------ */
$('input, textarea, select').placeholder();
/*---------------------------------------------------- */
/* ajaxchimp
------------------------------------------------------ */
// Example MailChimp url: http://xxx.xxx.list-manage.com/subscribe/post?u=xxx&id=xxx
var mailChimpURL = 'http://facebook.us8.list-manage.com/subscribe/post?u=cdb7b577e41181934ed6a6a44&id=e65110b38d';
$('#mc-form').ajaxChimp({
language: 'pl',
url: mailChimpURL
});
// Mailchimp translation
//
// Defaults:
// 'submit': 'Submitting...',
// 0: 'We have sent you a confirmation email',
// 1: 'Please enter a value',
// 2: 'An email address must contain a single @',
// 3: 'The domain portion of the email address is invalid (the portion after the @: )',
// 4: 'The username portion of the email address is invalid (the portion before the @: )',
// 5: 'This email address looks fake or invalid. Please enter a real email address'
$.ajaxChimp.translations.pl = {
'submit': 'Submitting...',
0: '<i class="fa fa-check"></i> Wysłaliśmy Ci e-mail z potwierdzeniem',
1: '<i class="fa fa-warning"></i> Musisz wprowadzić prawidłowy adres email.',
2: '<i class="fa fa-warning"></i> Adres e-mail jest nieprawidłowy.',
3: '<i class="fa fa-warning"></i> Adres e-mail jest nieprawidłowy.',
4: '<i class="fa fa-warning"></i> Adres e-mail jest nieprawidłowy.',
5: '<i class="fa fa-warning"></i> Adres e-mail jest nieprawidłowy.'
};
/*---------------------------------------------------- */
/* contact form
------------------------------------------------------ */
/* local validation */
$('#contactForm').validate({
/* submit via ajax */
submitHandler: function(form) {
var sLoader = $('#submit-loader');
$.ajax({
type: "POST",
url: "inc/sendEmail.php",
data: $(form).serialize(),
beforeSend: function() {
sLoader.fadeIn();
},
success: function(msg) {
// Message was sent
if (msg == 'OK') {
| // There was an error
else {
sLoader.fadeOut();
$('#message-warning').html(msg);
$('#message-warning').fadeIn();
}
},
error: function() {
sLoader.fadeOut();
$('#message-warning').html("Coś poszło nie tak. Proszę spróbuj ponownie.");
$('#message-warning').fadeIn();
}
});
}
});
/*----------------------------------------------------*/
/* Final Countdown Settings
------------------------------------------------------ */
var finalDate = '2017/11/09';
$('div#counter').countdown(finalDate)
.on('update.countdown', function(event) {
$(this).html(event.strftime('<div class=\"half\">' +
'<span>%D <sup>dni</sup></span>' +
'<span>%H <sup>godzin</sup></span>' +
'</div>' +
'<div class=\"half\">' +
'<span>%M <sup>minut</sup></span>' +
'<span>%S <sup>sekund</sup></span>' +
'</div>'));
});
})(jQuery);
/*
* File: jquery.simplePopup.js
* Version: 1.0.0
* Description: Create a simple popup to display content
* Author: 9bit Studios
* Copyright 2012, 9bit Studios
* http://www.9bitstudios.com
* Free to use and abuse under the MIT license.
* http://www.opensource.org/licenses/mit-license.php
*/
(function ($) {
$.fn.simplePopup = function (options) {
var defaults = $.extend({
centerPopup: true,
open: function() {},
closed: function() {}
}, options);
/******************************
Private Variables
*******************************/
var object = $(this);
var settings = $.extend(defaults, options);
/******************************
Public Methods
*******************************/
var methods = {
init: function() {
return this.each(function () {
methods.appendHTML();
methods.setEventHandlers();
methods.showPopup();
});
},
/******************************
Append HTML
*******************************/
appendHTML: function() {
// if this has already been added we don't need to add it again
if ($('.simplePopupBackground').length === 0) {
var background = '<div class="simplePopupBackground"></div>';
$('body').prepend(background);
}
if(object.find('.simplePopupClose').length === 0) {
var close = '<div class="simplePopupClose"><i class="fa fa-times fa-lg" aria-hidden="true"></i></div>';
object.prepend(close);
}
},
/******************************
Set Event Handlers
*******************************/
setEventHandlers: function() {
$(".simplePopupClose, .simplePopupBackground").on("click", function (event) {
methods.hidePopup();
});
$(window).on("resize", function(event){
if(settings.centerPopup) {
methods.positionPopup();
}
});
},
removeEventListeners: function() {
$(".simplePopupClose, .simplePopupBackground").off("click");
},
showPopup: function() {
$(".simplePopupBackground").css({
"opacity": "0.8"
});
$(".simplePopupBackground").fadeIn("fast");
object.fadeIn("slow", function(){
settings.open();
});
if(settings.centerPopup) {
methods.positionPopup();
}
},
hidePopup: function() {
$(".simplePopupBackground").fadeOut("fast");
object.fadeOut("fast", function(){
methods.removeEventListeners();
settings.closed();
});
},
// positionPopup: function() {
// var windowWidth = $(window).width();
// var windowHeight = $(window).height();
// var popupWidth = object.width();
// var popupHeight = object.height();
//
// var topPos = (windowHeight / 2) - (popupHeight / 2);
// var leftPos = (windowWidth / 2) - (popupWidth / 2);
// if(topPos < 30) topPos = 30;
//
// object.css({
// "position": "absolute",
// "top": topPos,
// "left": leftPos
// });
// },
};
if (methods[options]) { // $("#element").pluginName('methodName', 'arg1', 'arg2');
return methods[options].apply(this, Array.prototype.slice.call(arguments, 1));
} else if (typeof options === 'object' || !options) { // $("#element").pluginName({ option: 1, option:2 });
return methods.init.apply(this);
} else {
$.error( 'Method "' + options + '" does not exist in simple popup plugin!');
}
};
})(jQuery); | sLoader.fadeOut();
$('#message-warning').hide();
$('#contactForm').fadeOut();
$('#message-success').fadeIn();
}
| conditional_block |
version_info.rs | /*!
Version Information.
See [Microsoft's documentation](https://docs.microsoft.com/en-us/windows/desktop/menurc/version-information) for more information.
*/
use std::{char, cmp, fmt, mem, slice};
use std::collections::HashMap;
use crate::image::VS_FIXEDFILEINFO;
use crate::{Error, Result, _Pod as Pod};
use crate::util::{AlignTo, wstrn};
//----------------------------------------------------------------
/// Language and charset pair.
///
/// References [langID](https://docs.microsoft.com/en-us/windows/desktop/menurc/versioninfo-resource#langID) and [charsetID](https://docs.microsoft.com/en-us/windows/desktop/menurc/versioninfo-resource#charsetID).
#[derive(Copy, Clone, Debug, Pod, Eq, PartialEq)]
#[repr(C)]
pub struct Language {
pub lang_id: u16,
pub charset_id: u16,
}
impl Language {
/// Parse language hex strings.
pub fn parse(lang: &[u16]) -> std::result::Result<Language, &[u16]> {
if lang.len() != 8 {
return Err(lang);
}
fn digit(word: u16) -> u16 {
let num = word.wrapping_sub('0' as u16);
let upper = word.wrapping_sub('A' as u16).wrapping_add(10);
let lower = word.wrapping_sub('a' as u16).wrapping_add(10);
if word >= 'a' as u16 { lower }
else if word >= 'A' as u16 { upper }
else { num }
}
let mut digits = [0u16; 8];
for i in 0..8 {
digits[i] = digit(lang[i]);
}
let lang_id = (digits[0] << 12) | (digits[1] << 8) | (digits[2] << 4) | digits[3];
let charset_id = (digits[4] << 12) | (digits[5] << 8) | (digits[6] << 4) | digits[7];
Ok(Language { lang_id, charset_id })
}
}
impl fmt::Display for Language {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{:04X}{:04X}", self.lang_id, self.charset_id)
}
}
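// Editor's illustrative example (not part of the original source): round-trips
// the common "040904B0" pair (0x0409 = U.S. English, 0x04B0 = Unicode). The
// input is built from a plain hex string; `parse` expects exactly 8 UTF-16
// hex digits.
#[test]
fn language_parse_example() {
    let units: Vec<u16> = "040904B0".encode_utf16().collect();
    let lang = Language::parse(&units).unwrap();
    assert_eq!(lang, Language { lang_id: 0x0409, charset_id: 0x04B0 });
    assert_eq!(lang.to_string(), "040904B0");
}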
//----------------------------------------------------------------
/// Version Information.
#[derive(Copy, Clone, Debug)]
pub struct VersionInfo<'a> {
bytes: &'a [u8],
}
impl<'a> VersionInfo<'a> {
pub fn try_from(bytes: &'a [u8]) -> Result<VersionInfo<'a>> {
// Alignment of 4 bytes is assumed everywhere,
// unsafe code in this module relies on this
if !bytes.as_ptr().aligned_to(4) {
return Err(Error::Misaligned);
}
Ok(VersionInfo { bytes })
}
/// Gets the fixed file information if available.
pub fn fixed(self) -> Option<&'a VS_FIXEDFILEINFO> {
let mut fixed = None;
self.visit(&mut fixed);
fixed
}
/// Queries a string value by name.
///
/// The returned string is UTF-16 encoded, convert to UTF-8 with `String::from_utf16` and friends.
pub fn query_value<S: AsRef<str>>(self, key: &S) -> Option<&'a [u16]> {
let mut this = QueryValue {
key: key.as_ref(),
value: None,
};
self.visit(&mut this);
this.value
}
/// Iterates over all the strings.
///
/// The closure's arguments are the lang, name and value for each string pair in the version information.
pub fn for_each_string<F: FnMut(&'a [u16], &'a [u16], &'a [u16])>(self, mut f: F) |
/// Gets the strings in a hash map.
pub fn to_hash_map(self) -> HashMap<String, String> {
let mut hash_map = HashMap::new();
self.visit(&mut hash_map);
hash_map
}
/// Parse the version information.
///
/// Because of the super convoluted format, the visitor pattern is used.
/// Implement the [`Visit` trait](trait.Visit.html) to get the desired information.
///
/// To keep the API simple all errors are ignored, any invalid or corrupted data is skipped.
pub fn visit(self, visit: &mut dyn Visit<'a>) {
let words = unsafe { slice::from_raw_parts(self.bytes.as_ptr() as *const u16, self.bytes.len() / 2) };
for version_info_r in Parser::new_bytes(words) {
if let Ok(version_info) = version_info_r {
const VS_FIXEDFILEINFO_SIZEOF: usize = mem::size_of::<VS_FIXEDFILEINFO>();
let fixed = match mem::size_of_val(version_info.value) {
0 => None,
VS_FIXEDFILEINFO_SIZEOF => {
let value = unsafe { &*(version_info.value.as_ptr() as *const VS_FIXEDFILEINFO) };
Some(value)
},
_ => None,//return Err(Error::Invalid),
};
if !visit.version_info(version_info.key, fixed) {
continue;
}
// MS docs: This member is always equal to zero.
for file_info_r in Parser::new_zero(version_info.children) {
if let Ok(file_info) = file_info_r {
if !visit.file_info(file_info.key) {
continue;
}
// MS docs: L"StringFileInfo"
if file_info.key == &self::strings::StringFileInfo {
// MS docs: This member is always equal to zero.
for string_table_r in Parser::new_zero(file_info.children) {
if let Ok(string_table) = string_table_r {
if !visit.string_table(string_table.key) {
continue;
}
for string_r in Parser::new_words(string_table.children) {
if let Ok(string) = string_r {
// Strip the nul terminator...
let value = if string.value.last() != Some(&0) { string.value }
else { &string.value[..string.value.len() - 1] };
visit.string(string_table.key, string.key, value);
}
}
}
}
}
// MS docs: L"VarFileInfo"
else if file_info.key == &self::strings::VarFileInfo {
for var_r in Parser::new_bytes(file_info.children) {
if let Ok(var) = var_r {
visit.var(var.key, var.value);
}
}
}
}
}
}
}
}
}
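// Editor's illustrative sketch (not part of the original source) of how the
// accessors above compose; the caller is assumed to have obtained the
// `VersionInfo` from a PE resource directory elsewhere, and "ProductName" is
// just an example key.
#[allow(dead_code)]
fn example_usage(version_info: VersionInfo<'_>) {
    // Query a single well-known string; the raw value is UTF-16 encoded.
    if let Some(value) = version_info.query_value(&"ProductName") {
        let _product_name = String::from_utf16_lossy(value);
    }
    // Or walk every (lang, key, value) triple in one pass.
    version_info.for_each_string(|lang, key, value| {
        println!("{}: {} = {}",
            String::from_utf16_lossy(lang),
            String::from_utf16_lossy(key),
            String::from_utf16_lossy(value));
    });
}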
//----------------------------------------------------------------
/// Visitor pattern to view the version information details.
#[allow(unused_variables)]
pub trait Visit<'a> {
fn version_info(&mut self, key: &'a [u16], fixed: Option<&'a VS_FIXEDFILEINFO>) -> bool { true }
fn file_info(&mut self, key: &'a [u16]) -> bool { true }
fn string_table(&mut self, lang: &'a [u16]) -> bool { true }
fn string(&mut self, lang: &'a [u16], key: &'a [u16], value: &'a [u16]) {}
fn var(&mut self, key: &'a [u16], pairs: &'a [u16]) {}
}
impl<'a> Visit<'a> for HashMap<String, String> {
fn string(&mut self, _lang: &'a [u16], key: &'a [u16], value: &'a [u16]) {
self.insert(
String::from_utf16_lossy(key),
String::from_utf16_lossy(value),
);
}
}
impl<'a> Visit<'a> for Option<&'a VS_FIXEDFILEINFO> {
fn version_info(&mut self, _key: &'a [u16], fixed: Option<&'a VS_FIXEDFILEINFO>) -> bool {
*self = fixed;
false
}
}
struct ForEachString<F>(F);
impl<'a, F: FnMut(&'a [u16], &'a [u16], &'a [u16])> Visit<'a> for ForEachString<F> {
fn string(&mut self, lang: &'a [u16], key: &'a [u16], value: &'a [u16]) {
(self.0)(lang, key, value);
}
}
struct QueryValue<'a, 's> {
key: &'s str,
value: Option<&'a [u16]>,
}
impl<'a, 's> Visit<'a> for QueryValue<'a, 's> {
fn string(&mut self, _lang: &'a [u16], key: &'a [u16], value: &'a [u16]) {
if Iterator::eq(self.key.chars().map(Ok), char::decode_utf16(key.iter().cloned())) {
self.value = Some(value);
}
}
}
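// Editor's illustrative sketch (not part of the original source): a custom
// visitor in the same style as the helpers above, collecting the language
// code of every string table. Hypothetical usage:
// `version_info.visit(&mut CollectTableLangs(Vec::new()));`
#[allow(dead_code)]
struct CollectTableLangs(Vec<String>);
impl<'a> Visit<'a> for CollectTableLangs {
    fn string_table(&mut self, lang: &'a [u16]) -> bool {
        self.0.push(String::from_utf16_lossy(lang));
        // Returning true also descends into this table's strings.
        true
    }
}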
//----------------------------------------------------------------
/*
"version_info": {
"fixed": { .. },
"strings": { .. },
},
*/
#[cfg(feature = "serde")]
mod serde {
use crate::util::serde_helper::*;
use super::{VersionInfo};
impl<'a> Serialize for VersionInfo<'a> {
fn serialize<S: Serializer>(&self, serializer: S) -> Result<S::Ok, S::Error> {
let mut state = serializer.serialize_struct("VersionInfo", 2)?;
state.serialize_field("fixed", &self.fixed())?;
state.serialize_field("strings", &self.to_hash_map())?;
state.end()
}
}
}
//----------------------------------------------------------------
mod strings {
#![allow(non_upper_case_globals)]
// static VS_VERSION_INFO: [u16; 15] = [86u16, 83, 95, 86, 69, 82, 83, 73, 79, 78, 95, 73, 78, 70, 79];
pub(crate) static StringFileInfo: [u16; 14] = [83u16, 116, 114, 105, 110, 103, 70, 105, 108, 101, 73, 110, 102, 111];
pub(crate) static VarFileInfo: [u16; 11] = [86u16, 97, 114, 70, 105, 108, 101, 73, 110, 102, 111];
// static Translation: [u16; 11] = [84u16, 114, 97, 110, 115, 108, 97, 116, 105, 111, 110];
// static Comments: [u16; 8] = [67u16, 111, 109, 109, 101, 110, 116, 115];
// static CompanyName: [u16; 11] = [67u16, 111, 109, 112, 97, 110, 121, 78, 97, 109, 101];
// static FileDescription: [u16; 15] = [70u16, 105, 108, 101, 68, 101, 115, 99, 114, 105, 112, 116, 105, 111, 110];
// static FileVersion: [u16; 11] = [70u16, 105, 108, 101, 86, 101, 114, 115, 105, 111, 110];
// static InternalName: [u16; 12] = [73u16, 110, 116, 101, 114, 110, 97, 108, 78, 97, 109, 101];
// static LegalCopyright: [u16; 14] = [76u16, 101, 103, 97, 108, 67, 111, 112, 121, 114, 105, 103, 104, 116];
// static LegalTrademarks: [u16; 15] = [76u16, 101, 103, 97, 108, 84, 114, 97, 100, 101, 109, 97, 114, 107, 115];
// static OriginalFilename: [u16; 16] = [79u16, 114, 105, 103, 105, 110, 97, 108, 70, 105, 108, 101, 110, 97, 109, 101];
// static PrivateBuild: [u16; 12] = [80u16, 114, 105, 118, 97, 116, 101, 66, 117, 105, 108, 100];
// static ProductName: [u16; 11] = [80u16, 114, 111, 100, 117, 99, 116, 78, 97, 109, 101];
// static ProductVersion: [u16; 14] = [80u16, 114, 111, 100, 117, 99, 116, 86, 101, 114, 115, 105, 111, 110];
// static SpecialBuild: [u16; 12] = [83u16, 112, 101, 99, 105, 97, 108, 66, 117, 105, 108, 100];
}
//----------------------------------------------------------------
#[cfg(test)]
pub(crate) fn test(version_info: VersionInfo<'_>) {
let _fixed = version_info.fixed();
let _hash_map = version_info.to_hash_map();
}
//----------------------------------------------------------------
/// Fixed file info constants.
pub mod image {
pub const VS_FF_DEBUG: u32 = 0x01;
pub const VS_FF_PRERELEASE: u32 = 0x02;
pub const VS_FF_PATCHED: u32 = 0x04;
pub const VS_FF_PRIVATEBUILD: u32 = 0x08;
pub const VS_FF_INFOINFERRED: u32 = 0x10;
pub const VS_FF_SPECIALBUILD: u32 = 0x20;
pub const VOS_UNKNOWN: u32 = 0x00000000;
pub const VOS_DOS: u32 = 0x00010000;
pub const VOS_OS216: u32 = 0x00020000;
pub const VOS_OS232: u32 = 0x00030000;
pub const VOS_NT: u32 = 0x00040000;
pub const VOS__WINDOWS16: u32 = 0x00000001;
pub const VOS__PM16: u32 = 0x00000002;
pub const VOS__PM32: u32 = 0x00000003;
pub const VOS__WINDOWS32: u32 = 0x00000004;
pub const VFT_UNKNOWN: u32 = 0x00000000;
pub const VFT_APP: u32 = 0x00000001;
pub const VFT_DLL: u32 = 0x00000002;
pub const VFT_DRV: u32 = 0x00000003;
pub const VFT_FONT: u32 = 0x00000004;
pub const VFT_VXD: u32 = 0x00000005;
pub const VFT_STATIC_LIB: u32 = 0x00000007;
pub const VFT2_UNKNOWN: u32 = 0x00000000;
pub const VFT2_DRV_PRINTER: u32 = 0x00000001;
pub const VFT2_DRV_KEYBOARD: u32 = 0x00000002;
pub const VFT2_DRV_LANGUAGE: u32 = 0x00000003;
pub const VFT2_DRV_DISPLAY: u32 = 0x00000004;
pub const VFT2_DRV_MOUSE: u32 = 0x00000005;
pub const VFT2_DRV_NETWORK: u32 = 0x00000006;
pub const VFT2_DRV_SYSTEM: u32 = 0x00000007;
pub const VFT2_DRV_INSTALLABLE: u32 = 0x00000008;
pub const VFT2_DRV_SOUND: u32 = 0x00000009;
pub const VFT2_DRV_COMM: u32 = 0x0000000A;
pub const VFT2_DRV_VERSIONED_PRINTER: u32 = 0x0000000C;
pub const VFT2_FONT_RASTER: u32 = 0x00000001;
pub const VFT2_FONT_VECTOR: u32 = 0x00000002;
pub const VFT2_FONT_TRUETYPE: u32 = 0x00000003;
}
//----------------------------------------------------------------
// This is an absolutely god awful format...
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
struct TLV<'a> {
pub key: &'a [u16],
pub value: &'a [u16], // DWORD aligned
pub children: &'a [u16], // DWORD aligned
}
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
enum ValueLengthType { Zero, Bytes, Words }
#[derive(Clone)]
struct Parser<'a> {
words: &'a [u16],
vlt: ValueLengthType,
}
impl<'a> Iterator for Parser<'a> {
type Item = Result<TLV<'a>>;
fn next(&mut self) -> Option<Result<TLV<'a>>> {
if self.words.len() == 0 {
return None;
}
let result = parse_tlv(self);
// If the parser errors, ensure the Iterator stops
if result.is_err() {
self.words = &self.words[self.words.len()..];
}
Some(result)
}
}
impl<'a> Parser<'a> {
pub(crate) fn new_zero(words: &'a [u16]) -> Parser<'a> {
Parser { words, vlt: ValueLengthType::Zero }
}
pub(crate) fn new_bytes(words: &'a [u16]) -> Parser<'a> {
Parser { words, vlt: ValueLengthType::Bytes }
}
pub(crate) fn new_words(words: &'a [u16]) -> Parser<'a> {
Parser { words, vlt: ValueLengthType::Words }
}
}
fn parse_tlv<'a>(state: &mut Parser<'a>) -> Result<TLV<'a>> {
let mut words = state.words;
// Parse the first three words from the TLV structure:
// wLength, wValueLength and wType (plus at least zero terminator of szKey)
if words.len() < 4 {
return Err(Error::Invalid);
}
// This is tricky, the struct contains a fixed and variable length parts
// However the length field includes the size of the fixed part
// Further complicating things, if the variable length part is absent the total length is set to zero (?!)
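// (Editor's note: so a wLength of 0 still parses as the minimum 4-word header
// below, since max(4, 0 / 2) == 4.)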
let length = cmp::max(4, words[0] as usize / 2).align_to(2);
// Oh god why, interpret the value_length
let value_length = match state.vlt {
ValueLengthType::Zero if words[1] == 0 => 0,
ValueLengthType::Zero => return Err(Error::Invalid),
ValueLengthType::Bytes => words[1] as usize / 2,
ValueLengthType::Words => words[1] as usize,
};
// let wType = words[2];
// Split the input where this structure ends and the next sibling begins
if length > words.len() {
return Err(Error::Invalid);
}
state.words = &words[length..];
words = &words[..length];
// Parse the nul terminated szKey
let key = wstrn(&words[3..]);
if words[3..].len() == key.len() {
return Err(Error::Invalid);
}
// Padding for the Value
words = &words[key.len().align_to(2) + 4..];
// Split the remaining words between the Value and Children
if value_length > words.len() {
return Err(Error::Invalid);
}
let value = &words[..value_length];
let children = &words[value.len().align_to(2)..];
Ok(TLV { key, value, children })
}
#[test]
fn test_parse_tlv_oob()
{
let mut parser;
// TLV header too short
parser = Parser::new_zero(&[0, 0]);
assert_eq!(parser.next(), Some(Err(Error::Invalid)));
assert_eq!(parser.next(), None);
// TLV length field larger than the data
parser = Parser::new_zero(&[12, 0, 0, 0]);
assert_eq!(parser.next(), Some(Err(Error::Invalid)));
assert_eq!(parser.next(), None);
// TLV key not nul terminated
parser = Parser::new_zero(&[16, 0, 1, 20, 20, 20, 20, 20]);
assert_eq!(parser.next(), Some(Err(Error::Invalid)));
assert_eq!(parser.next(), None);
// TLV value field larger than the data
parser = Parser::new_zero(&[8, 10, 0, 0, 0, 0]);
assert_eq!(parser.next(), Some(Err(Error::Invalid)));
assert_eq!(parser.next(), None);
}
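// Editor's addition: an illustrative happy-path check for the TLV parser,
// complementing the out-of-bounds tests above. The word layout is hand-built
// example data, not taken from a real version resource.
#[test]
fn test_parse_tlv_simple() {
    // wLength = 16 bytes, wValueLength = 1 (in words), wType = 1,
    // szKey = L"AB\0", one value word, no children, one trailing pad word.
    let words = [16u16, 1, 1, 65, 66, 0, 7, 0];
    let mut parser = Parser::new_words(&words);
    let tlv = parser.next().unwrap().unwrap();
    assert_eq!(tlv.key, &[65u16, 66][..]);
    assert_eq!(tlv.value, &[7u16][..]);
    assert!(tlv.children.is_empty());
    assert_eq!(parser.next(), None);
}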
| {
self.visit(&mut ForEachString(&mut f));
} | identifier_body |
version_info.rs | /*!
Version Information.
See [Microsoft's documentation](https://docs.microsoft.com/en-us/windows/desktop/menurc/version-information) for more information.
*/
use std::{char, cmp, fmt, mem, slice};
use std::collections::HashMap;
use crate::image::VS_FIXEDFILEINFO;
use crate::{Error, Result, _Pod as Pod};
use crate::util::{AlignTo, wstrn};
//----------------------------------------------------------------
/// Language and charset pair.
///
/// References [langID](https://docs.microsoft.com/en-us/windows/desktop/menurc/versioninfo-resource#langID) and [charsetID](https://docs.microsoft.com/en-us/windows/desktop/menurc/versioninfo-resource#charsetID).
#[derive(Copy, Clone, Debug, Pod, Eq, PartialEq)]
#[repr(C)]
pub struct Language {
pub lang_id: u16,
pub charset_id: u16,
}
impl Language {
/// Parse language hex strings.
pub fn parse(lang: &[u16]) -> std::result::Result<Language, &[u16]> {
if lang.len() != 8 {
return Err(lang);
}
fn digit(word: u16) -> u16 {
let num = word.wrapping_sub('0' as u16);
let upper = word.wrapping_sub('A' as u16).wrapping_add(10);
let lower = word.wrapping_sub('a' as u16).wrapping_add(10);
if word >= 'a' as u16 { lower }
else if word >= 'A' as u16 { upper }
else { num }
}
let mut digits = [0u16; 8];
for i in 0..8 {
digits[i] = digit(lang[i]);
}
let lang_id = (digits[0] << 12) | (digits[1] << 8) | (digits[2] << 4) | digits[3];
let charset_id = (digits[4] << 12) | (digits[5] << 8) | (digits[6] << 4) | digits[7];
Ok(Language { lang_id, charset_id })
}
}
impl fmt::Display for Language {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{:04X}{:04X}", self.lang_id, self.charset_id)
}
}
//----------------------------------------------------------------
/// Version Information.
#[derive(Copy, Clone, Debug)]
pub struct VersionInfo<'a> {
bytes: &'a [u8],
}
impl<'a> VersionInfo<'a> {
pub fn try_from(bytes: &'a [u8]) -> Result<VersionInfo<'a>> {
// Alignment of 4 bytes is assumed everywhere,
// unsafe code in this module relies on this
if !bytes.as_ptr().aligned_to(4) {
return Err(Error::Misaligned);
}
Ok(VersionInfo { bytes })
}
/// Gets the fixed file information if available.
pub fn fixed(self) -> Option<&'a VS_FIXEDFILEINFO> {
let mut fixed = None;
self.visit(&mut fixed);
fixed
}
/// Queries a string value by name.
///
/// The returned string is UTF-16 encoded, convert to UTF-8 with `String::from_utf16` and friends.
pub fn query_value<S: AsRef<str>>(self, key: &S) -> Option<&'a [u16]> {
let mut this = QueryValue {
key: key.as_ref(),
value: None,
};
self.visit(&mut this);
this.value
}
/// Iterates over all the strings.
///
/// The closure's arguments are the lang, name and value for each string pair in the version information.
pub fn for_each_string<F: FnMut(&'a [u16], &'a [u16], &'a [u16])>(self, mut f: F) {
self.visit(&mut ForEachString(&mut f));
}
/// Gets the strings in a hash map.
pub fn to_hash_map(self) -> HashMap<String, String> {
let mut hash_map = HashMap::new();
self.visit(&mut hash_map);
hash_map
}
/// Parse the version information.
///
/// Because of the super convoluted format, the visitor pattern is used.
/// Implement the [`Visit` trait](trait.Visit.html) to get the desired information.
///
/// To keep the API simple all errors are ignored, any invalid or corrupted data is skipped.
pub fn visit(self, visit: &mut dyn Visit<'a>) {
let words = unsafe { slice::from_raw_parts(self.bytes.as_ptr() as *const u16, self.bytes.len() / 2) };
for version_info_r in Parser::new_bytes(words) {
if let Ok(version_info) = version_info_r {
const VS_FIXEDFILEINFO_SIZEOF: usize = mem::size_of::<VS_FIXEDFILEINFO>();
let fixed = match mem::size_of_val(version_info.value) {
0 => None,
VS_FIXEDFILEINFO_SIZEOF => {
let value = unsafe { &*(version_info.value.as_ptr() as *const VS_FIXEDFILEINFO) };
Some(value)
},
_ => None,//return Err(Error::Invalid),
};
if !visit.version_info(version_info.key, fixed) {
continue;
}
// MS docs: This member is always equal to zero.
for file_info_r in Parser::new_zero(version_info.children) {
if let Ok(file_info) = file_info_r {
if !visit.file_info(file_info.key) {
continue;
}
// MS docs: L"StringFileInfo"
if file_info.key == &self::strings::StringFileInfo {
// MS docs: This member is always equal to zero.
for string_table_r in Parser::new_zero(file_info.children) {
if let Ok(string_table) = string_table_r {
if !visit.string_table(string_table.key) {
continue;
}
for string_r in Parser::new_words(string_table.children) {
if let Ok(string) = string_r {
// Strip the nul terminator...
let value = if string.value.last() != Some(&0) { string.value }
else { &string.value[..string.value.len() - 1] };
visit.string(string_table.key, string.key, value);
}
}
}
}
}
// MS docs: L"VarFileInfo"
else if file_info.key == &self::strings::VarFileInfo {
for var_r in Parser::new_bytes(file_info.children) {
if let Ok(var) = var_r {
visit.var(var.key, var.value);
}
}
}
}
}
}
}
}
}
//----------------------------------------------------------------
/// Visitor pattern to view the version information details.
#[allow(unused_variables)]
pub trait Visit<'a> {
fn version_info(&mut self, key: &'a [u16], fixed: Option<&'a VS_FIXEDFILEINFO>) -> bool { true }
fn file_info(&mut self, key: &'a [u16]) -> bool { true }
fn string_table(&mut self, lang: &'a [u16]) -> bool { true }
fn string(&mut self, lang: &'a [u16], key: &'a [u16], value: &'a [u16]) {}
fn var(&mut self, key: &'a [u16], pairs: &'a [u16]) {}
}
impl<'a> Visit<'a> for HashMap<String, String> {
fn string(&mut self, _lang: &'a [u16], key: &'a [u16], value: &'a [u16]) {
self.insert(
String::from_utf16_lossy(key),
String::from_utf16_lossy(value),
);
}
}
impl<'a> Visit<'a> for Option<&'a VS_FIXEDFILEINFO> {
fn version_info(&mut self, _key: &'a [u16], fixed: Option<&'a VS_FIXEDFILEINFO>) -> bool {
*self = fixed;
false
}
}
struct ForEachString<F>(F);
impl<'a, F: FnMut(&'a [u16], &'a [u16], &'a [u16])> Visit<'a> for ForEachString<F> {
fn string(&mut self, lang: &'a [u16], key: &'a [u16], value: &'a [u16]) {
(self.0)(lang, key, value);
}
}
struct QueryValue<'a, 's> {
key: &'s str,
value: Option<&'a [u16]>,
}
impl<'a, 's> Visit<'a> for QueryValue<'a, 's> {
fn | (&mut self, _lang: &'a [u16], key: &'a [u16], value: &'a [u16]) {
if Iterator::eq(self.key.chars().map(Ok), char::decode_utf16(key.iter().cloned())) {
self.value = Some(value);
}
}
}
//----------------------------------------------------------------
/*
"version_info": {
"fixed": { .. },
"strings": { .. },
},
*/
#[cfg(feature = "serde")]
mod serde {
use crate::util::serde_helper::*;
use super::{VersionInfo};
impl<'a> Serialize for VersionInfo<'a> {
fn serialize<S: Serializer>(&self, serializer: S) -> Result<S::Ok, S::Error> {
let mut state = serializer.serialize_struct("VersionInfo", 2)?;
state.serialize_field("fixed", &self.fixed())?;
state.serialize_field("strings", &self.to_hash_map())?;
state.end()
}
}
}
//----------------------------------------------------------------
mod strings {
#![allow(non_upper_case_globals)]
// static VS_VERSION_INFO: [u16; 15] = [86u16, 83, 95, 86, 69, 82, 83, 73, 79, 78, 95, 73, 78, 70, 79];
pub(crate) static StringFileInfo: [u16; 14] = [83u16, 116, 114, 105, 110, 103, 70, 105, 108, 101, 73, 110, 102, 111];
pub(crate) static VarFileInfo: [u16; 11] = [86u16, 97, 114, 70, 105, 108, 101, 73, 110, 102, 111];
// static Translation: [u16; 11] = [84u16, 114, 97, 110, 115, 108, 97, 116, 105, 111, 110];
// static Comments: [u16; 8] = [67u16, 111, 109, 109, 101, 110, 116, 115];
// static CompanyName: [u16; 11] = [67u16, 111, 109, 112, 97, 110, 121, 78, 97, 109, 101];
// static FileDescription: [u16; 15] = [70u16, 105, 108, 101, 68, 101, 115, 99, 114, 105, 112, 116, 105, 111, 110];
// static FileVersion: [u16; 11] = [70u16, 105, 108, 101, 86, 101, 114, 115, 105, 111, 110];
// static InternalName: [u16; 12] = [73u16, 110, 116, 101, 114, 110, 97, 108, 78, 97, 109, 101];
// static LegalCopyright: [u16; 14] = [76u16, 101, 103, 97, 108, 67, 111, 112, 121, 114, 105, 103, 104, 116];
// static LegalTrademarks: [u16; 15] = [76u16, 101, 103, 97, 108, 84, 114, 97, 100, 101, 109, 97, 114, 107, 115];
// static OriginalFilename: [u16; 16] = [79u16, 114, 105, 103, 105, 110, 97, 108, 70, 105, 108, 101, 110, 97, 109, 101];
// static PrivateBuild: [u16; 12] = [80u16, 114, 105, 118, 97, 116, 101, 66, 117, 105, 108, 100];
// static ProductName: [u16; 11] = [80u16, 114, 111, 100, 117, 99, 116, 78, 97, 109, 101];
// static ProductVersion: [u16; 14] = [80u16, 114, 111, 100, 117, 99, 116, 86, 101, 114, 115, 105, 111, 110];
// static SpecialBuild: [u16; 12] = [83u16, 112, 101, 99, 105, 97, 108, 66, 117, 105, 108, 100];
}
//----------------------------------------------------------------
#[cfg(test)]
pub(crate) fn test(version_info: VersionInfo<'_>) {
let _fixed = version_info.fixed();
let _hash_map = version_info.to_hash_map();
}
//----------------------------------------------------------------
/// Fixed file info constants.
pub mod image {
pub const VS_FF_DEBUG: u32 = 0x01;
pub const VS_FF_PRERELEASE: u32 = 0x02;
pub const VS_FF_PATCHED: u32 = 0x04;
pub const VS_FF_PRIVATEBUILD: u32 = 0x08;
pub const VS_FF_INFOINFERRED: u32 = 0x10;
pub const VS_FF_SPECIALBUILD: u32 = 0x20;
pub const VOS_UNKNOWN: u32 = 0x00000000;
pub const VOS_DOS: u32 = 0x00010000;
pub const VOS_OS216: u32 = 0x00020000;
pub const VOS_OS232: u32 = 0x00030000;
pub const VOS_NT: u32 = 0x00040000;
pub const VOS__WINDOWS16: u32 = 0x00000001;
pub const VOS__PM16: u32 = 0x00000002;
pub const VOS__PM32: u32 = 0x00000003;
pub const VOS__WINDOWS32: u32 = 0x00000004;
pub const VFT_UNKNOWN: u32 = 0x00000000;
pub const VFT_APP: u32 = 0x00000001;
pub const VFT_DLL: u32 = 0x00000002;
pub const VFT_DRV: u32 = 0x00000003;
pub const VFT_FONT: u32 = 0x00000004;
pub const VFT_VXD: u32 = 0x00000005;
pub const VFT_STATIC_LIB: u32 = 0x00000007;
pub const VFT2_UNKNOWN: u32 = 0x00000000;
pub const VFT2_DRV_PRINTER: u32 = 0x00000001;
pub const VFT2_DRV_KEYBOARD: u32 = 0x00000002;
pub const VFT2_DRV_LANGUAGE: u32 = 0x00000003;
pub const VFT2_DRV_DISPLAY: u32 = 0x00000004;
pub const VFT2_DRV_MOUSE: u32 = 0x00000005;
pub const VFT2_DRV_NETWORK: u32 = 0x00000006;
pub const VFT2_DRV_SYSTEM: u32 = 0x00000007;
pub const VFT2_DRV_INSTALLABLE: u32 = 0x00000008;
pub const VFT2_DRV_SOUND: u32 = 0x00000009;
pub const VFT2_DRV_COMM: u32 = 0x0000000A;
pub const VFT2_DRV_VERSIONED_PRINTER: u32 = 0x0000000C;
pub const VFT2_FONT_RASTER: u32 = 0x00000001;
pub const VFT2_FONT_VECTOR: u32 = 0x00000002;
pub const VFT2_FONT_TRUETYPE: u32 = 0x00000003;
}
//----------------------------------------------------------------
// This is an absolutely god awful format...
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
struct TLV<'a> {
pub key: &'a [u16],
pub value: &'a [u16], // DWORD aligned
pub children: &'a [u16], // DWORD aligned
}
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
enum ValueLengthType { Zero, Bytes, Words }
#[derive(Clone)]
struct Parser<'a> {
words: &'a [u16],
vlt: ValueLengthType,
}
impl<'a> Iterator for Parser<'a> {
type Item = Result<TLV<'a>>;
fn next(&mut self) -> Option<Result<TLV<'a>>> {
if self.words.len() == 0 {
return None;
}
let result = parse_tlv(self);
// If the parser errors, ensure the Iterator stops
if result.is_err() {
self.words = &self.words[self.words.len()..];
}
Some(result)
}
}
impl<'a> Parser<'a> {
pub(crate) fn new_zero(words: &'a [u16]) -> Parser<'a> {
Parser { words, vlt: ValueLengthType::Zero }
}
pub(crate) fn new_bytes(words: &'a [u16]) -> Parser<'a> {
Parser { words, vlt: ValueLengthType::Bytes }
}
pub(crate) fn new_words(words: &'a [u16]) -> Parser<'a> {
Parser { words, vlt: ValueLengthType::Words }
}
}
fn parse_tlv<'a>(state: &mut Parser<'a>) -> Result<TLV<'a>> {
let mut words = state.words;
// Parse the first three words from the TLV structure:
// wLength, wValueLength and wType (plus at least zero terminator of szKey)
if words.len() < 4 {
return Err(Error::Invalid);
}
// This is tricky, the struct contains a fixed and variable length parts
// However the length field includes the size of the fixed part
// Further complicating things, if the variable length part is absent the total length is set to zero (?!)
let length = cmp::max(4, words[0] as usize / 2).align_to(2);
// Oh god why, interpret the value_length
let value_length = match state.vlt {
ValueLengthType::Zero if words[1] == 0 => 0,
ValueLengthType::Zero => return Err(Error::Invalid),
ValueLengthType::Bytes => words[1] as usize / 2,
ValueLengthType::Words => words[1] as usize,
};
// let wType = words[2];
// Split the input where this structure ends and the next sibling begins
if length > words.len() {
return Err(Error::Invalid);
}
state.words = &words[length..];
words = &words[..length];
// Parse the nul terminated szKey
let key = wstrn(&words[3..]);
if words[3..].len() == key.len() {
return Err(Error::Invalid);
}
// Padding for the Value
words = &words[key.len().align_to(2) + 4..];
// Split the remaining words between the Value and Children
if value_length > words.len() {
return Err(Error::Invalid);
}
let value = &words[..value_length];
let children = &words[value.len().align_to(2)..];
Ok(TLV { key, value, children })
}
#[test]
fn test_parse_tlv_oob()
{
let mut parser;
// TLV header too short
parser = Parser::new_zero(&[0, 0]);
assert_eq!(parser.next(), Some(Err(Error::Invalid)));
assert_eq!(parser.next(), None);
// TLV length field larger than the data
parser = Parser::new_zero(&[12, 0, 0, 0]);
assert_eq!(parser.next(), Some(Err(Error::Invalid)));
assert_eq!(parser.next(), None);
// TLV key not nul terminated
parser = Parser::new_zero(&[16, 0, 1, 20, 20, 20, 20, 20]);
assert_eq!(parser.next(), Some(Err(Error::Invalid)));
assert_eq!(parser.next(), None);
// TLV value field larger than the data
parser = Parser::new_zero(&[8, 10, 0, 0, 0, 0]);
assert_eq!(parser.next(), Some(Err(Error::Invalid)));
assert_eq!(parser.next(), None);
}
| string | identifier_name |
version_info.rs | /*!
Version Information.
See [Microsoft's documentation](https://docs.microsoft.com/en-us/windows/desktop/menurc/version-information) for more information.
*/
use std::{char, cmp, fmt, mem, slice};
use std::collections::HashMap;
use crate::image::VS_FIXEDFILEINFO;
use crate::{Error, Result, _Pod as Pod};
use crate::util::{AlignTo, wstrn};
//----------------------------------------------------------------
/// Language and charset pair.
///
/// References [langID](https://docs.microsoft.com/en-us/windows/desktop/menurc/versioninfo-resource#langID) and [charsetID](https://docs.microsoft.com/en-us/windows/desktop/menurc/versioninfo-resource#charsetID).
#[derive(Copy, Clone, Debug, Pod, Eq, PartialEq)]
#[repr(C)]
pub struct Language {
pub lang_id: u16,
pub charset_id: u16,
}
impl Language {
/// Parse language hex strings.
pub fn parse(lang: &[u16]) -> std::result::Result<Language, &[u16]> {
if lang.len() != 8 {
return Err(lang);
}
fn digit(word: u16) -> u16 {
let num = word.wrapping_sub('0' as u16);
let upper = word.wrapping_sub('A' as u16).wrapping_add(10);
let lower = word.wrapping_sub('a' as u16).wrapping_add(10);
if word >= 'a' as u16 { lower }
else if word >= 'A' as u16 { upper }
else { num }
}
let mut digits = [0u16; 8];
for i in 0..8 {
digits[i] = digit(lang[i]);
}
let lang_id = (digits[0] << 12) | (digits[1] << 8) | (digits[2] << 4) | digits[3];
let charset_id = (digits[4] << 12) | (digits[5] << 8) | (digits[6] << 4) | digits[7];
Ok(Language { lang_id, charset_id })
}
}
impl fmt::Display for Language {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{:04X}{:04X}", self.lang_id, self.charset_id)
}
}
//----------------------------------------------------------------
/// Version Information.
#[derive(Copy, Clone, Debug)]
pub struct VersionInfo<'a> {
bytes: &'a [u8],
}
impl<'a> VersionInfo<'a> {
pub fn try_from(bytes: &'a [u8]) -> Result<VersionInfo<'a>> {
// Alignment of 4 bytes is assumed everywhere,
// unsafe code in this module relies on this
if !bytes.as_ptr().aligned_to(4) {
return Err(Error::Misaligned);
}
Ok(VersionInfo { bytes })
}
/// Gets the fixed file information if available.
pub fn fixed(self) -> Option<&'a VS_FIXEDFILEINFO> {
let mut fixed = None;
self.visit(&mut fixed);
fixed
}
/// Queries a string value by name.
///
/// The returned string is UTF-16 encoded, convert to UTF-8 with `String::from_utf16` and friends.
pub fn query_value<S: AsRef<str>>(self, key: &S) -> Option<&'a [u16]> {
let mut this = QueryValue {
key: key.as_ref(),
value: None,
};
self.visit(&mut this);
this.value
}
/// Iterates over all the strings.
///
/// The closure's arguments are the lang, name and value for each string pair in the version information.
pub fn for_each_string<F: FnMut(&'a [u16], &'a [u16], &'a [u16])>(self, mut f: F) {
self.visit(&mut ForEachString(&mut f));
}
/// Gets the strings in a hash map.
pub fn to_hash_map(self) -> HashMap<String, String> {
let mut hash_map = HashMap::new();
self.visit(&mut hash_map);
hash_map
}
/// Parse the version information.
///
/// Because of the super convoluted format, the visitor pattern is used.
/// Implement the [`Visit` trait](trait.Visit.html) to get the desired information.
///
/// To keep the API simple all errors are ignored, any invalid or corrupted data is skipped.
pub fn visit(self, visit: &mut dyn Visit<'a>) {
let words = unsafe { slice::from_raw_parts(self.bytes.as_ptr() as *const u16, self.bytes.len() / 2) };
for version_info_r in Parser::new_bytes(words) {
if let Ok(version_info) = version_info_r {
const VS_FIXEDFILEINFO_SIZEOF: usize = mem::size_of::<VS_FIXEDFILEINFO>();
let fixed = match mem::size_of_val(version_info.value) {
0 => None,
VS_FIXEDFILEINFO_SIZEOF => {
let value = unsafe { &*(version_info.value.as_ptr() as *const VS_FIXEDFILEINFO) };
Some(value)
},
_ => None,//return Err(Error::Invalid),
};
if !visit.version_info(version_info.key, fixed) {
continue;
}
// MS docs: This member is always equal to zero.
for file_info_r in Parser::new_zero(version_info.children) {
if let Ok(file_info) = file_info_r {
if !visit.file_info(file_info.key) {
continue;
}
// MS docs: L"StringFileInfo"
if file_info.key == &self::strings::StringFileInfo {
// MS docs: This member is always equal to zero.
for string_table_r in Parser::new_zero(file_info.children) {
if let Ok(string_table) = string_table_r {
if !visit.string_table(string_table.key) {
continue;
}
for string_r in Parser::new_words(string_table.children) {
if let Ok(string) = string_r {
// Strip the nul terminator...
let value = if string.value.last() != Some(&0) { string.value }
else { &string.value[..string.value.len() - 1] };
visit.string(string_table.key, string.key, value);
}
}
}
}
}
// MS docs: L"VarFileInfo"
else if file_info.key == &self::strings::VarFileInfo {
for var_r in Parser::new_bytes(file_info.children) {
if let Ok(var) = var_r {
visit.var(var.key, var.value);
}
}
}
}
}
}
}
}
}
//----------------------------------------------------------------
/// Visitor pattern to view the version information details.
#[allow(unused_variables)]
pub trait Visit<'a> {
fn version_info(&mut self, key: &'a [u16], fixed: Option<&'a VS_FIXEDFILEINFO>) -> bool { true }
fn file_info(&mut self, key: &'a [u16]) -> bool { true }
fn string_table(&mut self, lang: &'a [u16]) -> bool { true }
fn string(&mut self, lang: &'a [u16], key: &'a [u16], value: &'a [u16]) {}
fn var(&mut self, key: &'a [u16], pairs: &'a [u16]) {}
}
impl<'a> Visit<'a> for HashMap<String, String> {
fn string(&mut self, _lang: &'a [u16], key: &'a [u16], value: &'a [u16]) {
self.insert(
String::from_utf16_lossy(key),
String::from_utf16_lossy(value),
);
}
}
impl<'a> Visit<'a> for Option<&'a VS_FIXEDFILEINFO> {
fn version_info(&mut self, _key: &'a [u16], fixed: Option<&'a VS_FIXEDFILEINFO>) -> bool {
*self = fixed;
false
}
}
struct ForEachString<F>(F);
impl<'a, F: FnMut(&'a [u16], &'a [u16], &'a [u16])> Visit<'a> for ForEachString<F> {
fn string(&mut self, lang: &'a [u16], key: &'a [u16], value: &'a [u16]) {
(self.0)(lang, key, value);
}
}
struct QueryValue<'a, 's> {
key: &'s str,
value: Option<&'a [u16]>,
}
impl<'a, 's> Visit<'a> for QueryValue<'a, 's> {
fn string(&mut self, _lang: &'a [u16], key: &'a [u16], value: &'a [u16]) {
if Iterator::eq(self.key.chars().map(Ok), char::decode_utf16(key.iter().cloned())) {
self.value = Some(value);
}
}
}
//----------------------------------------------------------------
/*
"version_info": {
"fixed": { .. },
"strings": { .. },
},
*/
#[cfg(feature = "serde")]
mod serde {
use crate::util::serde_helper::*;
use super::{VersionInfo};
impl<'a> Serialize for VersionInfo<'a> {
fn serialize<S: Serializer>(&self, serializer: S) -> Result<S::Ok, S::Error> {
let mut state = serializer.serialize_struct("VersionInfo", 2)?;
state.serialize_field("fixed", &self.fixed())?;
state.serialize_field("strings", &self.to_hash_map())?;
state.end()
}
}
}
//----------------------------------------------------------------
mod strings {
#![allow(non_upper_case_globals)]
// static VS_VERSION_INFO: [u16; 15] = [86u16, 83, 95, 86, 69, 82, 83, 73, 79, 78, 95, 73, 78, 70, 79];
pub(crate) static StringFileInfo: [u16; 14] = [83u16, 116, 114, 105, 110, 103, 70, 105, 108, 101, 73, 110, 102, 111];
pub(crate) static VarFileInfo: [u16; 11] = [86u16, 97, 114, 70, 105, 108, 101, 73, 110, 102, 111];
// static Translation: [u16; 11] = [84u16, 114, 97, 110, 115, 108, 97, 116, 105, 111, 110];
// static Comments: [u16; 8] = [67u16, 111, 109, 109, 101, 110, 116, 115];
// static CompanyName: [u16; 11] = [67u16, 111, 109, 112, 97, 110, 121, 78, 97, 109, 101];
// static FileDescription: [u16; 15] = [70u16, 105, 108, 101, 68, 101, 115, 99, 114, 105, 112, 116, 105, 111, 110];
// static FileVersion: [u16; 11] = [70u16, 105, 108, 101, 86, 101, 114, 115, 105, 111, 110];
// static InternalName: [u16; 12] = [73u16, 110, 116, 101, 114, 110, 97, 108, 78, 97, 109, 101];
// static LegalCopyright: [u16; 14] = [76u16, 101, 103, 97, 108, 67, 111, 112, 121, 114, 105, 103, 104, 116];
// static LegalTrademarks: [u16; 15] = [76u16, 101, 103, 97, 108, 84, 114, 97, 100, 101, 109, 97, 114, 107, 115];
// static OriginalFilename: [u16; 16] = [79u16, 114, 105, 103, 105, 110, 97, 108, 70, 105, 108, 101, 110, 97, 109, 101];
// static PrivateBuild: [u16; 12] = [80u16, 114, 105, 118, 97, 116, 101, 66, 117, 105, 108, 100];
// static ProductName: [u16; 11] = [80u16, 114, 111, 100, 117, 99, 116, 78, 97, 109, 101];
// static ProductVersion: [u16; 14] = [80u16, 114, 111, 100, 117, 99, 116, 86, 101, 114, 115, 105, 111, 110];
// static SpecialBuild: [u16; 12] = [83u16, 112, 101, 99, 105, 97, 108, 66, 117, 105, 108, 100];
}
//----------------------------------------------------------------
#[cfg(test)]
pub(crate) fn test(version_info: VersionInfo<'_>) {
let _fixed = version_info.fixed();
let _hash_map = version_info.to_hash_map();
}
//----------------------------------------------------------------
/// Fixed file info constants.
pub mod image {
pub const VS_FF_DEBUG: u32 = 0x01;
pub const VS_FF_PRERELEASE: u32 = 0x02;
pub const VS_FF_PATCHED: u32 = 0x04;
pub const VS_FF_PRIVATEBUILD: u32 = 0x08;
pub const VS_FF_INFOINFERRED: u32 = 0x10;
pub const VS_FF_SPECIALBUILD: u32 = 0x20;
pub const VOS_UNKNOWN: u32 = 0x00000000;
pub const VOS_DOS: u32 = 0x00010000;
pub const VOS_OS216: u32 = 0x00020000;
pub const VOS_OS232: u32 = 0x00030000;
pub const VOS_NT: u32 = 0x00040000;
pub const VOS__WINDOWS16: u32 = 0x00000001;
pub const VOS__PM16: u32 = 0x00000002;
pub const VOS__PM32: u32 = 0x00000003;
pub const VOS__WINDOWS32: u32 = 0x00000004;
pub const VFT_UNKNOWN: u32 = 0x00000000;
pub const VFT_APP: u32 = 0x00000001;
pub const VFT_DLL: u32 = 0x00000002;
pub const VFT_DRV: u32 = 0x00000003;
pub const VFT_FONT: u32 = 0x00000004;
pub const VFT_VXD: u32 = 0x00000005;
pub const VFT_STATIC_LIB: u32 = 0x00000007;
pub const VFT2_UNKNOWN: u32 = 0x00000000;
pub const VFT2_DRV_PRINTER: u32 = 0x00000001;
pub const VFT2_DRV_KEYBOARD: u32 = 0x00000002;
pub const VFT2_DRV_LANGUAGE: u32 = 0x00000003;
pub const VFT2_DRV_DISPLAY: u32 = 0x00000004;
pub const VFT2_DRV_MOUSE: u32 = 0x00000005;
pub const VFT2_DRV_NETWORK: u32 = 0x00000006;
pub const VFT2_DRV_SYSTEM: u32 = 0x00000007;
pub const VFT2_DRV_INSTALLABLE: u32 = 0x00000008;
pub const VFT2_DRV_SOUND: u32 = 0x00000009;
pub const VFT2_DRV_COMM: u32 = 0x0000000A;
pub const VFT2_DRV_VERSIONED_PRINTER: u32 = 0x0000000C;
pub const VFT2_FONT_RASTER: u32 = 0x00000001;
pub const VFT2_FONT_VECTOR: u32 = 0x00000002;
pub const VFT2_FONT_TRUETYPE: u32 = 0x00000003;
}
//----------------------------------------------------------------
// This is an absolutely god awful format...
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
struct TLV<'a> {
pub key: &'a [u16],
pub value: &'a [u16], // DWORD aligned
pub children: &'a [u16], // DWORD aligned
}
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
enum ValueLengthType { Zero, Bytes, Words }
#[derive(Clone)]
struct Parser<'a> {
words: &'a [u16],
vlt: ValueLengthType,
}
impl<'a> Iterator for Parser<'a> {
type Item = Result<TLV<'a>>;
fn next(&mut self) -> Option<Result<TLV<'a>>> {
if self.words.len() == 0 {
return None;
}
let result = parse_tlv(self);
// If the parser errors, ensure the Iterator stops
if result.is_err() {
self.words = &self.words[self.words.len()..];
}
Some(result)
}
}
impl<'a> Parser<'a> {
pub(crate) fn new_zero(words: &'a [u16]) -> Parser<'a> {
Parser { words, vlt: ValueLengthType::Zero }
}
pub(crate) fn new_bytes(words: &'a [u16]) -> Parser<'a> {
Parser { words, vlt: ValueLengthType::Bytes }
}
pub(crate) fn new_words(words: &'a [u16]) -> Parser<'a> {
Parser { words, vlt: ValueLengthType::Words }
}
}
fn parse_tlv<'a>(state: &mut Parser<'a>) -> Result<TLV<'a>> {
let mut words = state.words;
// Parse the first three words from the TLV structure:
// wLength, wValueLength and wType (plus at least zero terminator of szKey)
if words.len() < 4 {
return Err(Error::Invalid);
}
// This is tricky, the struct contains a fixed and variable length parts
// However the length field includes the size of the fixed part
// Further complicating things, if the variable length part is absent the total length is set to zero (?!)
let length = cmp::max(4, words[0] as usize / 2).align_to(2);
// Oh god why, interpret the value_length
let value_length = match state.vlt {
ValueLengthType::Zero if words[1] == 0 => 0,
ValueLengthType::Zero => return Err(Error::Invalid),
ValueLengthType::Bytes => words[1] as usize / 2,
ValueLengthType::Words => words[1] as usize,
};
// let wType = words[2];
// Split the input where this structure ends and the next sibling begins
if length > words.len() {
return Err(Error::Invalid);
}
state.words = &words[length..];
words = &words[..length];
// Parse the nul terminated szKey
let key = wstrn(&words[3..]);
if words[3..].len() == key.len() {
return Err(Error::Invalid);
}
// Padding for the Value
words = &words[key.len().align_to(2) + 4..];
// Split the remaining words between the Value and Children
if value_length > words.len() {
return Err(Error::Invalid);
}
let value = &words[..value_length];
let children = &words[value.len().align_to(2)..];
Ok(TLV { key, value, children })
}
#[test]
fn test_parse_tlv_oob()
{
let mut parser;
// TLV header too short | // TLV length field larger than the data
parser = Parser::new_zero(&[12, 0, 0, 0]);
assert_eq!(parser.next(), Some(Err(Error::Invalid)));
assert_eq!(parser.next(), None);
// TLV key not nul terminated
parser = Parser::new_zero(&[16, 0, 1, 20, 20, 20, 20, 20]);
assert_eq!(parser.next(), Some(Err(Error::Invalid)));
assert_eq!(parser.next(), None);
// TLV value field larger than the data
parser = Parser::new_zero(&[8, 10, 0, 0, 0, 0]);
assert_eq!(parser.next(), Some(Err(Error::Invalid)));
assert_eq!(parser.next(), None);
} | parser = Parser::new_zero(&[0, 0]);
assert_eq!(parser.next(), Some(Err(Error::Invalid)));
assert_eq!(parser.next(), None);
| random_line_split |
version_info.rs | /*!
Version Information.
See [Microsoft's documentation](https://docs.microsoft.com/en-us/windows/desktop/menurc/version-information) for more information.
*/
use std::{char, cmp, fmt, mem, slice};
use std::collections::HashMap;
use crate::image::VS_FIXEDFILEINFO;
use crate::{Error, Result, _Pod as Pod};
use crate::util::{AlignTo, wstrn};
//----------------------------------------------------------------
/// Language and charset pair.
///
/// References [langID](https://docs.microsoft.com/en-us/windows/desktop/menurc/versioninfo-resource#langID) and [charsetID](https://docs.microsoft.com/en-us/windows/desktop/menurc/versioninfo-resource#charsetID).
#[derive(Copy, Clone, Debug, Pod, Eq, PartialEq)]
#[repr(C)]
pub struct Language {
pub lang_id: u16,
pub charset_id: u16,
}
impl Language {
/// Parse language hex strings.
pub fn parse(lang: &[u16]) -> std::result::Result<Language, &[u16]> {
if lang.len() != 8 {
return Err(lang);
}
fn digit(word: u16) -> u16 {
let num = word.wrapping_sub('0' as u16);
let upper = word.wrapping_sub('A' as u16).wrapping_add(10);
let lower = word.wrapping_sub('a' as u16).wrapping_add(10);
if word >= 'a' as u16 { lower }
else if word >= 'A' as u16 { upper }
else |
}
let mut digits = [0u16; 8];
for i in 0..8 {
digits[i] = digit(lang[i]);
}
let lang_id = (digits[0] << 12) | (digits[1] << 8) | (digits[2] << 4) | digits[3];
let charset_id = (digits[4] << 12) | (digits[5] << 8) | (digits[6] << 4) | digits[7];
Ok(Language { lang_id, charset_id })
}
}
impl fmt::Display for Language {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{:04X}{:04X}", self.lang_id, self.charset_id)
}
}
//----------------------------------------------------------------
/// Version Information.
#[derive(Copy, Clone, Debug)]
pub struct VersionInfo<'a> {
bytes: &'a [u8],
}
impl<'a> VersionInfo<'a> {
pub fn try_from(bytes: &'a [u8]) -> Result<VersionInfo<'a>> {
// Alignment of 4 bytes is assumed everywhere,
// unsafe code in this module relies on this
if !bytes.as_ptr().aligned_to(4) {
return Err(Error::Misaligned);
}
Ok(VersionInfo { bytes })
}
/// Gets the fixed file information if available.
pub fn fixed(self) -> Option<&'a VS_FIXEDFILEINFO> {
let mut fixed = None;
self.visit(&mut fixed);
fixed
}
/// Queries a string value by name.
///
/// The returned string is UTF-16 encoded, convert to UTF-8 with `String::from_utf16` and friends.
pub fn query_value<S: AsRef<str>>(self, key: &S) -> Option<&'a [u16]> {
let mut this = QueryValue {
key: key.as_ref(),
value: None,
};
self.visit(&mut this);
this.value
}
/// Iterates over all the strings.
///
/// The closure's arguments are the lang, name and value for each string pair in the version information.
pub fn for_each_string<F: FnMut(&'a [u16], &'a [u16], &'a [u16])>(self, mut f: F) {
self.visit(&mut ForEachString(&mut f));
}
/// Gets the strings in a hash map.
pub fn to_hash_map(self) -> HashMap<String, String> {
let mut hash_map = HashMap::new();
self.visit(&mut hash_map);
hash_map
}
/// Parse the version information.
///
/// Because of the super convoluted format, the visitor pattern is used.
/// Implement the [`Visit` trait](trait.Visit.html) to get the desired information.
///
/// To keep the API simple all errors are ignored, any invalid or corrupted data is skipped.
pub fn visit(self, visit: &mut dyn Visit<'a>) {
let words = unsafe { slice::from_raw_parts(self.bytes.as_ptr() as *const u16, self.bytes.len() / 2) };
for version_info_r in Parser::new_bytes(words) {
if let Ok(version_info) = version_info_r {
const VS_FIXEDFILEINFO_SIZEOF: usize = mem::size_of::<VS_FIXEDFILEINFO>();
let fixed = match mem::size_of_val(version_info.value) {
0 => None,
VS_FIXEDFILEINFO_SIZEOF => {
let value = unsafe { &*(version_info.value.as_ptr() as *const VS_FIXEDFILEINFO) };
Some(value)
},
_ => None,//return Err(Error::Invalid),
};
if !visit.version_info(version_info.key, fixed) {
continue;
}
// MS docs: This member is always equal to zero.
for file_info_r in Parser::new_zero(version_info.children) {
if let Ok(file_info) = file_info_r {
if !visit.file_info(file_info.key) {
continue;
}
// MS docs: L"StringFileInfo"
if file_info.key == &self::strings::StringFileInfo {
// MS docs: This member is always equal to zero.
for string_table_r in Parser::new_zero(file_info.children) {
if let Ok(string_table) = string_table_r {
if !visit.string_table(string_table.key) {
continue;
}
for string_r in Parser::new_words(string_table.children) {
if let Ok(string) = string_r {
// Strip the nul terminator...
let value = if string.value.last() != Some(&0) { string.value }
else { &string.value[..string.value.len() - 1] };
visit.string(string_table.key, string.key, value);
}
}
}
}
}
// MS docs: L"VarFileInfo"
else if file_info.key == &self::strings::VarFileInfo {
for var_r in Parser::new_bytes(file_info.children) {
if let Ok(var) = var_r {
visit.var(var.key, var.value);
}
}
}
}
}
}
}
}
}
//----------------------------------------------------------------
/// Visitor pattern to view the version information details.
#[allow(unused_variables)]
pub trait Visit<'a> {
fn version_info(&mut self, key: &'a [u16], fixed: Option<&'a VS_FIXEDFILEINFO>) -> bool { true }
fn file_info(&mut self, key: &'a [u16]) -> bool { true }
fn string_table(&mut self, lang: &'a [u16]) -> bool { true }
fn string(&mut self, lang: &'a [u16], key: &'a [u16], value: &'a [u16]) {}
fn var(&mut self, key: &'a [u16], pairs: &'a [u16]) {}
}
impl<'a> Visit<'a> for HashMap<String, String> {
fn string(&mut self, _lang: &'a [u16], key: &'a [u16], value: &'a [u16]) {
self.insert(
String::from_utf16_lossy(key),
String::from_utf16_lossy(value),
);
}
}
impl<'a> Visit<'a> for Option<&'a VS_FIXEDFILEINFO> {
fn version_info(&mut self, _key: &'a [u16], fixed: Option<&'a VS_FIXEDFILEINFO>) -> bool {
*self = fixed;
false
}
}
struct ForEachString<F>(F);
impl<'a, F: FnMut(&'a [u16], &'a [u16], &'a [u16])> Visit<'a> for ForEachString<F> {
fn string(&mut self, lang: &'a [u16], key: &'a [u16], value: &'a [u16]) {
(self.0)(lang, key, value);
}
}
struct QueryValue<'a, 's> {
key: &'s str,
value: Option<&'a [u16]>,
}
impl<'a, 's> Visit<'a> for QueryValue<'a, 's> {
fn string(&mut self, _lang: &'a [u16], key: &'a [u16], value: &'a [u16]) {
if Iterator::eq(self.key.chars().map(Ok), char::decode_utf16(key.iter().cloned())) {
self.value = Some(value);
}
}
}
//----------------------------------------------------------------
/*
"version_info": {
"fixed": { .. },
"strings": { .. },
},
*/
#[cfg(feature = "serde")]
mod serde {
use crate::util::serde_helper::*;
use super::{VersionInfo};
impl<'a> Serialize for VersionInfo<'a> {
fn serialize<S: Serializer>(&self, serializer: S) -> Result<S::Ok, S::Error> {
let mut state = serializer.serialize_struct("VersionInfo", 2)?;
state.serialize_field("fixed", &self.fixed())?;
state.serialize_field("strings", &self.to_hash_map())?;
state.end()
}
}
}
//----------------------------------------------------------------
mod strings {
#![allow(non_upper_case_globals)]
// static VS_VERSION_INFO: [u16; 15] = [86u16, 83, 95, 86, 69, 82, 83, 73, 79, 78, 95, 73, 78, 70, 79];
pub(crate) static StringFileInfo: [u16; 14] = [83u16, 116, 114, 105, 110, 103, 70, 105, 108, 101, 73, 110, 102, 111];
pub(crate) static VarFileInfo: [u16; 11] = [86u16, 97, 114, 70, 105, 108, 101, 73, 110, 102, 111];
// static Translation: [u16; 11] = [84u16, 114, 97, 110, 115, 108, 97, 116, 105, 111, 110];
// static Comments: [u16; 8] = [67u16, 111, 109, 109, 101, 110, 116, 115];
// static CompanyName: [u16; 11] = [67u16, 111, 109, 112, 97, 110, 121, 78, 97, 109, 101];
// static FileDescription: [u16; 15] = [70u16, 105, 108, 101, 68, 101, 115, 99, 114, 105, 112, 116, 105, 111, 110];
// static FileVersion: [u16; 11] = [70u16, 105, 108, 101, 86, 101, 114, 115, 105, 111, 110];
// static InternalName: [u16; 12] = [73u16, 110, 116, 101, 114, 110, 97, 108, 78, 97, 109, 101];
// static LegalCopyright: [u16; 14] = [76u16, 101, 103, 97, 108, 67, 111, 112, 121, 114, 105, 103, 104, 116];
// static LegalTrademarks: [u16; 15] = [76u16, 101, 103, 97, 108, 84, 114, 97, 100, 101, 109, 97, 114, 107, 115];
// static OriginalFilename: [u16; 16] = [79u16, 114, 105, 103, 105, 110, 97, 108, 70, 105, 108, 101, 110, 97, 109, 101];
// static PrivateBuild: [u16; 12] = [80u16, 114, 105, 118, 97, 116, 101, 66, 117, 105, 108, 100];
// static ProductName: [u16; 11] = [80u16, 114, 111, 100, 117, 99, 116, 78, 97, 109, 101];
// static ProductVersion: [u16; 14] = [80u16, 114, 111, 100, 117, 99, 116, 86, 101, 114, 115, 105, 111, 110];
// static SpecialBuild: [u16; 12] = [83u16, 112, 101, 99, 105, 97, 108, 66, 117, 105, 108, 100];
}
//----------------------------------------------------------------
#[cfg(test)]
pub(crate) fn test(version_info: VersionInfo<'_>) {
let _fixed = version_info.fixed();
let _hash_map = version_info.to_hash_map();
}
//----------------------------------------------------------------
/// Fixed file info constants.
pub mod image {
pub const VS_FF_DEBUG: u32 = 0x01;
pub const VS_FF_PRERELEASE: u32 = 0x02;
pub const VS_FF_PATCHED: u32 = 0x04;
pub const VS_FF_PRIVATEBUILD: u32 = 0x08;
pub const VS_FF_INFOINFERRED: u32 = 0x10;
pub const VS_FF_SPECIALBUILD: u32 = 0x20;
pub const VOS_UNKNOWN: u32 = 0x00000000;
pub const VOS_DOS: u32 = 0x00010000;
pub const VOS_OS216: u32 = 0x00020000;
pub const VOS_OS232: u32 = 0x00030000;
pub const VOS_NT: u32 = 0x00040000;
pub const VOS__WINDOWS16: u32 = 0x00000001;
pub const VOS__PM16: u32 = 0x00000002;
pub const VOS__PM32: u32 = 0x00000003;
pub const VOS__WINDOWS32: u32 = 0x00000004;
pub const VFT_UNKNOWN: u32 = 0x00000000;
pub const VFT_APP: u32 = 0x00000001;
pub const VFT_DLL: u32 = 0x00000002;
pub const VFT_DRV: u32 = 0x00000003;
pub const VFT_FONT: u32 = 0x00000004;
pub const VFT_VXD: u32 = 0x00000005;
pub const VFT_STATIC_LIB: u32 = 0x00000007;
pub const VFT2_UNKNOWN: u32 = 0x00000000;
pub const VFT2_DRV_PRINTER: u32 = 0x00000001;
pub const VFT2_DRV_KEYBOARD: u32 = 0x00000002;
pub const VFT2_DRV_LANGUAGE: u32 = 0x00000003;
pub const VFT2_DRV_DISPLAY: u32 = 0x00000004;
pub const VFT2_DRV_MOUSE: u32 = 0x00000005;
pub const VFT2_DRV_NETWORK: u32 = 0x00000006;
pub const VFT2_DRV_SYSTEM: u32 = 0x00000007;
pub const VFT2_DRV_INSTALLABLE: u32 = 0x00000008;
pub const VFT2_DRV_SOUND: u32 = 0x00000009;
pub const VFT2_DRV_COMM: u32 = 0x0000000A;
pub const VFT2_DRV_VERSIONED_PRINTER: u32 = 0x0000000C;
pub const VFT2_FONT_RASTER: u32 = 0x00000001;
pub const VFT2_FONT_VECTOR: u32 = 0x00000002;
pub const VFT2_FONT_TRUETYPE: u32 = 0x00000003;
}
//----------------------------------------------------------------
// This is an absolutely god awful format...
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
struct TLV<'a> {
pub key: &'a [u16],
pub value: &'a [u16], // DWORD aligned
pub children: &'a [u16], // DWORD aligned
}
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
enum ValueLengthType { Zero, Bytes, Words }
#[derive(Clone)]
struct Parser<'a> {
words: &'a [u16],
vlt: ValueLengthType,
}
impl<'a> Iterator for Parser<'a> {
type Item = Result<TLV<'a>>;
fn next(&mut self) -> Option<Result<TLV<'a>>> {
if self.words.len() == 0 {
return None;
}
let result = parse_tlv(self);
// If the parser errors, ensure the Iterator stops
if result.is_err() {
self.words = &self.words[self.words.len()..];
}
Some(result)
}
}
impl<'a> Parser<'a> {
pub(crate) fn new_zero(words: &'a [u16]) -> Parser<'a> {
Parser { words, vlt: ValueLengthType::Zero }
}
pub(crate) fn new_bytes(words: &'a [u16]) -> Parser<'a> {
Parser { words, vlt: ValueLengthType::Bytes }
}
pub(crate) fn new_words(words: &'a [u16]) -> Parser<'a> {
Parser { words, vlt: ValueLengthType::Words }
}
}
fn parse_tlv<'a>(state: &mut Parser<'a>) -> Result<TLV<'a>> {
let mut words = state.words;
// Parse the first three words from the TLV structure:
// wLength, wValueLength and wType (plus at least zero terminator of szKey)
if words.len() < 4 {
return Err(Error::Invalid);
}
// This is tricky, the struct contains a fixed and variable length parts
// However the length field includes the size of the fixed part
// Further complicating things, if the variable length part is absent the total length is set to zero (?!)
let length = cmp::max(4, words[0] as usize / 2).align_to(2);
// Oh god why, interpret the value_length
let value_length = match state.vlt {
ValueLengthType::Zero if words[1] == 0 => 0,
ValueLengthType::Zero => return Err(Error::Invalid),
ValueLengthType::Bytes => words[1] as usize / 2,
ValueLengthType::Words => words[1] as usize,
};
// let wType = words[2];
// Split the input where this structure ends and the next sibling begins
if length > words.len() {
return Err(Error::Invalid);
}
state.words = &words[length..];
words = &words[..length];
// Parse the nul terminated szKey
let key = wstrn(&words[3..]);
if words[3..].len() == key.len() {
return Err(Error::Invalid);
}
// Padding for the Value
words = &words[key.len().align_to(2) + 4..];
// Split the remaining words between the Value and Children
if value_length > words.len() {
return Err(Error::Invalid);
}
let value = &words[..value_length];
let children = &words[value.len().align_to(2)..];
Ok(TLV { key, value, children })
}
#[test]
fn test_parse_tlv_oob()
{
let mut parser;
// TLV header too short
parser = Parser::new_zero(&[0, 0]);
assert_eq!(parser.next(), Some(Err(Error::Invalid)));
assert_eq!(parser.next(), None);
// TLV length field larger than the data
parser = Parser::new_zero(&[12, 0, 0, 0]);
assert_eq!(parser.next(), Some(Err(Error::Invalid)));
assert_eq!(parser.next(), None);
// TLV key not nul terminated
parser = Parser::new_zero(&[16, 0, 1, 20, 20, 20, 20, 20]);
assert_eq!(parser.next(), Some(Err(Error::Invalid)));
assert_eq!(parser.next(), None);
// TLV value field larger than the data
parser = Parser::new_zero(&[8, 10, 0, 0, 0, 0]);
assert_eq!(parser.next(), Some(Err(Error::Invalid)));
assert_eq!(parser.next(), None);
}
| { num } | conditional_block |
symbols.py | # Copyright (c) 2021, nla group, manchester
# All rights reserved.
# Redistribution and use in source and binary forms, with or without | # modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import warnings
import collections
import pandas as pd
import numpy as np
from scipy.stats import norm
from sklearn.cluster import KMeans
def symbolsAssign(clusters):
""" automatically assign symbols to different clusters, start with '!'
from https://github.com/nla-group/fABBA.
Parameters
----------
clusters - list or pd.Series or array
the list of clusters.
----------
Return:
symbols(list of string), inverse_hash(dict): respectively for the
corresponding symbolic sequence and the hashmap for inverse transform.
"""
alphabet = ['A','a','B','b','C','c','D','d','E','e',
'F','f','G','g','H','h','I','i','J','j',
'K','k','L','l','M','m','N','n','O','o',
'P','p','Q','q','R','r','S','s','T','t',
'U','u','V','v','W','w','X','x','Y','y','Z','z']
clusters = pd.Series(clusters)
N = len(clusters.unique())
cluster_sort = [0] * N
counter = collections.Counter(clusters)
for ind, el in enumerate(counter.most_common()):
cluster_sort[ind] = el[0]
if N >= len(alphabet):
alphabet = [chr(i+33) for i in range(0, N)]
else:
alphabet = alphabet[:N]
hashm = dict(zip(cluster_sort + alphabet, alphabet + cluster_sort))
strings = [hashm[i] for i in clusters]
return strings, hashm
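# --- Added illustrative sketch (not part of the original nla-group module). ---
# It shows, under assumed toy labels, how symbolsAssign maps cluster labels to
# single-character symbols: the most frequent cluster gets 'A', the next 'a',
# and the returned hashmap holds both directions, which the inverse transforms
# rely on.
def _demo_symbols_assign():
    """Usage sketch for symbolsAssign; the label list below is an assumption."""
    labels = [1, 1, 0, 2, 1]
    strings, hashm = symbolsAssign(labels)
    # strings == ['A', 'A', 'a', 'B', 'A'] because cluster 1 is the most common,
    # and hashm['A'] == 1 recovers the original label for the inverse transform.
    return strings, hashm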
class SAX:
"""Modified from https://github.com/nla-group/TARZAN"""
def __init__(self, *, width = 2, n_paa_segments=None, k = 5, return_list=False, verbose=True):
if n_paa_segments is not None:
# if verbose == True:
# warnings.warn("Set width to ``len(ts) // n_paa_segments''")
self.n_paa_segments = n_paa_segments
self.width = None
else:
self.n_paa_segments = None
self.width = width
self.number_of_symbols = k
self.return_list = return_list
self.mu, self.std = 0, 1
def transform(self, time_series):
if self.width is None:
self.width = len(time_series) // self.n_paa_segments
self.mu = np.mean(time_series)
self.std = np.std(time_series)
if self.std == 0:
self.std = 1
time_series = (time_series - self.mu)/self.std
compressed_time_series = self.paa_mean(time_series)
symbolic_time_series = self._digitize(compressed_time_series)
return symbolic_time_series
def inverse_transform(self, symbolic_time_series):
compressed_time_series = self._reverse_digitize(symbolic_time_series)
time_series = self._reconstruct(compressed_time_series)
time_series = time_series*self.std + self.mu
return time_series
def paa_mean(self, ts):
if len(ts) % self.width != 0:
warnings.warn("Result truncates, width does not divide length")
return [np.mean(ts[i*self.width:np.min([len(ts), (i+1)*self.width])]) for i in range(int(np.floor(len(ts)/self.width)))]
def _digitize(self, ts):
symbolic_ts = self._gaussian_breakpoints(ts)
return symbolic_ts
def _gaussian_breakpoints(self, ts):
# Construct Breakpoints
breakpoints = np.hstack(
(norm.ppf([float(a) / self.number_of_symbols for a in range(1, self.number_of_symbols)], scale=1),
np.inf))
labels = []
for i in ts:
for j in range(len(breakpoints)):
if i < breakpoints[j]:
labels.append(j)
break
strings, self.hashm = symbolsAssign(labels)
if not self.return_list:
strings = "".join(strings)
return strings
def _reconstruct(self, reduced_ts):
return self._reverse_pca(reduced_ts)
def _reverse_pca(self, ts):
return np.kron(ts, np.ones([1,self.width])[0])
def _reverse_digitize(self, symbolic_ts):
return self._reverse_gaussian_breakpoints(symbolic_ts)
def _reverse_gaussian_breakpoints(self, symbols):
breakpoint_values = norm.ppf([float(a) / (2 * self.number_of_symbols) for a in range(1, 2 * self.number_of_symbols, 2)], scale=1)
ts = []
for s in symbols:
j = self.hashm[s]
ts.append(breakpoint_values[j])
return ts
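# --- Added illustrative sketch (not part of the original nla-group module). ---
# A SAX round trip on an assumed sine wave: z-normalise, PAA-compress into
# segment means, digitize with Gaussian breakpoints, then map the symbols back
# to an approximate series of the original length.
def _demo_sax_round_trip():
    """Usage sketch for SAX; the input series and parameters are assumptions."""
    ts = np.sin(np.linspace(0, 4 * np.pi, 64))
    sax = SAX(width=4, k=5)
    symbols = sax.transform(ts)            # 64 points / width 4 -> 16 symbols
    approx = sax.inverse_transform(symbols)
    # approx has the same length as ts and tracks it at the PAA resolution.
    return symbols, np.asarray(approx)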
# python implementation for aggregation
def aggregate(data, sorting="2-norm", tol=0.5): # , verbose=1
"""aggregate the data
Parameters
----------
data : numpy.ndarray
the input that is array-like of shape (n_samples,).
sorting : str
the sorting method for aggregation, default='2-norm', alternative option: '1-norm' and 'lexi'.
tol : float
the tolerance to control the aggregation. if the distance between the starting point
of a group and another data point is less than or equal to the tolerance,
the point is allocated to that group.
Returns
-------
labels (numpy.ndarray) :
the group categories of the data after aggregation
splist (list) :
the list of the starting points
nr_dist (int) :
number of pairwise distance calculations
"""
splist = list() # store the starting points
len_ind = data.shape[0]
if sorting == "2-norm":
sort_vals = np.linalg.norm(data, ord=2, axis=1)
ind = np.argsort(sort_vals)
elif sorting == "1-norm":
sort_vals = np.linalg.norm(data, ord=1, axis=1)
ind = np.argsort(sort_vals)
else:
ind = np.lexsort((data[:,1], data[:,0]), axis=0)
lab = 0
labels = [-1]*len_ind
nr_dist = 0
for i in range(len_ind): # tqdm(range(len_ind), disable=not verbose)
sp = ind[i] # starting point
if labels[sp] >= 0:
continue
else:
clustc = data[sp,:]
labels[sp] = lab
num_group = 1
for j in ind[i:]:
if labels[j] >= 0:
continue
dat = clustc - data[j,:]
dist = np.inner(dat, dat)
nr_dist += 1
if dist <= tol**2:
num_group += 1
labels[j] = lab
else: # apply early stopping
if sorting == "2-norm" or sorting == "1-norm":
if (sort_vals[j] - sort_vals[sp] > tol):
break
else:
if ((data[j,0] - data[sp,0] == tol) and (data[j,1] > data[sp,1])) or (data[j,0] - data[sp,0] > tol):
break
splist.append([sp, lab] + [num_group] + list(data[sp,:]) ) # respectively store starting point
# index, label, number of neighbor objects, center (starting point).
lab += 1
return np.array(labels), splist, nr_dist
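# --- Added illustrative sketch (not part of the original nla-group module). ---
# aggregate() greedily groups 2-D points around "starting points": a point joins
# a group when its squared distance to that group's starting point is at most
# tol**2; the toy points and tolerance below are assumptions.
def _demo_aggregate():
    """Usage sketch for aggregate on two obvious clusters."""
    pts = np.array([[0.0, 0.0], [0.1, 0.0], [2.0, 2.0], [2.1, 2.0]])
    labels, splist, nr_dist = aggregate(pts, sorting="2-norm", tol=0.5)
    # labels == array([0, 0, 1, 1]); each splist entry stores
    # [index, label, group size, starting-point coordinates].
    return labels, splist, nr_dist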
def compress(ts, tol=0.5, max_len=np.inf):
"""
Approximate a time series using a continuous piecewise linear function.
Parameters
----------
ts - numpy ndarray
Time series as input of numpy array
Returns
-------
pieces - numpy array
Numpy ndarray with three columns, each row contains length, increment, error for the segment.
"""
start = 0
end = 1
pieces = list() # np.empty([0, 3])
x = np.arange(0, len(ts))
epsilon = np.finfo(float).eps
while end < len(ts):
inc = ts[end] - ts[start]
err = ts[start] + (inc/(end-start))*x[0:end-start+1] - ts[start:end+1]
err = np.inner(err, err)
if (err <= tol*(end-start-1) + epsilon) and (end-start-1 < max_len):
(lastinc, lasterr) = (inc, err)
end += 1
else:
# pieces = np.vstack([pieces, np.array([end-start-1, lastinc, lasterr])])
pieces.append([end-start-1, lastinc, lasterr])
start = end - 1
# pieces = np.vstack([pieces, np.array([end-start-1, lastinc, lasterr])])
pieces.append([end-start-1, lastinc, lasterr])
return pieces
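# --- Added illustrative sketch (not part of the original nla-group module). ---
# compress() approximates a series with straight-line pieces; each returned row
# is [segment length, total increment, squared error]. The toy series below is
# an assumption.
def _demo_compress():
    """Usage sketch for compress on a rise-then-fall series."""
    ts = np.array([0.0, 1.0, 2.0, 3.0, 2.0, 1.0, 0.0])
    pieces = np.array(compress(ts, tol=0.1))
    # Two pieces come back: [3, 3, 0] for the rise and [3, -3, 0] for the fall.
    return pieces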
class fABBA:
def __init__ (self, tol=0.1, alpha=0.5, sorting='2-norm', scl=1, verbose=1, max_len=np.inf):
"""
Parameters
----------
tol - float
Control tolerance for compression, default as 0.1.
scl - int
Scale for length, default as 1, means 2d-digitization, otherwise implement 1d-digitization.
verbose - int
Control logs print, default as 1, print logs.
max_len - int
The max length for each segment, default as np.inf.
"""
self.tol = tol
self.scl = scl
self.verbose = verbose
self.max_len = max_len
self.alpha = alpha
self.sorting = sorting
self.compression_rate = None
self.digitization_rate = None
def fit_transform(self, series):
"""
Compress and digitize the time series together.
Parameters
----------
series - array or list
Time series.
alpha - float
Control tolerance for digitization, default as 0.5.
string_form - boolean
Whether to return with string form, default as True.
"""
series = np.array(series).astype(np.float64)
pieces = np.array(compress(ts=series, tol=self.tol, max_len=self.max_len))
strings = self.digitize(pieces[:,0:2])
self.compression_rate = pieces.shape[0] / series.shape[0]
self.digitization_rate = self.centers.shape[0] / pieces.shape[0]
if self.verbose in [1, 2]:
print("""Compression: Reduced series of length {0} to {1} segments.""".format(series.shape[0], pieces.shape[0]),
"""Digitization: Reduced {} pieces""".format(len(strings)), "to", self.centers.shape[0], "symbols.")
# strings = ''.join(strings)
return strings
def digitize(self, pieces, early_stopping=True):
"""
Greedy 2D clustering of pieces (a Nx2 numpy array),
using tolerance tol and len/inc scaling parameter scl.
In this variant, a 'temporary' cluster center is used
when assigning pieces to clusters. This temporary cluster
is the first piece available after appropriate scaling
and sorting of all pieces. It is *not* necessarily the
mean of all pieces in that cluster and hence the final
cluster centers, which are just the means, might achieve
a smaller within-cluster tol.
"""
if self.sorting not in ['2-norm', '1-norm', 'lexi']:
raise ValueError("Please refer to a specific and correct sorting way, namely '2-norm', '1-norm' and 'lexi'")
_std = np.std(pieces, axis=0) # prevent zero-division
if _std[0] == 0:
_std[0] = 1
if _std[1] == 0:
_std[1] = 1
npieces = pieces * np.array([self.scl, 1]) / _std
labels, self.splist, self.nr_dist = aggregate(npieces, self.sorting, self.alpha) # other two variables are used for experiment
centers = np.zeros((0,2))
for c in range(len(self.splist)):
indc = np.argwhere(labels==c)
center = np.mean(pieces[indc,:], axis=0)
centers = np.r_[ centers, center ]
self.centers = centers
strings, self.hashmap = symbolsAssign(labels)
return strings
def inverse_transform(self, strings, start=0):
pieces = self.inverse_digitize(strings, self.centers, self.hashmap)
pieces = self.quantize(pieces)
time_series = self.inverse_compress(pieces, start)
return time_series
def inverse_digitize(self, strings, centers, hashmap):
pieces = np.empty([0,2])
for p in strings:
pc = centers[int(hashmap[p])]
pieces = np.vstack([pieces, pc])
return pieces[:,0:2]
def quantize(self, pieces):
if len(pieces) == 1:
pieces[0,0] = round(pieces[0,0])
else:
for p in range(len(pieces)-1):
corr = round(pieces[p,0]) - pieces[p,0]
pieces[p,0] = round(pieces[p,0] + corr)
pieces[p+1,0] = pieces[p+1,0] - corr
if pieces[p,0] == 0:
pieces[p,0] = 1
pieces[p+1,0] -= 1
pieces[-1,0] = round(pieces[-1,0],0)
return pieces
def inverse_compress(self, pieces, start):
"""Modified from ABBA package, please see ABBA package to see guidance."""
time_series = [start]
# stitch linear piece onto last
for j in range(0, len(pieces)):
x = np.arange(0,pieces[j,0]+1)/(pieces[j,0])*pieces[j,1]
#print(x)
y = time_series[-1] + x
time_series = time_series + y[1:].tolist()
return time_series
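# --- Added illustrative sketch (not part of the original nla-group module). ---
# End-to-end fABBA usage on an assumed sine wave: compress the series into a
# symbolic sequence, then rebuild an approximation from the symbols and the
# stored cluster centres. Parameter values here are assumptions, not defaults
# recommended by the authors.
def _demo_fabba_round_trip():
    """Usage sketch for fABBA.fit_transform / inverse_transform."""
    ts = np.sin(np.linspace(0, 6 * np.pi, 200))
    fabba = fABBA(tol=0.1, alpha=0.5, sorting='2-norm', verbose=0)
    symbols = fabba.fit_transform(ts)              # list of one-char symbols
    approx = fabba.inverse_transform(symbols, start=ts[0])
    # approx is a plain Python list that follows ts up to the compression and
    # digitization tolerances; its length may differ slightly after rounding.
    return symbols, approx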
class ABBA:
def __init__ (self, tol=0.1, k_cluster=10, verbose=1, max_len=np.inf):
"""
Parameters
----------
tol - float
Control tolerance for compression, default as 0.1.
k_cluster - int
Number of symbols used for digitization.
verbose - int
Control logs print, default as 1, print logs.
max_len - int
The max length for each segment, default as np.inf.
"""
self.tol = tol
self.verbose = verbose
self.max_len = max_len
self.k_cluster = k_cluster
self.compression_rate = None
self.digitization_rate = None
def fit_transform(self, series):
"""
Compress and digitize the time series together.
Parameters
----------
series - array or list
Time series.
alpha - float
Control tolerance for digitization, default as 0.5.
string_form - boolean
Whether to return with string form, default as True.
"""
series = np.array(series).astype(np.float64)
pieces = np.array(compress(ts=series, tol=self.tol, max_len=self.max_len))
strings = self.digitize(pieces[:,0:2])
self.compression_rate = pieces.shape[0] / series.shape[0]
self.digitization_rate = self.centers.shape[0] / pieces.shape[0]
if self.verbose in [1, 2]:
print("""Compression: Reduced series of length {0} to {1} segments.""".format(series.shape[0], pieces.shape[0]),
"""Digitization: Reduced {} pieces""".format(len(strings)), "to", self.centers.shape[0], "symbols.")
# strings = ''.join(strings)
return strings
def digitize(self, pieces, early_stopping=True):
"""
Greedy 2D clustering of pieces (a Nx2 numpy array),
using tolerance tol and len/inc scaling parameter scl.
In this variant, a 'temporary' cluster center is used
when assigning pieces to clusters. This temporary cluster
is the first piece available after appropriate scaling
and sorting of all pieces. It is *not* necessarily the
mean of all pieces in that cluster and hence the final
cluster centers, which are just the means, might achieve
a smaller within-cluster tol.
"""
_std = np.std(pieces, axis=0) # prevent zero-division
if _std[0] == 0:
_std[0] = 1
if _std[1] == 0:
_std[1] = 1
npieces = pieces / _std
# print(npieces.shape)
kmeans = KMeans(n_clusters=self.k_cluster, random_state=0).fit(npieces)
labels = kmeans.labels_
self.centers = kmeans.cluster_centers_*_std
strings, self.hashmap = symbolsAssign(labels)
return strings
def inverse_transform(self, strings, start=0):
pieces = self.inverse_digitize(strings, self.centers, self.hashmap)
pieces = self.quantize(pieces)
time_series = self.inverse_compress(pieces, start)
return time_series
def inverse_digitize(self, strings, centers, hashmap):
pieces = np.empty([0,2])
for p in strings:
pc = centers[int(hashmap[p])]
pieces = np.vstack([pieces, pc])
return pieces[:,0:2]
def quantize(self, pieces):
if len(pieces) == 1:
pieces[0,0] = round(pieces[0,0])
else:
for p in range(len(pieces)-1):
corr = round(pieces[p,0]) - pieces[p,0]
pieces[p,0] = round(pieces[p,0] + corr)
pieces[p+1,0] = pieces[p+1,0] - corr
if pieces[p,0] == 0:
pieces[p,0] = 1
pieces[p+1,0] -= 1
pieces[-1,0] = round(pieces[-1,0],0)
return pieces
def inverse_compress(self, pieces, start):
"""Modified from ABBA package, please see ABBA package to see guidance."""
time_series = [start]
# stitch linear piece onto last
for j in range(0, len(pieces)):
x = np.arange(0,pieces[j,0]+1)/(pieces[j,0])*pieces[j,1]
#print(x)
y = time_series[-1] + x
time_series = time_series + y[1:].tolist()
return time_series | random_line_split | |
symbols.py | # Copyright (c) 2021, nla group, manchester
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import warnings
import collections
import pandas as pd
import numpy as np
from scipy.stats import norm
from sklearn.cluster import KMeans
def symbolsAssign(clusters):
""" automatically assign symbols to different clusters, start with '!'
from https://github.com/nla-group/fABBA.
Parameters
----------
clusters - list or pd.Series or array
the list of clusters.
----------
Return:
symbols(list of string), inverse_hash(dict): respectively for the
corresponding symbolic sequence and the hashmap for inverse transform.
"""
alphabet = ['A','a','B','b','C','c','D','d','E','e',
'F','f','G','g','H','h','I','i','J','j',
'K','k','L','l','M','m','N','n','O','o',
'P','p','Q','q','R','r','S','s','T','t',
'U','u','V','v','W','w','X','x','Y','y','Z','z']
clusters = pd.Series(clusters)
N = len(clusters.unique())
cluster_sort = [0] * N
counter = collections.Counter(clusters)
for ind, el in enumerate(counter.most_common()):
cluster_sort[ind] = el[0]
if N >= len(alphabet):
alphabet = [chr(i+33) for i in range(0, N)]
else:
alphabet = alphabet[:N]
hashm = dict(zip(cluster_sort + alphabet, alphabet + cluster_sort))
strings = [hashm[i] for i in clusters]
return strings, hashm
class SAX:
"""Modified from https://github.com/nla-group/TARZAN"""
def __init__(self, *, width = 2, n_paa_segments=None, k = 5, return_list=False, verbose=True):
if n_paa_segments is not None:
# if verbose == True:
# warnings.warn("Set width to ``len(ts) // n_paa_segments''")
self.n_paa_segments = n_paa_segments
self.width = None
else:
|
self.number_of_symbols = k
self.return_list = return_list
self.mu, self.std = 0, 1
def transform(self, time_series):
if self.width is None:
self.width = len(time_series) // self.n_paa_segments
self.mu = np.mean(time_series)
self.std = np.std(time_series)
if self.std == 0:
self.std = 1
time_series = (time_series - self.mu)/self.std
compressed_time_series = self.paa_mean(time_series)
symbolic_time_series = self._digitize(compressed_time_series)
return symbolic_time_series
def inverse_transform(self, symbolic_time_series):
compressed_time_series = self._reverse_digitize(symbolic_time_series)
time_series = self._reconstruct(compressed_time_series)
time_series = time_series*self.std + self.mu
return time_series
def paa_mean(self, ts):
if len(ts) % self.width != 0:
warnings.warn("Result truncates, width does not divide length")
return [np.mean(ts[i*self.width:np.min([len(ts), (i+1)*self.width])]) for i in range(int(np.floor(len(ts)/self.width)))]
def _digitize(self, ts):
symbolic_ts = self._gaussian_breakpoints(ts)
return symbolic_ts
def _gaussian_breakpoints(self, ts):
# Construct Breakpoints
breakpoints = np.hstack(
(norm.ppf([float(a) / self.number_of_symbols for a in range(1, self.number_of_symbols)], scale=1),
np.inf))
labels = []
for i in ts:
for j in range(len(breakpoints)):
if i < breakpoints[j]:
labels.append(j)
break
strings, self.hashm = symbolsAssign(labels)
if not self.return_list:
strings = "".join(strings)
return strings
def _reconstruct(self, reduced_ts):
return self._reverse_pca(reduced_ts)
def _reverse_pca(self, ts):
return np.kron(ts, np.ones([1,self.width])[0])
def _reverse_digitize(self, symbolic_ts):
return self._reverse_gaussian_breakpoints(symbolic_ts)
def _reverse_gaussian_breakpoints(self, symbols):
breakpoint_values = norm.ppf([float(a) / (2 * self.number_of_symbols) for a in range(1, 2 * self.number_of_symbols, 2)], scale=1)
ts = []
for s in symbols:
j = self.hashm[s]
ts.append(breakpoint_values[j])
return ts
# python implementation for aggregation
def aggregate(data, sorting="2-norm", tol=0.5): # , verbose=1
"""aggregate the data
Parameters
----------
data : numpy.ndarray
the input that is array-like of shape (n_samples,).
sorting : str
the sorting method for aggregation, default='2-norm', alternative option: '1-norm' and 'lexi'.
tol : float
the tolerance to control the aggregation. if the distance between the starting point
of a group and another data point is less than or equal to the tolerance,
the point is allocated to that group.
Returns
-------
labels (numpy.ndarray) :
the group categories of the data after aggregation
splist (list) :
the list of the starting points
nr_dist (int) :
number of pairwise distance calculations
"""
splist = list() # store the starting points
len_ind = data.shape[0]
if sorting == "2-norm":
sort_vals = np.linalg.norm(data, ord=2, axis=1)
ind = np.argsort(sort_vals)
elif sorting == "1-norm":
sort_vals = np.linalg.norm(data, ord=1, axis=1)
ind = np.argsort(sort_vals)
else:
ind = np.lexsort((data[:,1], data[:,0]), axis=0)
lab = 0
labels = [-1]*len_ind
nr_dist = 0
for i in range(len_ind): # tqdm(range(len_ind), disable=not verbose)
sp = ind[i] # starting point
if labels[sp] >= 0:
continue
else:
clustc = data[sp,:]
labels[sp] = lab
num_group = 1
for j in ind[i:]:
if labels[j] >= 0:
continue
dat = clustc - data[j,:]
dist = np.inner(dat, dat)
nr_dist += 1
if dist <= tol**2:
num_group += 1
labels[j] = lab
else: # apply early stopping
if sorting == "2-norm" or sorting == "1-norm":
if (sort_vals[j] - sort_vals[sp] > tol):
break
else:
if ((data[j,0] - data[sp,0] == tol) and (data[j,1] > data[sp,1])) or (data[j,0] - data[sp,0] > tol):
break
splist.append([sp, lab] + [num_group] + list(data[sp,:]) ) # respectively store starting point
# index, label, number of neighbor objects, center (starting point).
lab += 1
return np.array(labels), splist, nr_dist
def compress(ts, tol=0.5, max_len=np.inf):
"""
Approximate a time series using a continuous piecewise linear function.
Parameters
----------
ts - numpy ndarray
Time series as input of numpy array
Returns
-------
pieces - numpy array
Numpy ndarray with three columns, each row contains length, increment, error for the segment.
"""
start = 0
end = 1
pieces = list() # np.empty([0, 3])
x = np.arange(0, len(ts))
epsilon = np.finfo(float).eps
while end < len(ts):
inc = ts[end] - ts[start]
err = ts[start] + (inc/(end-start))*x[0:end-start+1] - ts[start:end+1]
err = np.inner(err, err)
if (err <= tol*(end-start-1) + epsilon) and (end-start-1 < max_len):
(lastinc, lasterr) = (inc, err)
end += 1
else:
# pieces = np.vstack([pieces, np.array([end-start-1, lastinc, lasterr])])
pieces.append([end-start-1, lastinc, lasterr])
start = end - 1
# pieces = np.vstack([pieces, np.array([end-start-1, lastinc, lasterr])])
pieces.append([end-start-1, lastinc, lasterr])
return pieces
class fABBA:
def __init__ (self, tol=0.1, alpha=0.5, sorting='2-norm', scl=1, verbose=1, max_len=np.inf):
"""
Parameters
----------
tol - float
Control tolerance for compression, default as 0.1.
scl - int
Scale for length, default as 1, means 2d-digitization, otherwise implement 1d-digitization.
verbose - int
Control logs print, default as 1, print logs.
max_len - int
The max length for each segment, default as np.inf.
"""
self.tol = tol
self.scl = scl
self.verbose = verbose
self.max_len = max_len
self.alpha = alpha
self.sorting = sorting
self.compression_rate = None
self.digitization_rate = None
def fit_transform(self, series):
"""
Compress and digitize the time series together.
Parameters
----------
series - array or list
Time series.
alpha - float
Control tolerance for digitization, default as 0.5.
string_form - boolean
Whether to return with string form, default as True.
"""
series = np.array(series).astype(np.float64)
pieces = np.array(compress(ts=series, tol=self.tol, max_len=self.max_len))
strings = self.digitize(pieces[:,0:2])
self.compression_rate = pieces.shape[0] / series.shape[0]
self.digitization_rate = self.centers.shape[0] / pieces.shape[0]
if self.verbose in [1, 2]:
print("""Compression: Reduced series of length {0} to {1} segments.""".format(series.shape[0], pieces.shape[0]),
"""Digitization: Reduced {} pieces""".format(len(strings)), "to", self.centers.shape[0], "symbols.")
# strings = ''.join(strings)
return strings
def digitize(self, pieces, early_stopping=True):
"""
Greedy 2D clustering of pieces (a Nx2 numpy array),
using tolerance tol and len/inc scaling parameter scl.
In this variant, a 'temporary' cluster center is used
when assigning pieces to clusters. This temporary cluster
is the first piece available after appropriate scaling
and sorting of all pieces. It is *not* necessarily the
mean of all pieces in that cluster and hence the final
cluster centers, which are just the means, might achieve
a smaller within-cluster tol.
"""
if self.sorting not in ['2-norm', '1-norm', 'lexi']:
raise ValueError("Please refer to a specific and correct sorting way, namely '2-norm', '1-norm' and 'lexi'")
_std = np.std(pieces, axis=0) # prevent zero-division
if _std[0] == 0:
_std[0] = 1
if _std[1] == 0:
_std[1] = 1
npieces = pieces * np.array([self.scl, 1]) / _std
labels, self.splist, self.nr_dist = aggregate(npieces, self.sorting, self.alpha) # other two variables are used for experiment
centers = np.zeros((0,2))
for c in range(len(self.splist)):
indc = np.argwhere(labels==c)
center = np.mean(pieces[indc,:], axis=0)
centers = np.r_[ centers, center ]
self.centers = centers
strings, self.hashmap = symbolsAssign(labels)
return strings
def inverse_transform(self, strings, start=0):
pieces = self.inverse_digitize(strings, self.centers, self.hashmap)
pieces = self.quantize(pieces)
time_series = self.inverse_compress(pieces, start)
return time_series
def inverse_digitize(self, strings, centers, hashmap):
pieces = np.empty([0,2])
for p in strings:
pc = centers[int(hashmap[p])]
pieces = np.vstack([pieces, pc])
return pieces[:,0:2]
def quantize(self, pieces):
if len(pieces) == 1:
pieces[0,0] = round(pieces[0,0])
else:
for p in range(len(pieces)-1):
corr = round(pieces[p,0]) - pieces[p,0]
pieces[p,0] = round(pieces[p,0] + corr)
pieces[p+1,0] = pieces[p+1,0] - corr
if pieces[p,0] == 0:
pieces[p,0] = 1
pieces[p+1,0] -= 1
pieces[-1,0] = round(pieces[-1,0],0)
return pieces
def inverse_compress(self, pieces, start):
"""Modified from ABBA package, please see ABBA package to see guidance."""
time_series = [start]
# stitch linear piece onto last
for j in range(0, len(pieces)):
x = np.arange(0,pieces[j,0]+1)/(pieces[j,0])*pieces[j,1]
#print(x)
y = time_series[-1] + x
time_series = time_series + y[1:].tolist()
return time_series
class ABBA:
def __init__ (self, tol=0.1, k_cluster=10, verbose=1, max_len=np.inf):
"""
Parameters
----------
tol - float
Control tolerance for compression, default as 0.1.
k_cluster - int
Number of symbols used for digitization.
verbose - int
Control logs print, default as 1, print logs.
max_len - int
The max length for each segment, default as np.inf.
"""
self.tol = tol
self.verbose = verbose
self.max_len = max_len
self.k_cluster = k_cluster
self.compression_rate = None
self.digitization_rate = None
def fit_transform(self, series):
"""
Compress and digitize the time series together.
Parameters
----------
series - array or list
Time series.
alpha - float
Control tolerance for digitization, default as 0.5.
string_form - boolean
Whether to return with string form, default as True.
"""
series = np.array(series).astype(np.float64)
pieces = np.array(compress(ts=series, tol=self.tol, max_len=self.max_len))
strings = self.digitize(pieces[:,0:2])
self.compression_rate = pieces.shape[0] / series.shape[0]
self.digitization_rate = self.centers.shape[0] / pieces.shape[0]
if self.verbose in [1, 2]:
print("""Compression: Reduced series of length {0} to {1} segments.""".format(series.shape[0], pieces.shape[0]),
"""Digitization: Reduced {} pieces""".format(len(strings)), "to", self.centers.shape[0], "symbols.")
# strings = ''.join(strings)
return strings
def digitize(self, pieces, early_stopping=True):
"""
Greedy 2D clustering of pieces (a Nx2 numpy array),
using tolerance tol and len/inc scaling parameter scl.
In this variant, a 'temporary' cluster center is used
when assigning pieces to clusters. This temporary cluster
is the first piece available after appropriate scaling
and sorting of all pieces. It is *not* necessarily the
mean of all pieces in that cluster and hence the final
cluster centers, which are just the means, might achieve
a smaller within-cluster tol.
"""
_std = np.std(pieces, axis=0) # prevent zero-division
if _std[0] == 0:
_std[0] = 1
if _std[1] == 0:
_std[1] = 1
npieces = pieces / _std
# print(npieces.shape)
kmeans = KMeans(n_clusters=self.k_cluster, random_state=0).fit(npieces)
labels = kmeans.labels_
self.centers = kmeans.cluster_centers_*_std
strings, self.hashmap = symbolsAssign(labels)
return strings
def inverse_transform(self, strings, start=0):
pieces = self.inverse_digitize(strings, self.centers, self.hashmap)
pieces = self.quantize(pieces)
time_series = self.inverse_compress(pieces, start)
return time_series
def inverse_digitize(self, strings, centers, hashmap):
pieces = np.empty([0,2])
for p in strings:
pc = centers[int(hashmap[p])]
pieces = np.vstack([pieces, pc])
return pieces[:,0:2]
def quantize(self, pieces):
if len(pieces) == 1:
pieces[0,0] = round(pieces[0,0])
else:
for p in range(len(pieces)-1):
corr = round(pieces[p,0]) - pieces[p,0]
pieces[p,0] = round(pieces[p,0] + corr)
pieces[p+1,0] = pieces[p+1,0] - corr
if pieces[p,0] == 0:
pieces[p,0] = 1
pieces[p+1,0] -= 1
pieces[-1,0] = round(pieces[-1,0],0)
return pieces
def inverse_compress(self, pieces, start):
"""Modified from ABBA package, please see ABBA package to see guidance."""
time_series = [start]
# stitch linear piece onto last
for j in range(0, len(pieces)):
x = np.arange(0,pieces[j,0]+1)/(pieces[j,0])*pieces[j,1]
#print(x)
y = time_series[-1] + x
time_series = time_series + y[1:].tolist()
return time_series
| self.n_paa_segments = None
self.width = width | conditional_block |
symbols.py | # Copyright (c) 2021, nla group, manchester
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import warnings
import collections
import pandas as pd
import numpy as np
from scipy.stats import norm
from sklearn.cluster import KMeans
def symbolsAssign(clusters):
""" automatically assign symbols to different clusters, start with '!'
from https://github.com/nla-group/fABBA.
Parameters
----------
clusters - list or pd.Series or array
the list of clusters.
----------
Return:
symbols(list of string), inverse_hash(dict): respectively for the
corresponding symbolic sequence and the hashmap for inverse transform.
"""
alphabet = ['A','a','B','b','C','c','D','d','E','e',
'F','f','G','g','H','h','I','i','J','j',
'K','k','L','l','M','m','N','n','O','o',
'P','p','Q','q','R','r','S','s','T','t',
'U','u','V','v','W','w','X','x','Y','y','Z','z']
clusters = pd.Series(clusters)
N = len(clusters.unique())
cluster_sort = [0] * N
counter = collections.Counter(clusters)
for ind, el in enumerate(counter.most_common()):
cluster_sort[ind] = el[0]
if N >= len(alphabet):
alphabet = [chr(i+33) for i in range(0, N)]
else:
alphabet = alphabet[:N]
hashm = dict(zip(cluster_sort + alphabet, alphabet + cluster_sort))
strings = [hashm[i] for i in clusters]
return strings, hashm
class SAX:
"""Modified from https://github.com/nla-group/TARZAN"""
def __init__(self, *, width = 2, n_paa_segments=None, k = 5, return_list=False, verbose=True):
if n_paa_segments is not None:
# if verbose == True:
# warnings.warn("Set width to ``len(ts) // n_paa_segments''")
self.n_paa_segments = n_paa_segments
self.width = None
else:
self.n_paa_segments = None
self.width = width
self.number_of_symbols = k
self.return_list = return_list
self.mu, self.std = 0, 1
def transform(self, time_series):
if self.width is None:
self.width = len(time_series) // self.n_paa_segments
self.mu = np.mean(time_series)
self.std = np.std(time_series)
if self.std == 0:
self.std = 1
time_series = (time_series - self.mu)/self.std
compressed_time_series = self.paa_mean(time_series)
symbolic_time_series = self._digitize(compressed_time_series)
return symbolic_time_series
def inverse_transform(self, symbolic_time_series):
|
def paa_mean(self, ts):
if len(ts) % self.width != 0:
warnings.warn("Result truncates, width does not divide length")
return [np.mean(ts[i*self.width:np.min([len(ts), (i+1)*self.width])]) for i in range(int(np.floor(len(ts)/self.width)))]
def _digitize(self, ts):
symbolic_ts = self._gaussian_breakpoints(ts)
return symbolic_ts
def _gaussian_breakpoints(self, ts):
# Construct Breakpoints
breakpoints = np.hstack(
(norm.ppf([float(a) / self.number_of_symbols for a in range(1, self.number_of_symbols)], scale=1),
np.inf))
labels = []
for i in ts:
for j in range(len(breakpoints)):
if i < breakpoints[j]:
labels.append(j)
break
strings, self.hashm = symbolsAssign(labels)
if not self.return_list:
strings = "".join(strings)
return strings
def _reconstruct(self, reduced_ts):
return self._reverse_pca(reduced_ts)
def _reverse_pca(self, ts):
return np.kron(ts, np.ones([1,self.width])[0])
def _reverse_digitize(self, symbolic_ts):
return self._reverse_gaussian_breakpoints(symbolic_ts)
def _reverse_gaussian_breakpoints(self, symbols):
breakpoint_values = norm.ppf([float(a) / (2 * self.number_of_symbols) for a in range(1, 2 * self.number_of_symbols, 2)], scale=1)
ts = []
for s in symbols:
j = self.hashm[s]
ts.append(breakpoint_values[j])
return ts
# python implementation for aggregation
def aggregate(data, sorting="2-norm", tol=0.5): # , verbose=1
"""aggregate the data
Parameters
----------
data : numpy.ndarray
the input that is array-like of shape (n_samples,).
sorting : str
the sorting method for aggregation, default='2-norm', alternative option: '1-norm' and 'lexi'.
tol : float
the tolerance to control the aggregation. if the distance between the starting point
of a group and another data point is less than or equal to the tolerance,
the point is allocated to that group.
Returns
-------
labels (numpy.ndarray) :
the group categories of the data after aggregation
splist (list) :
the list of the starting points
nr_dist (int) :
number of pairwise distance calculations
"""
splist = list() # store the starting points
len_ind = data.shape[0]
if sorting == "2-norm":
sort_vals = np.linalg.norm(data, ord=2, axis=1)
ind = np.argsort(sort_vals)
elif sorting == "1-norm":
sort_vals = np.linalg.norm(data, ord=1, axis=1)
ind = np.argsort(sort_vals)
else:
ind = np.lexsort((data[:,1], data[:,0]), axis=0)
lab = 0
labels = [-1]*len_ind
nr_dist = 0
for i in range(len_ind): # tqdm(range(len_ind), disable=not verbose)
sp = ind[i] # starting point
if labels[sp] >= 0:
continue
else:
clustc = data[sp,:]
labels[sp] = lab
num_group = 1
for j in ind[i:]:
if labels[j] >= 0:
continue
dat = clustc - data[j,:]
dist = np.inner(dat, dat)
nr_dist += 1
if dist <= tol**2:
num_group += 1
labels[j] = lab
else: # apply early stopping
if sorting == "2-norm" or sorting == "1-norm":
if (sort_vals[j] - sort_vals[sp] > tol):
break
else:
if ((data[j,0] - data[sp,0] == tol) and (data[j,1] > data[sp,1])) or (data[j,0] - data[sp,0] > tol):
break
splist.append([sp, lab] + [num_group] + list(data[sp,:]) ) # respectively store starting point
# index, label, number of neighbor objects, center (starting point).
lab += 1
return np.array(labels), splist, nr_dist
def compress(ts, tol=0.5, max_len=np.inf):
"""
Approximate a time series using a continuous piecewise linear function.
Parameters
----------
ts - numpy ndarray
Time series as input of numpy array
Returns
-------
pieces - numpy array
Numpy ndarray with three columns, each row contains length, increment, error for the segment.
"""
start = 0
end = 1
pieces = list() # np.empty([0, 3])
x = np.arange(0, len(ts))
epsilon = np.finfo(float).eps
while end < len(ts):
inc = ts[end] - ts[start]
err = ts[start] + (inc/(end-start))*x[0:end-start+1] - ts[start:end+1]
err = np.inner(err, err)
if (err <= tol*(end-start-1) + epsilon) and (end-start-1 < max_len):
(lastinc, lasterr) = (inc, err)
end += 1
else:
# pieces = np.vstack([pieces, np.array([end-start-1, lastinc, lasterr])])
pieces.append([end-start-1, lastinc, lasterr])
start = end - 1
# pieces = np.vstack([pieces, np.array([end-start-1, lastinc, lasterr])])
pieces.append([end-start-1, lastinc, lasterr])
return pieces
class fABBA:
def __init__ (self, tol=0.1, alpha=0.5, sorting='2-norm', scl=1, verbose=1, max_len=np.inf):
"""
Parameters
----------
tol - float
Control tolerance for compression, default as 0.1.
scl - int
Scale for length, default as 1, means 2d-digitization, otherwise implement 1d-digitization.
verbose - int
Control logs print, default as 1, print logs.
max_len - int
The max length for each segment, default as np.inf.
"""
self.tol = tol
self.scl = scl
self.verbose = verbose
self.max_len = max_len
self.alpha = alpha
self.sorting = sorting
self.compression_rate = None
self.digitization_rate = None
def fit_transform(self, series):
"""
Compress and digitize the time series together.
Parameters
----------
series - array or list
Time series.
alpha - float
Control tolerance for digitization, default as 0.5.
string_form - boolean
Whether to return with string form, default as True.
"""
series = np.array(series).astype(np.float64)
pieces = np.array(compress(ts=series, tol=self.tol, max_len=self.max_len))
strings = self.digitize(pieces[:,0:2])
self.compression_rate = pieces.shape[0] / series.shape[0]
self.digitization_rate = self.centers.shape[0] / pieces.shape[0]
if self.verbose in [1, 2]:
print("""Compression: Reduced series of length {0} to {1} segments.""".format(series.shape[0], pieces.shape[0]),
"""Digitization: Reduced {} pieces""".format(len(strings)), "to", self.centers.shape[0], "symbols.")
# strings = ''.join(strings)
return strings
def digitize(self, pieces, early_stopping=True):
"""
Greedy 2D clustering of pieces (a Nx2 numpy array),
using tolerance tol and len/inc scaling parameter scl.
In this variant, a 'temporary' cluster center is used
when assigning pieces to clusters. This temporary cluster
is the first piece available after appropriate scaling
and sorting of all pieces. It is *not* necessarily the
mean of all pieces in that cluster and hence the final
cluster centers, which are just the means, might achieve
a smaller within-cluster tol.
"""
if self.sorting not in ['2-norm', '1-norm', 'lexi']:
raise ValueError("Please refer to a specific and correct sorting way, namely '2-norm', '1-norm' and 'lexi'")
_std = np.std(pieces, axis=0) # prevent zero-division
if _std[0] == 0:
_std[0] = 1
if _std[1] == 0:
_std[1] = 1
npieces = pieces * np.array([self.scl, 1]) / _std
labels, self.splist, self.nr_dist = aggregate(npieces, self.sorting, self.alpha) # other two variables are used for experiment
centers = np.zeros((0,2))
for c in range(len(self.splist)):
indc = np.argwhere(labels==c)
center = np.mean(pieces[indc,:], axis=0)
centers = np.r_[ centers, center ]
self.centers = centers
strings, self.hashmap = symbolsAssign(labels)
return strings
def inverse_transform(self, strings, start=0):
pieces = self.inverse_digitize(strings, self.centers, self.hashmap)
pieces = self.quantize(pieces)
time_series = self.inverse_compress(pieces, start)
return time_series
def inverse_digitize(self, strings, centers, hashmap):
pieces = np.empty([0,2])
for p in strings:
pc = centers[int(hashmap[p])]
pieces = np.vstack([pieces, pc])
return pieces[:,0:2]
def quantize(self, pieces):
if len(pieces) == 1:
pieces[0,0] = round(pieces[0,0])
else:
for p in range(len(pieces)-1):
corr = round(pieces[p,0]) - pieces[p,0]
pieces[p,0] = round(pieces[p,0] + corr)
pieces[p+1,0] = pieces[p+1,0] - corr
if pieces[p,0] == 0:
pieces[p,0] = 1
pieces[p+1,0] -= 1
pieces[-1,0] = round(pieces[-1,0],0)
return pieces
def inverse_compress(self, pieces, start):
"""Modified from ABBA package, please see ABBA package to see guidance."""
time_series = [start]
# stitch linear piece onto last
for j in range(0, len(pieces)):
x = np.arange(0,pieces[j,0]+1)/(pieces[j,0])*pieces[j,1]
#print(x)
y = time_series[-1] + x
time_series = time_series + y[1:].tolist()
return time_series
class ABBA:
def __init__ (self, tol=0.1, k_cluster=10, verbose=1, max_len=np.inf):
"""
Parameters
----------
tol - float
Control tolerance for compression, default as 0.1.
k_cluster - int
Number of symbols used for digitization.
verbose - int
Control logs print, default as 1, print logs.
max_len - int
The max length for each segment, default as np.inf.
"""
self.tol = tol
self.verbose = verbose
self.max_len = max_len
self.k_cluster = k_cluster
self.compression_rate = None
self.digitization_rate = None
def fit_transform(self, series):
"""
Compress and digitize the time series together.
Parameters
----------
series - array or list
Time series.
alpha - float
Control tolerance for digitization, default as 0.5.
string_form - boolean
Whether to return with string form, default as True.
"""
series = np.array(series).astype(np.float64)
pieces = np.array(compress(ts=series, tol=self.tol, max_len=self.max_len))
strings = self.digitize(pieces[:,0:2])
self.compression_rate = pieces.shape[0] / series.shape[0]
self.digitization_rate = self.centers.shape[0] / pieces.shape[0]
if self.verbose in [1, 2]:
print("""Compression: Reduced series of length {0} to {1} segments.""".format(series.shape[0], pieces.shape[0]),
"""Digitization: Reduced {} pieces""".format(len(strings)), "to", self.centers.shape[0], "symbols.")
# strings = ''.join(strings)
return strings
def digitize(self, pieces, early_stopping=True):
"""
Greedy 2D clustering of pieces (a Nx2 numpy array),
using tolerance tol and len/inc scaling parameter scl.
In this variant, a 'temporary' cluster center is used
when assigning pieces to clusters. This temporary cluster
is the first piece available after appropriate scaling
and sorting of all pieces. It is *not* necessarily the
mean of all pieces in that cluster and hence the final
cluster centers, which are just the means, might achieve
a smaller within-cluster tol.
"""
_std = np.std(pieces, axis=0) # prevent zero-division
if _std[0] == 0:
_std[0] = 1
if _std[1] == 0:
_std[1] = 1
npieces = pieces / _std
# print(npieces.shape)
kmeans = KMeans(n_clusters=self.k_cluster, random_state=0).fit(npieces)
labels = kmeans.labels_
self.centers = kmeans.cluster_centers_*_std
strings, self.hashmap = symbolsAssign(labels)
return strings
def inverse_transform(self, strings, start=0):
pieces = self.inverse_digitize(strings, self.centers, self.hashmap)
pieces = self.quantize(pieces)
time_series = self.inverse_compress(pieces, start)
return time_series
def inverse_digitize(self, strings, centers, hashmap):
pieces = np.empty([0,2])
for p in strings:
pc = centers[int(hashmap[p])]
pieces = np.vstack([pieces, pc])
return pieces[:,0:2]
def quantize(self, pieces):
if len(pieces) == 1:
pieces[0,0] = round(pieces[0,0])
else:
for p in range(len(pieces)-1):
corr = round(pieces[p,0]) - pieces[p,0]
pieces[p,0] = round(pieces[p,0] + corr)
pieces[p+1,0] = pieces[p+1,0] - corr
if pieces[p,0] == 0:
pieces[p,0] = 1
pieces[p+1,0] -= 1
pieces[-1,0] = round(pieces[-1,0],0)
return pieces
def inverse_compress(self, pieces, start):
"""Modified from ABBA package, please see ABBA package to see guidance."""
time_series = [start]
# stitch linear piece onto last
for j in range(0, len(pieces)):
x = np.arange(0,pieces[j,0]+1)/(pieces[j,0])*pieces[j,1]
#print(x)
y = time_series[-1] + x
time_series = time_series + y[1:].tolist()
return time_series
| compressed_time_series = self._reverse_digitize(symbolic_time_series)
time_series = self._reconstruct(compressed_time_series)
time_series = time_series*self.std + self.mu
return time_series | identifier_body |
symbols.py | # Copyright (c) 2021, nla group, manchester
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import warnings
import collections
import pandas as pd
import numpy as np
from scipy.stats import norm
from sklearn.cluster import KMeans
def symbolsAssign(clusters):
""" automatically assign symbols to different clusters, start with '!'
from https://github.com/nla-group/fABBA.
Parameters
----------
clusters - list or pd.Series or array
the list of clusters.
----------
Return:
symbols(list of string), inverse_hash(dict): respectively for the
corresponding symbolic sequence and the hashmap for inverse transform.
"""
alphabet = ['A','a','B','b','C','c','D','d','E','e',
'F','f','G','g','H','h','I','i','J','j',
'K','k','L','l','M','m','N','n','O','o',
'P','p','Q','q','R','r','S','s','T','t',
'U','u','V','v','W','w','X','x','Y','y','Z','z']
clusters = pd.Series(clusters)
N = len(clusters.unique())
cluster_sort = [0] * N
counter = collections.Counter(clusters)
for ind, el in enumerate(counter.most_common()):
cluster_sort[ind] = el[0]
if N >= len(alphabet):
alphabet = [chr(i+33) for i in range(0, N)]
else:
alphabet = alphabet[:N]
hashm = dict(zip(cluster_sort + alphabet, alphabet + cluster_sort))
strings = [hashm[i] for i in clusters]
return strings, hashm
class SAX:
"""Modified from https://github.com/nla-group/TARZAN"""
def __init__(self, *, width = 2, n_paa_segments=None, k = 5, return_list=False, verbose=True):
if n_paa_segments is not None:
# if verbose == True:
# warnings.warn("Set width to ``len(ts) // n_paa_segments''")
self.n_paa_segments = n_paa_segments
self.width = None
else:
self.n_paa_segments = None
self.width = width
self.number_of_symbols = k
self.return_list = return_list
self.mu, self.std = 0, 1
def transform(self, time_series):
if self.width is None:
self.width = len(time_series) // self.n_paa_segments
self.mu = np.mean(time_series)
self.std = np.std(time_series)
if self.std == 0:
self.std = 1
time_series = (time_series - self.mu)/self.std
compressed_time_series = self.paa_mean(time_series)
symbolic_time_series = self._digitize(compressed_time_series)
return symbolic_time_series
def inverse_transform(self, symbolic_time_series):
compressed_time_series = self._reverse_digitize(symbolic_time_series)
time_series = self._reconstruct(compressed_time_series)
time_series = time_series*self.std + self.mu
return time_series
def paa_mean(self, ts):
if len(ts) % self.width != 0:
warnings.warn("Result truncates, width does not divide length")
return [np.mean(ts[i*self.width:np.min([len(ts), (i+1)*self.width])]) for i in range(int(np.floor(len(ts)/self.width)))]
def _digitize(self, ts):
symbolic_ts = self._gaussian_breakpoints(ts)
return symbolic_ts
def _gaussian_breakpoints(self, ts):
# Construct Breakpoints
breakpoints = np.hstack(
(norm.ppf([float(a) / self.number_of_symbols for a in range(1, self.number_of_symbols)], scale=1),
np.inf))
labels = []
for i in ts:
for j in range(len(breakpoints)):
if i < breakpoints[j]:
labels.append(j)
break
strings, self.hashm = symbolsAssign(labels)
if not self.return_list:
strings = "".join(strings)
return strings
def _reconstruct(self, reduced_ts):
return self._reverse_pca(reduced_ts)
def _reverse_pca(self, ts):
return np.kron(ts, np.ones([1,self.width])[0])
def _reverse_digitize(self, symbolic_ts):
return self._reverse_gaussian_breakpoints(symbolic_ts)
def _reverse_gaussian_breakpoints(self, symbols):
breakpoint_values = norm.ppf([float(a) / (2 * self.number_of_symbols) for a in range(1, 2 * self.number_of_symbols, 2)], scale=1)
ts = []
for s in symbols:
j = self.hashm[s]
ts.append(breakpoint_values[j])
return ts
# python implementation for aggregation
def aggregate(data, sorting="2-norm", tol=0.5): # , verbose=1
"""aggregate the data
Parameters
----------
data : numpy.ndarray
the input that is array-like of shape (n_samples,).
sorting : str
the sorting method for aggregation, default='2-norm', alternative option: '1-norm' and 'lexi'.
tol : float
the tolerance to control the aggregation. if the distance between the starting point
of a group and another data point is less than or equal to the tolerance,
the point is allocated to that group.
Returns
-------
labels (numpy.ndarray) :
the group categories of the data after aggregation
splist (list) :
the list of the starting points
nr_dist (int) :
number of pairwise distance calculations
"""
splist = list() # store the starting points
len_ind = data.shape[0]
if sorting == "2-norm":
sort_vals = np.linalg.norm(data, ord=2, axis=1)
ind = np.argsort(sort_vals)
elif sorting == "1-norm":
sort_vals = np.linalg.norm(data, ord=1, axis=1)
ind = np.argsort(sort_vals)
else:
ind = np.lexsort((data[:,1], data[:,0]), axis=0)
lab = 0
labels = [-1]*len_ind
nr_dist = 0
for i in range(len_ind): # tqdm(range(len_ind), disable=not verbose)
sp = ind[i] # starting point
if labels[sp] >= 0:
continue
else:
clustc = data[sp,:]
labels[sp] = lab
num_group = 1
for j in ind[i:]:
if labels[j] >= 0:
continue
dat = clustc - data[j,:]
dist = np.inner(dat, dat)
nr_dist += 1
if dist <= tol**2:
num_group += 1
labels[j] = lab
else: # apply early stopping
if sorting == "2-norm" or sorting == "1-norm":
if (sort_vals[j] - sort_vals[sp] > tol):
break
else:
if ((data[j,0] - data[sp,0] == tol) and (data[j,1] > data[sp,1])) or (data[j,0] - data[sp,0] > tol):
break
splist.append([sp, lab] + [num_group] + list(data[sp,:]) ) # respectively store starting point
# index, label, number of neighbor objects, center (starting point).
lab += 1
return np.array(labels), splist, nr_dist
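# A minimal usage sketch (illustrative only; the random data below is made up
# for demonstration): how `aggregate` might be called on a small 2D array.
def _aggregate_example():
    rng = np.random.RandomState(0)
    data = rng.rand(20, 2)
    labels, splist, nr_dist = aggregate(data, sorting="2-norm", tol=0.5)
    # labels[i] is the group of data[i]; each splist entry holds
    # [starting point index, label, group size, starting point coordinates].
    return labels, splist, nr_dist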
def compress(ts, tol=0.5, max_len=np.inf):
"""
Approximate a time series using a continuous piecewise linear function.
Parameters
----------
ts - numpy ndarray
Time series as input of numpy array
Returns
-------
pieces - numpy array
Numpy ndarray with three columns, each row contains length, increment, error for the segment.
"""
start = 0
end = 1
pieces = list() # np.empty([0, 3])
x = np.arange(0, len(ts))
epsilon = np.finfo(float).eps
while end < len(ts):
inc = ts[end] - ts[start]
err = ts[start] + (inc/(end-start))*x[0:end-start+1] - ts[start:end+1]
err = np.inner(err, err)
if (err <= tol*(end-start-1) + epsilon) and (end-start-1 < max_len):
(lastinc, lasterr) = (inc, err)
end += 1
else:
# pieces = np.vstack([pieces, np.array([end-start-1, lastinc, lasterr])])
pieces.append([end-start-1, lastinc, lasterr])
start = end - 1
# pieces = np.vstack([pieces, np.array([end-start-1, lastinc, lasterr])])
pieces.append([end-start-1, lastinc, lasterr])
return pieces
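# A minimal usage sketch (illustrative only): `compress` approximates a short
# series by pieces of the form [segment length, total increment, squared error].
def _compress_example():
    ts = np.sin(np.linspace(0, 4 * np.pi, 100))
    pieces = compress(ts, tol=0.1)
    return np.array(pieces)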
class fABBA:
def __init__ (self, tol=0.1, alpha=0.5, sorting='2-norm', scl=1, verbose=1, max_len=np.inf):
"""
Parameters
----------
tol - float
Control tolerence for compression, default as 0.1.
scl - int
Scale for length, default as 1, means 2d-digitization, otherwise implement 1d-digitization.
verbose - int
Control logs print, default as 1, print logs.
max_len - int
The max length for each segment, default as np.inf.
"""
self.tol = tol
self.scl = scl
self.verbose = verbose
self.max_len = max_len
self.alpha = alpha
self.sorting = sorting
self.compression_rate = None
self.digitization_rate = None
def fit_transform(self, series):
"""
Compress and digitize the time series together.
Parameters
----------
series - array or list
Time series.
alpha - float
Control tolerence for digitization, default as 0.5.
string_form - boolean
Whether to return with string form, default as True.
"""
series = np.array(series).astype(np.float64)
pieces = np.array(compress(ts=series, tol=self.tol, max_len=self.max_len))
strings = self.digitize(pieces[:,0:2])
self.compression_rate = pieces.shape[0] / series.shape[0]
self.digitization_rate = self.centers.shape[0] / pieces.shape[0]
if self.verbose in [1, 2]:
print("""Compression: Reduced series of length {0} to {1} segments.""".format(series.shape[0], pieces.shape[0]),
"""Digitization: Reduced {} pieces""".format(len(strings)), "to", self.centers.shape[0], "symbols.")
# strings = ''.join(strings)
return strings
def digitize(self, pieces, early_stopping=True):
"""
        Greedy 2D clustering of pieces (an Nx2 numpy array),
        using tolerance tol and len/inc scaling parameter scl.
In this variant, a 'temporary' cluster center is used
when assigning pieces to clusters. This temporary cluster
is the first piece available after appropriate scaling
and sorting of all pieces. It is *not* necessarily the
mean of all pieces in that cluster and hence the final
cluster centers, which are just the means, might achieve
a smaller within-cluster tol.
"""
if self.sorting not in ['2-norm', '1-norm', 'lexi']:
raise ValueError("Please refer to a specific and correct sorting way, namely '2-norm', '1-norm' and 'lexi'")
_std = np.std(pieces, axis=0) # prevent zero-division
if _std[0] == 0:
_std[0] = 1
if _std[1] == 0:
_std[1] = 1
npieces = pieces * np.array([self.scl, 1]) / _std
labels, self.splist, self.nr_dist = aggregate(npieces, self.sorting, self.alpha) # other two variables are used for experiment
centers = np.zeros((0,2))
for c in range(len(self.splist)):
indc = np.argwhere(labels==c)
center = np.mean(pieces[indc,:], axis=0)
centers = np.r_[ centers, center ]
self.centers = centers
strings, self.hashmap = symbolsAssign(labels)
return strings
def inverse_transform(self, strings, start=0):
pieces = self.inverse_digitize(strings, self.centers, self.hashmap)
pieces = self.quantize(pieces)
time_series = self.inverse_compress(pieces, start)
return time_series
def inverse_digitize(self, strings, centers, hashmap):
pieces = np.empty([0,2])
for p in strings:
pc = centers[int(hashmap[p])]
pieces = np.vstack([pieces, pc])
return pieces[:,0:2]
def quantize(self, pieces):
if len(pieces) == 1:
pieces[0,0] = round(pieces[0,0])
else:
for p in range(len(pieces)-1):
corr = round(pieces[p,0]) - pieces[p,0]
pieces[p,0] = round(pieces[p,0] + corr)
pieces[p+1,0] = pieces[p+1,0] - corr
if pieces[p,0] == 0:
pieces[p,0] = 1
pieces[p+1,0] -= 1
pieces[-1,0] = round(pieces[-1,0],0)
return pieces
def inverse_compress(self, pieces, start):
"""Modified from ABBA package, please see ABBA package to see guidance."""
time_series = [start]
# stitch linear piece onto last
for j in range(0, len(pieces)):
x = np.arange(0,pieces[j,0]+1)/(pieces[j,0])*pieces[j,1]
#print(x)
y = time_series[-1] + x
time_series = time_series + y[1:].tolist()
return time_series
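# A rough end-to-end sketch (illustrative only, not part of the original API
# surface): compress, digitize and reconstruct a toy series with fABBA.
def _fabba_example():
    ts = np.sin(np.linspace(0, 6 * np.pi, 200))
    fabba = fABBA(tol=0.1, alpha=0.5, sorting='2-norm', verbose=0)
    symbols = fabba.fit_transform(ts)
    reconstructed = fabba.inverse_transform(symbols, start=ts[0])
    return symbols, reconstructed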
class ABBA:
def __init__ (self, tol=0.1, k_cluster=10, verbose=1, max_len=np.inf):
"""
Parameters
----------
tol - float
            Control tolerance for compression, default as 0.1.
k_cluster - int
Number of symbols used for digitization.
verbose - int
Control logs print, default as 1, print logs.
max_len - int
The max length for each segment, default as np.inf.
"""
self.tol = tol
self.verbose = verbose
self.max_len = max_len
self.k_cluster = k_cluster
self.compression_rate = None
self.digitization_rate = None
def fit_transform(self, series):
"""
Compress and digitize the time series together.
Parameters
----------
series - array or list
Time series.
alpha - float
Control tolerence for digitization, default as 0.5.
string_form - boolean
Whether to return with string form, default as True.
"""
series = np.array(series).astype(np.float64)
pieces = np.array(compress(ts=series, tol=self.tol, max_len=self.max_len))
strings = self.digitize(pieces[:,0:2])
self.compression_rate = pieces.shape[0] / series.shape[0]
self.digitization_rate = self.centers.shape[0] / pieces.shape[0]
if self.verbose in [1, 2]:
print("""Compression: Reduced series of length {0} to {1} segments.""".format(series.shape[0], pieces.shape[0]),
"""Digitization: Reduced {} pieces""".format(len(strings)), "to", self.centers.shape[0], "symbols.")
# strings = ''.join(strings)
return strings
def digitize(self, pieces, early_stopping=True):
"""
Greedy 2D clustering of pieces (a Nx2 numpy array),
using tolernce tol and len/inc scaling parameter scl.
In this variant, a 'temporary' cluster center is used
when assigning pieces to clusters. This temporary cluster
is the first piece available after appropriate scaling
and sorting of all pieces. It is *not* necessarily the
mean of all pieces in that cluster and hence the final
cluster centers, which are just the means, might achieve
a smaller within-cluster tol.
"""
_std = np.std(pieces, axis=0) # prevent zero-division
if _std[0] == 0:
_std[0] = 1
if _std[1] == 0:
_std[1] = 1
npieces = pieces / _std
print(npieces.shape)
kmeans = KMeans(n_clusters=self.k_cluster, random_state=0).fit(npieces)
labels = kmeans.labels_
self.centers = kmeans.cluster_centers_*_std
strings, self.hashmap = symbolsAssign(labels)
return strings
def inverse_transform(self, strings, start=0):
pieces = self.inverse_digitize(strings, self.centers, self.hashmap)
pieces = self.quantize(pieces)
time_series = self.inverse_compress(pieces, start)
return time_series
def inverse_digitize(self, strings, centers, hashmap):
pieces = np.empty([0,2])
for p in strings:
pc = centers[int(hashmap[p])]
pieces = np.vstack([pieces, pc])
return pieces[:,0:2]
def qu | elf, pieces):
if len(pieces) == 1:
pieces[0,0] = round(pieces[0,0])
else:
for p in range(len(pieces)-1):
corr = round(pieces[p,0]) - pieces[p,0]
pieces[p,0] = round(pieces[p,0] + corr)
pieces[p+1,0] = pieces[p+1,0] - corr
if pieces[p,0] == 0:
pieces[p,0] = 1
pieces[p+1,0] -= 1
pieces[-1,0] = round(pieces[-1,0],0)
return pieces
def inverse_compress(self, pieces, start):
"""Modified from ABBA package, please see ABBA package to see guidance."""
time_series = [start]
# stitch linear piece onto last
for j in range(0, len(pieces)):
x = np.arange(0,pieces[j,0]+1)/(pieces[j,0])*pieces[j,1]
#print(x)
y = time_series[-1] + x
time_series = time_series + y[1:].tolist()
return time_series
| antize(s | identifier_name |
texture.rs | // texture.rs
// Creation and handling of images and textures.
// (c) 2019, Ryan McGowan <ryan@internally-combusted.net>
//! Loading and management of textures.
use gfx_backend_metal as backend;
use nalgebra_glm as glm;
use gfx_hal::{
command::{BufferImageCopy, CommandBuffer, OneShot},
format::{Aspects, Format},
image::{Access, Extent, Layout, Offset, SubresourceLayers, SubresourceRange, Usage},
memory::{Barrier, Dependencies},
pso::PipelineStage,
Backend as GfxBackend, Device, Graphics, Limits,
};
use image::{Rgba, RgbaImage};
use nalgebra_glm::Mat3;
use serde::Deserialize;
use std::{mem, ops::Range};
use self::backend::Backend;
use crate::{
error::Error,
render::BufferObject,
resource::ResourceManager,
serial::{Filename, Index, Size},
};
// Calculates the total memory size of all the textures given.
// TODO: I probably don't need this.
pub fn total_texture_size(
textures: &[Index],
resource_manager: &ResourceManager,
limits: Limits,
) -> u64 {
textures
.iter()
.map(|texture| {
Texture::image_data_size(
resource_manager.textures[*texture].get_data().unwrap(),
&limits,
)
})
.sum()
}
// Just to make `serde` stop crying when deserializing `Texture`s.
fn default_range() -> Range<usize> {
0..0
}
/// Owns texture data and handles Vulkan-related constructs like
/// `Image`s and `ImageView`s.
#[derive(Debug, Deserialize)]
pub struct Texture {
pub index: Index,
/// The size in texels.
pub size: Size,
pub file: Filename,
/// When this `Texture` is bound to buffer memory, this stores the range of bytes within
/// the buffer that this `Texture` occupies.
#[serde(default = "default_range", skip)]
pub buffer_memory_range: Range<usize>,
/// The [`ImageView`] for the pipeline to use.
#[serde(skip)]
pub image_view: Option<<Backend as GfxBackend>::ImageView>,
/// The actual image data.
#[serde(skip)]
pub data: Option<RgbaImage>,
/// This `Texture` as a Vulkan object.
#[serde(skip)]
pub image: Option<<Backend as GfxBackend>::Image>,
/// A matrix precalculated based on the `Texture`'s size to scale
/// all (u, v) coordinates to be in the [0.0, 1.0] range.
#[serde(default = "glm::identity", skip)]
pub normalization_matrix: Mat3,
/// This `Texture`'s `DescriptorSet`.
/// TODO: Will probably need to rework how the pipeline handles textures.
#[serde(skip)]
pub descriptor_set: Option<<Backend as GfxBackend>::DescriptorSet>,
}
impl Texture {
/// Creates a new `Texture` and copies the texture data to buffer.
pub unsafe fn new(
index: Index,
device: &backend::Device,
limits: &Limits,
texture_data: RgbaImage,
buffer_memory: &mut <Backend as GfxBackend>::Memory,
buffer_memory_offset: u64,
) -> Result<Texture, Error> {
// Create Image.
let image = device.create_image(
gfx_hal::image::Kind::D2(texture_data.width(), texture_data.height(), 1, 1),
1,
Format::Rgba8Srgb,
gfx_hal::image::Tiling::Optimal,
Usage::TRANSFER_DST | Usage::SAMPLED,
gfx_hal::image::ViewCapabilities::empty(),
)?;
// Copy texture data to the given buffer.
let memory_requirement = device.get_image_requirements(&image);
Texture::write_image_to_buffer(
device,
buffer_memory,
buffer_memory_offset..memory_requirement.size + buffer_memory_offset,
&texture_data,
limits,
)?;
Ok(Texture {
index,
file: "".to_string(),
size: Size {
x: texture_data.width() as f32,
y: texture_data.height() as f32,
},
normalization_matrix: glm::scaling2d(&glm::vec2(
1.0 / texture_data.width() as f32,
1.0 / texture_data.height() as f32,
)),
data: Some(texture_data),
descriptor_set: None,
image: Some(image),
image_view: None,
buffer_memory_range: buffer_memory_offset as usize
..(memory_requirement.size + buffer_memory_offset) as usize,
})
}
/// Loads texture data from file and creates the `Texture`'s `Image`.
pub fn initialize(
&mut self,
device: &<Backend as GfxBackend>::Device,
color_format: Format,
) -> Result<(), Error> {
let data = image::open(&self.file)?.to_rgba();
let image = unsafe {
device.create_image(
gfx_hal::image::Kind::D2(data.width(), data.height(), 1, 1),
1,
color_format,
gfx_hal::image::Tiling::Optimal,
Usage::TRANSFER_DST | Usage::SAMPLED,
gfx_hal::image::ViewCapabilities::empty(),
)?
};
// Creates the uv normalization matrix for this texture.
self.normalization_matrix = glm::scaling2d(&glm::vec2(
1.0 / data.width() as f32,
1.0 / data.height() as f32,
));
self.image = Some(image);
self.data = Some(data);
Ok(())
}
/// Copies the `Texture` data to the given buffer memory.
pub unsafe fn buffer_data(
&mut self,
device: &<Backend as GfxBackend>::Device,
buffer_memory: &<Backend as GfxBackend>::Memory,
buffer_memory_offset: u64,
limits: &Limits,
) -> Result<(), Error> {
let memory_requirement = device.get_image_requirements(self.get_image()?);
self.buffer_memory_range = buffer_memory_offset as usize
..(memory_requirement.size + buffer_memory_offset) as usize;
Self::write_image_to_buffer(
device,
buffer_memory,
self.buffer_memory_range.start as u64..self.buffer_memory_range.end as u64,
self.get_data()?,
limits,
)?;
Ok(())
}
/// Finds the memory size needed for the given texture.
pub fn image_data_size(texture: &RgbaImage, limits: &Limits) -> u64 {
let pixel_size = mem::size_of::<Rgba<u8>>() as u32;
let row_size = pixel_size * texture.width();
// TODO: investigate the wizardry involved in the next two lines.
let row_alignment_mask = limits.min_buffer_copy_pitch_alignment as u32 - 1;
let row_pitch = (row_size + row_alignment_mask) & !row_alignment_mask;
u64::from(row_pitch * texture.height())
}
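    // Illustrative sketch (not from the original crate): the row-pitch computation
    // above rounds `row_size` up to the next multiple of
    // `min_buffer_copy_pitch_alignment`, relying on the alignment being a power of
    // two. The same bit trick in isolation:
    #[allow(dead_code)]
    fn align_up(value: u32, alignment: u32) -> u32 {
        // e.g. align_up(300, 256) == 512
        let mask = alignment - 1;
        (value + mask) & !mask
    }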
/// Given the location of this Texture's image data in the image memory,
/// binds the given memory and copies the data into the Image itself, then
/// creates the ImageView.
#[allow(clippy::too_many_arguments)] // CLIPPY HUSH
pub unsafe fn copy_image_to_memory(
&mut self,
device: &<Backend as GfxBackend>::Device,
image_memory: &<Backend as GfxBackend>::Memory,
image_memory_offset: u64,
command_pool: &mut gfx_hal::CommandPool<Backend, Graphics>,
command_queue: &mut gfx_hal::CommandQueue<Backend, Graphics>,
staging_buffer: &BufferObject,
limits: &Limits,
) -> Result<(), Error> {
device.bind_image_memory(
&image_memory,
image_memory_offset,
&mut self.image.as_mut().unwrap(),
)?;
// Creating an Image is basically like drawing a regular frame except
// the data gets rendered to memory instead of the screen, so we go
// through the whole process of creating a command buffer, adding commands,
// and submitting.
let mut command_buffer = command_pool.acquire_command_buffer::<OneShot>();
command_buffer.begin();
// Set the Image to write mode.
Texture::reformat_image(
&mut command_buffer,
(Access::empty(), Layout::Undefined),
(Access::TRANSFER_WRITE, Layout::TransferDstOptimal),
self.get_image()?,
PipelineStage::TOP_OF_PIPE,
PipelineStage::TRANSFER,
);
// Figure out the size of the texture data.
let pixel_size = mem::size_of::<Rgba<u8>>() as u32;
let row_size = pixel_size * self.size.x as u32;
let row_alignment_mask = limits.min_buffer_copy_pitch_alignment as u32 - 1;
let row_pitch = (row_size + row_alignment_mask) & !row_alignment_mask;
// Copy the data.
command_buffer.copy_buffer_to_image(
&staging_buffer.buffer,
self.get_image()?,
Layout::TransferDstOptimal,
&[BufferImageCopy {
buffer_offset: (self.buffer_memory_range.start - staging_buffer.offset) as u64,
buffer_width: (row_pitch / pixel_size) as u32,
buffer_height: self.size.y as u32,
image_layers: SubresourceLayers {
aspects: Aspects::COLOR,
level: 0,
layers: 0..1,
},
image_offset: Offset { x: 0, y: 0, z: 0 },
image_extent: Extent {
width: self.size.x as u32,
height: self.size.y as u32,
depth: 1,
},
}],
);
// Set Image to read mode.
Texture::reformat_image(
&mut command_buffer,
(Access::TRANSFER_WRITE, Layout::TransferDstOptimal),
(Access::SHADER_READ, Layout::ShaderReadOnlyOptimal),
self.get_image()?,
PipelineStage::TRANSFER,
PipelineStage::FRAGMENT_SHADER,
);
// Synchronize and then perform the rendering.
command_buffer.finish();
let upload_fence = device.create_fence(false)?;
command_queue.submit_nosemaphores(Some(&command_buffer), Some(&upload_fence));
device.wait_for_fence(&upload_fence, core::u64::MAX)?;
device.destroy_fence(upload_fence);
command_pool.free(Some(command_buffer));
// Create the ImageView.
self.image_view = Some(device.create_image_view(
self.get_image()?,
gfx_hal::image::ViewKind::D2,
// Changing this to match the renderer's surface_color_format does funky things
// TODO: Investigate why this happens
Format::Rgba8Srgb,
gfx_hal::format::Swizzle::NO,
SubresourceRange {
aspects: Aspects::COLOR,
levels: 0..1,
layers: 0..1,
},
)?);
Ok(())
}
// Extracted from copy_image_to_memory to clean it up a bit.
/// Switches an Image to the given state/format, handling the synchronization
/// involved.
fn reformat_image(
command_buffer: &mut CommandBuffer<Backend, Graphics>,
source_format: (Access, Layout),
target_format: (Access, Layout),
resource: &<Backend as GfxBackend>::Image,
source_pipeline_stage: PipelineStage,
target_pipeline_stage: PipelineStage,
) {
let image_barrier = Barrier::Image {
states: source_format..target_format,
target: resource,
families: None,
range: SubresourceRange {
aspects: Aspects::COLOR,
levels: 0..1,
layers: 0..1,
},
};
unsafe {
command_buffer.pipeline_barrier(
source_pipeline_stage..target_pipeline_stage,
Dependencies::empty(),
&[image_barrier],
)
};
}
/// Copies an `RgbaImage` containing texture data to the specified buffer.
unsafe fn write_image_to_buffer(
device: &backend::Device,
buffer_memory: &<Backend as GfxBackend>::Memory,
data_range: Range<u64>,
image: &RgbaImage,
limits: &Limits,
) -> Result<(), Error> {
let pixel_size = mem::size_of::<Rgba<u8>>() as u32;
assert_eq!(pixel_size, 32 / 8);
// Calculate image size. | let row_pitch = (row_size + row_alignment_mask) & !row_alignment_mask; // what wizardry is this
let mut writer = device.acquire_mapping_writer::<u8>(buffer_memory, data_range)?;
// Write the data row by row.
for row in 0..image.height() {
let image_offset = (row * row_size) as usize;
let data = &(**image)[image_offset..(row_size as usize + image_offset)];
let completed_row_size = (row * row_pitch) as usize;
writer[completed_row_size..(data.len() + completed_row_size)].copy_from_slice(data);
}
device.release_mapping_writer(writer)?;
Ok(())
}
/// A method for getting the `image` field because `unwrap()` unhelpfully moves
/// instead of borrowing.
pub fn get_image(&self) -> Result<&<Backend as GfxBackend>::Image, Error> {
match &self.image {
Some(image) => Ok(image),
None => Err(Error::None()),
}
}
/// A method for getting the `image_view` field because `unwrap()` unhelpfully moves
/// instead of borrowing.
pub fn get_image_view(&self) -> Result<&<Backend as GfxBackend>::ImageView, Error> {
match &self.image_view {
Some(image_view) => Ok(image_view),
None => Err(Error::None()),
}
}
/// A method for getting the `data` field because `unwrap()` unhelpfully moves instead
/// of borrowing.
pub fn get_data(&self) -> Result<&RgbaImage, Error> {
match &self.data {
Some(data) => Ok(data),
None => Err(Error::None()),
}
}
/// Releases resources held by this object.
pub unsafe fn destroy(self, device: &backend::Device) {
device.destroy_image(self.image.unwrap());
device.destroy_image_view(self.image_view.unwrap());
}
} | // TODO: Not sure why I have a function to do this but then write it out twice.
let row_size = pixel_size * image.width();
let row_alignment_mask = limits.min_buffer_copy_pitch_alignment as u32 - 1; | random_line_split |
texture.rs | // texture.rs
// Creation and handling of images and textures.
// (c) 2019, Ryan McGowan <ryan@internally-combusted.net>
//! Loading and management of textures.
use gfx_backend_metal as backend;
use nalgebra_glm as glm;
use gfx_hal::{
command::{BufferImageCopy, CommandBuffer, OneShot},
format::{Aspects, Format},
image::{Access, Extent, Layout, Offset, SubresourceLayers, SubresourceRange, Usage},
memory::{Barrier, Dependencies},
pso::PipelineStage,
Backend as GfxBackend, Device, Graphics, Limits,
};
use image::{Rgba, RgbaImage};
use nalgebra_glm::Mat3;
use serde::Deserialize;
use std::{mem, ops::Range};
use self::backend::Backend;
use crate::{
error::Error,
render::BufferObject,
resource::ResourceManager,
serial::{Filename, Index, Size},
};
// Calculates the total memory size of all the textures given.
// TODO: I probably don't need this.
pub fn total_texture_size(
textures: &[Index],
resource_manager: &ResourceManager,
limits: Limits,
) -> u64 {
textures
.iter()
.map(|texture| {
Texture::image_data_size(
resource_manager.textures[*texture].get_data().unwrap(),
&limits,
)
})
.sum()
}
// Just to make `serde` stop crying when deserializing `Texture`s.
fn default_range() -> Range<usize> {
0..0
}
/// Owns texture data and handles Vulkan-related constructs like
/// `Image`s and `ImageView`s.
#[derive(Debug, Deserialize)]
pub struct Texture {
pub index: Index,
/// The size in texels.
pub size: Size,
pub file: Filename,
/// When this `Texture` is bound to buffer memory, this stores the range of bytes within
/// the buffer that this `Texture` occupies.
#[serde(default = "default_range", skip)]
pub buffer_memory_range: Range<usize>,
/// The [`ImageView`] for the pipeline to use.
#[serde(skip)]
pub image_view: Option<<Backend as GfxBackend>::ImageView>,
/// The actual image data.
#[serde(skip)]
pub data: Option<RgbaImage>,
/// This `Texture` as a Vulkan object.
#[serde(skip)]
pub image: Option<<Backend as GfxBackend>::Image>,
/// A matrix precalculated based on the `Texture`'s size to scale
/// all (u, v) coordinates to be in the [0.0, 1.0] range.
#[serde(default = "glm::identity", skip)]
pub normalization_matrix: Mat3,
/// This `Texture`'s `DescriptorSet`.
/// TODO: Will probably need to rework how the pipeline handles textures.
#[serde(skip)]
pub descriptor_set: Option<<Backend as GfxBackend>::DescriptorSet>,
}
impl Texture {
/// Creates a new `Texture` and copies the texture data to buffer.
pub unsafe fn new(
index: Index,
device: &backend::Device,
limits: &Limits,
texture_data: RgbaImage,
buffer_memory: &mut <Backend as GfxBackend>::Memory,
buffer_memory_offset: u64,
) -> Result<Texture, Error> {
// Create Image.
let image = device.create_image(
gfx_hal::image::Kind::D2(texture_data.width(), texture_data.height(), 1, 1),
1,
Format::Rgba8Srgb,
gfx_hal::image::Tiling::Optimal,
Usage::TRANSFER_DST | Usage::SAMPLED,
gfx_hal::image::ViewCapabilities::empty(),
)?;
// Copy texture data to the given buffer.
let memory_requirement = device.get_image_requirements(&image);
Texture::write_image_to_buffer(
device,
buffer_memory,
buffer_memory_offset..memory_requirement.size + buffer_memory_offset,
&texture_data,
limits,
)?;
Ok(Texture {
index,
file: "".to_string(),
size: Size {
x: texture_data.width() as f32,
y: texture_data.height() as f32,
},
normalization_matrix: glm::scaling2d(&glm::vec2(
1.0 / texture_data.width() as f32,
1.0 / texture_data.height() as f32,
)),
data: Some(texture_data),
descriptor_set: None,
image: Some(image),
image_view: None,
buffer_memory_range: buffer_memory_offset as usize
..(memory_requirement.size + buffer_memory_offset) as usize,
})
}
/// Loads texture data from file and creates the `Texture`'s `Image`.
pub fn initialize(
&mut self,
device: &<Backend as GfxBackend>::Device,
color_format: Format,
) -> Result<(), Error> {
let data = image::open(&self.file)?.to_rgba();
let image = unsafe {
device.create_image(
gfx_hal::image::Kind::D2(data.width(), data.height(), 1, 1),
1,
color_format,
gfx_hal::image::Tiling::Optimal,
Usage::TRANSFER_DST | Usage::SAMPLED,
gfx_hal::image::ViewCapabilities::empty(),
)?
};
// Creates the uv normalization matrix for this texture.
self.normalization_matrix = glm::scaling2d(&glm::vec2(
1.0 / data.width() as f32,
1.0 / data.height() as f32,
));
self.image = Some(image);
self.data = Some(data);
Ok(())
}
/// Copies the `Texture` data to the given buffer memory.
pub unsafe fn buffer_data(
&mut self,
device: &<Backend as GfxBackend>::Device,
buffer_memory: &<Backend as GfxBackend>::Memory,
buffer_memory_offset: u64,
limits: &Limits,
) -> Result<(), Error> {
let memory_requirement = device.get_image_requirements(self.get_image()?);
self.buffer_memory_range = buffer_memory_offset as usize
..(memory_requirement.size + buffer_memory_offset) as usize;
Self::write_image_to_buffer(
device,
buffer_memory,
self.buffer_memory_range.start as u64..self.buffer_memory_range.end as u64,
self.get_data()?,
limits,
)?;
Ok(())
}
/// Finds the memory size needed for the given texture.
pub fn image_data_size(texture: &RgbaImage, limits: &Limits) -> u64 {
let pixel_size = mem::size_of::<Rgba<u8>>() as u32;
let row_size = pixel_size * texture.width();
// TODO: investigate the wizardry involved in the next two lines.
let row_alignment_mask = limits.min_buffer_copy_pitch_alignment as u32 - 1;
let row_pitch = (row_size + row_alignment_mask) & !row_alignment_mask;
u64::from(row_pitch * texture.height())
}
/// Given the location of this Texture's image data in the image memory,
/// binds the given memory and copies the data into the Image itself, then
/// creates the ImageView.
#[allow(clippy::too_many_arguments)] // CLIPPY HUSH
pub unsafe fn | (
&mut self,
device: &<Backend as GfxBackend>::Device,
image_memory: &<Backend as GfxBackend>::Memory,
image_memory_offset: u64,
command_pool: &mut gfx_hal::CommandPool<Backend, Graphics>,
command_queue: &mut gfx_hal::CommandQueue<Backend, Graphics>,
staging_buffer: &BufferObject,
limits: &Limits,
) -> Result<(), Error> {
device.bind_image_memory(
&image_memory,
image_memory_offset,
&mut self.image.as_mut().unwrap(),
)?;
// Creating an Image is basically like drawing a regular frame except
// the data gets rendered to memory instead of the screen, so we go
// through the whole process of creating a command buffer, adding commands,
// and submitting.
let mut command_buffer = command_pool.acquire_command_buffer::<OneShot>();
command_buffer.begin();
// Set the Image to write mode.
Texture::reformat_image(
&mut command_buffer,
(Access::empty(), Layout::Undefined),
(Access::TRANSFER_WRITE, Layout::TransferDstOptimal),
self.get_image()?,
PipelineStage::TOP_OF_PIPE,
PipelineStage::TRANSFER,
);
// Figure out the size of the texture data.
let pixel_size = mem::size_of::<Rgba<u8>>() as u32;
let row_size = pixel_size * self.size.x as u32;
let row_alignment_mask = limits.min_buffer_copy_pitch_alignment as u32 - 1;
let row_pitch = (row_size + row_alignment_mask) & !row_alignment_mask;
// Copy the data.
command_buffer.copy_buffer_to_image(
&staging_buffer.buffer,
self.get_image()?,
Layout::TransferDstOptimal,
&[BufferImageCopy {
buffer_offset: (self.buffer_memory_range.start - staging_buffer.offset) as u64,
buffer_width: (row_pitch / pixel_size) as u32,
buffer_height: self.size.y as u32,
image_layers: SubresourceLayers {
aspects: Aspects::COLOR,
level: 0,
layers: 0..1,
},
image_offset: Offset { x: 0, y: 0, z: 0 },
image_extent: Extent {
width: self.size.x as u32,
height: self.size.y as u32,
depth: 1,
},
}],
);
// Set Image to read mode.
Texture::reformat_image(
&mut command_buffer,
(Access::TRANSFER_WRITE, Layout::TransferDstOptimal),
(Access::SHADER_READ, Layout::ShaderReadOnlyOptimal),
self.get_image()?,
PipelineStage::TRANSFER,
PipelineStage::FRAGMENT_SHADER,
);
// Synchronize and then perform the rendering.
command_buffer.finish();
let upload_fence = device.create_fence(false)?;
command_queue.submit_nosemaphores(Some(&command_buffer), Some(&upload_fence));
device.wait_for_fence(&upload_fence, core::u64::MAX)?;
device.destroy_fence(upload_fence);
command_pool.free(Some(command_buffer));
// Create the ImageView.
self.image_view = Some(device.create_image_view(
self.get_image()?,
gfx_hal::image::ViewKind::D2,
// Changing this to match the renderer's surface_color_format does funky things
// TODO: Investigate why this happens
Format::Rgba8Srgb,
gfx_hal::format::Swizzle::NO,
SubresourceRange {
aspects: Aspects::COLOR,
levels: 0..1,
layers: 0..1,
},
)?);
Ok(())
}
// Extracted from copy_image_to_memory to clean it up a bit.
/// Switches an Image to the given state/format, handling the synchronization
/// involved.
fn reformat_image(
command_buffer: &mut CommandBuffer<Backend, Graphics>,
source_format: (Access, Layout),
target_format: (Access, Layout),
resource: &<Backend as GfxBackend>::Image,
source_pipeline_stage: PipelineStage,
target_pipeline_stage: PipelineStage,
) {
let image_barrier = Barrier::Image {
states: source_format..target_format,
target: resource,
families: None,
range: SubresourceRange {
aspects: Aspects::COLOR,
levels: 0..1,
layers: 0..1,
},
};
unsafe {
command_buffer.pipeline_barrier(
source_pipeline_stage..target_pipeline_stage,
Dependencies::empty(),
&[image_barrier],
)
};
}
/// Copies an `RgbaImage` containing texture data to the specified buffer.
unsafe fn write_image_to_buffer(
device: &backend::Device,
buffer_memory: &<Backend as GfxBackend>::Memory,
data_range: Range<u64>,
image: &RgbaImage,
limits: &Limits,
) -> Result<(), Error> {
let pixel_size = mem::size_of::<Rgba<u8>>() as u32;
assert_eq!(pixel_size, 32 / 8);
// Calculate image size.
// TODO: Not sure why I have a function to do this but then write it out twice.
let row_size = pixel_size * image.width();
let row_alignment_mask = limits.min_buffer_copy_pitch_alignment as u32 - 1;
let row_pitch = (row_size + row_alignment_mask) & !row_alignment_mask; // what wizardry is this
let mut writer = device.acquire_mapping_writer::<u8>(buffer_memory, data_range)?;
// Write the data row by row.
for row in 0..image.height() {
let image_offset = (row * row_size) as usize;
let data = &(**image)[image_offset..(row_size as usize + image_offset)];
let completed_row_size = (row * row_pitch) as usize;
writer[completed_row_size..(data.len() + completed_row_size)].copy_from_slice(data);
}
device.release_mapping_writer(writer)?;
Ok(())
}
/// A method for getting the `image` field because `unwrap()` unhelpfully moves
/// instead of borrowing.
pub fn get_image(&self) -> Result<&<Backend as GfxBackend>::Image, Error> {
match &self.image {
Some(image) => Ok(image),
None => Err(Error::None()),
}
}
/// A method for getting the `image_view` field because `unwrap()` unhelpfully moves
/// instead of borrowing.
pub fn get_image_view(&self) -> Result<&<Backend as GfxBackend>::ImageView, Error> {
match &self.image_view {
Some(image_view) => Ok(image_view),
None => Err(Error::None()),
}
}
/// A method for getting the `data` field because `unwrap()` unhelpfully moves instead
/// of borrowing.
pub fn get_data(&self) -> Result<&RgbaImage, Error> {
match &self.data {
Some(data) => Ok(data),
None => Err(Error::None()),
}
}
/// Releases resources held by this object.
pub unsafe fn destroy(self, device: &backend::Device) {
device.destroy_image(self.image.unwrap());
device.destroy_image_view(self.image_view.unwrap());
}
}
| copy_image_to_memory | identifier_name |
texture.rs | // texture.rs
// Creation and handling of images and textures.
// (c) 2019, Ryan McGowan <ryan@internally-combusted.net>
//! Loading and management of textures.
use gfx_backend_metal as backend;
use nalgebra_glm as glm;
use gfx_hal::{
command::{BufferImageCopy, CommandBuffer, OneShot},
format::{Aspects, Format},
image::{Access, Extent, Layout, Offset, SubresourceLayers, SubresourceRange, Usage},
memory::{Barrier, Dependencies},
pso::PipelineStage,
Backend as GfxBackend, Device, Graphics, Limits,
};
use image::{Rgba, RgbaImage};
use nalgebra_glm::Mat3;
use serde::Deserialize;
use std::{mem, ops::Range};
use self::backend::Backend;
use crate::{
error::Error,
render::BufferObject,
resource::ResourceManager,
serial::{Filename, Index, Size},
};
// Calculates the total memory size of all the textures given.
// TODO: I probably don't need this.
pub fn total_texture_size(
textures: &[Index],
resource_manager: &ResourceManager,
limits: Limits,
) -> u64 {
textures
.iter()
.map(|texture| {
Texture::image_data_size(
resource_manager.textures[*texture].get_data().unwrap(),
&limits,
)
})
.sum()
}
// Just to make `serde` stop crying when deserializing `Texture`s.
fn default_range() -> Range<usize> {
0..0
}
/// Owns texture data and handles Vulkan-related constructs like
/// `Image`s and `ImageView`s.
#[derive(Debug, Deserialize)]
pub struct Texture {
pub index: Index,
/// The size in texels.
pub size: Size,
pub file: Filename,
/// When this `Texture` is bound to buffer memory, this stores the range of bytes within
/// the buffer that this `Texture` occupies.
#[serde(default = "default_range", skip)]
pub buffer_memory_range: Range<usize>,
/// The [`ImageView`] for the pipeline to use.
#[serde(skip)]
pub image_view: Option<<Backend as GfxBackend>::ImageView>,
/// The actual image data.
#[serde(skip)]
pub data: Option<RgbaImage>,
/// This `Texture` as a Vulkan object.
#[serde(skip)]
pub image: Option<<Backend as GfxBackend>::Image>,
/// A matrix precalculated based on the `Texture`'s size to scale
/// all (u, v) coordinates to be in the [0.0, 1.0] range.
#[serde(default = "glm::identity", skip)]
pub normalization_matrix: Mat3,
/// This `Texture`'s `DescriptorSet`.
/// TODO: Will probably need to rework how the pipeline handles textures.
#[serde(skip)]
pub descriptor_set: Option<<Backend as GfxBackend>::DescriptorSet>,
}
impl Texture {
/// Creates a new `Texture` and copies the texture data to buffer.
pub unsafe fn new(
index: Index,
device: &backend::Device,
limits: &Limits,
texture_data: RgbaImage,
buffer_memory: &mut <Backend as GfxBackend>::Memory,
buffer_memory_offset: u64,
) -> Result<Texture, Error> {
// Create Image.
let image = device.create_image(
gfx_hal::image::Kind::D2(texture_data.width(), texture_data.height(), 1, 1),
1,
Format::Rgba8Srgb,
gfx_hal::image::Tiling::Optimal,
Usage::TRANSFER_DST | Usage::SAMPLED,
gfx_hal::image::ViewCapabilities::empty(),
)?;
// Copy texture data to the given buffer.
let memory_requirement = device.get_image_requirements(&image);
Texture::write_image_to_buffer(
device,
buffer_memory,
buffer_memory_offset..memory_requirement.size + buffer_memory_offset,
&texture_data,
limits,
)?;
Ok(Texture {
index,
file: "".to_string(),
size: Size {
x: texture_data.width() as f32,
y: texture_data.height() as f32,
},
normalization_matrix: glm::scaling2d(&glm::vec2(
1.0 / texture_data.width() as f32,
1.0 / texture_data.height() as f32,
)),
data: Some(texture_data),
descriptor_set: None,
image: Some(image),
image_view: None,
buffer_memory_range: buffer_memory_offset as usize
..(memory_requirement.size + buffer_memory_offset) as usize,
})
}
/// Loads texture data from file and creates the `Texture`'s `Image`.
pub fn initialize(
&mut self,
device: &<Backend as GfxBackend>::Device,
color_format: Format,
) -> Result<(), Error> {
let data = image::open(&self.file)?.to_rgba();
let image = unsafe {
device.create_image(
gfx_hal::image::Kind::D2(data.width(), data.height(), 1, 1),
1,
color_format,
gfx_hal::image::Tiling::Optimal,
Usage::TRANSFER_DST | Usage::SAMPLED,
gfx_hal::image::ViewCapabilities::empty(),
)?
};
// Creates the uv normalization matrix for this texture.
self.normalization_matrix = glm::scaling2d(&glm::vec2(
1.0 / data.width() as f32,
1.0 / data.height() as f32,
));
self.image = Some(image);
self.data = Some(data);
Ok(())
}
/// Copies the `Texture` data to the given buffer memory.
pub unsafe fn buffer_data(
&mut self,
device: &<Backend as GfxBackend>::Device,
buffer_memory: &<Backend as GfxBackend>::Memory,
buffer_memory_offset: u64,
limits: &Limits,
) -> Result<(), Error> {
let memory_requirement = device.get_image_requirements(self.get_image()?);
self.buffer_memory_range = buffer_memory_offset as usize
..(memory_requirement.size + buffer_memory_offset) as usize;
Self::write_image_to_buffer(
device,
buffer_memory,
self.buffer_memory_range.start as u64..self.buffer_memory_range.end as u64,
self.get_data()?,
limits,
)?;
Ok(())
}
/// Finds the memory size needed for the given texture.
pub fn image_data_size(texture: &RgbaImage, limits: &Limits) -> u64 {
let pixel_size = mem::size_of::<Rgba<u8>>() as u32;
let row_size = pixel_size * texture.width();
// TODO: investigate the wizardry involved in the next two lines.
let row_alignment_mask = limits.min_buffer_copy_pitch_alignment as u32 - 1;
let row_pitch = (row_size + row_alignment_mask) & !row_alignment_mask;
u64::from(row_pitch * texture.height())
}
/// Given the location of this Texture's image data in the image memory,
/// binds the given memory and copies the data into the Image itself, then
/// creates the ImageView.
#[allow(clippy::too_many_arguments)] // CLIPPY HUSH
pub unsafe fn copy_image_to_memory(
&mut self,
device: &<Backend as GfxBackend>::Device,
image_memory: &<Backend as GfxBackend>::Memory,
image_memory_offset: u64,
command_pool: &mut gfx_hal::CommandPool<Backend, Graphics>,
command_queue: &mut gfx_hal::CommandQueue<Backend, Graphics>,
staging_buffer: &BufferObject,
limits: &Limits,
) -> Result<(), Error> |
// Extracted from copy_image_to_memory to clean it up a bit.
/// Switches an Image to the given state/format, handling the synchronization
/// involved.
fn reformat_image(
command_buffer: &mut CommandBuffer<Backend, Graphics>,
source_format: (Access, Layout),
target_format: (Access, Layout),
resource: &<Backend as GfxBackend>::Image,
source_pipeline_stage: PipelineStage,
target_pipeline_stage: PipelineStage,
) {
let image_barrier = Barrier::Image {
states: source_format..target_format,
target: resource,
families: None,
range: SubresourceRange {
aspects: Aspects::COLOR,
levels: 0..1,
layers: 0..1,
},
};
unsafe {
command_buffer.pipeline_barrier(
source_pipeline_stage..target_pipeline_stage,
Dependencies::empty(),
&[image_barrier],
)
};
}
/// Copies an `RgbaImage` containing texture data to the specified buffer.
unsafe fn write_image_to_buffer(
device: &backend::Device,
buffer_memory: &<Backend as GfxBackend>::Memory,
data_range: Range<u64>,
image: &RgbaImage,
limits: &Limits,
) -> Result<(), Error> {
let pixel_size = mem::size_of::<Rgba<u8>>() as u32;
assert_eq!(pixel_size, 32 / 8);
// Calculate image size.
// TODO: Not sure why I have a function to do this but then write it out twice.
let row_size = pixel_size * image.width();
let row_alignment_mask = limits.min_buffer_copy_pitch_alignment as u32 - 1;
let row_pitch = (row_size + row_alignment_mask) & !row_alignment_mask; // what wizardry is this
let mut writer = device.acquire_mapping_writer::<u8>(buffer_memory, data_range)?;
// Write the data row by row.
for row in 0..image.height() {
let image_offset = (row * row_size) as usize;
let data = &(**image)[image_offset..(row_size as usize + image_offset)];
let completed_row_size = (row * row_pitch) as usize;
writer[completed_row_size..(data.len() + completed_row_size)].copy_from_slice(data);
}
device.release_mapping_writer(writer)?;
Ok(())
}
/// A method for getting the `image` field because `unwrap()` unhelpfully moves
/// instead of borrowing.
pub fn get_image(&self) -> Result<&<Backend as GfxBackend>::Image, Error> {
match &self.image {
Some(image) => Ok(image),
None => Err(Error::None()),
}
}
/// A method for getting the `image_view` field because `unwrap()` unhelpfully moves
/// instead of borrowing.
pub fn get_image_view(&self) -> Result<&<Backend as GfxBackend>::ImageView, Error> {
match &self.image_view {
Some(image_view) => Ok(image_view),
None => Err(Error::None()),
}
}
/// A method for getting the `data` field because `unwrap()` unhelpfully moves instead
/// of borrowing.
pub fn get_data(&self) -> Result<&RgbaImage, Error> {
match &self.data {
Some(data) => Ok(data),
None => Err(Error::None()),
}
}
/// Releases resources held by this object.
pub unsafe fn destroy(self, device: &backend::Device) {
device.destroy_image(self.image.unwrap());
device.destroy_image_view(self.image_view.unwrap());
}
}
| {
device.bind_image_memory(
&image_memory,
image_memory_offset,
&mut self.image.as_mut().unwrap(),
)?;
// Creating an Image is basically like drawing a regular frame except
// the data gets rendered to memory instead of the screen, so we go
// through the whole process of creating a command buffer, adding commands,
// and submitting.
let mut command_buffer = command_pool.acquire_command_buffer::<OneShot>();
command_buffer.begin();
// Set the Image to write mode.
Texture::reformat_image(
&mut command_buffer,
(Access::empty(), Layout::Undefined),
(Access::TRANSFER_WRITE, Layout::TransferDstOptimal),
self.get_image()?,
PipelineStage::TOP_OF_PIPE,
PipelineStage::TRANSFER,
);
// Figure out the size of the texture data.
let pixel_size = mem::size_of::<Rgba<u8>>() as u32;
let row_size = pixel_size * self.size.x as u32;
let row_alignment_mask = limits.min_buffer_copy_pitch_alignment as u32 - 1;
let row_pitch = (row_size + row_alignment_mask) & !row_alignment_mask;
// Copy the data.
command_buffer.copy_buffer_to_image(
&staging_buffer.buffer,
self.get_image()?,
Layout::TransferDstOptimal,
&[BufferImageCopy {
buffer_offset: (self.buffer_memory_range.start - staging_buffer.offset) as u64,
buffer_width: (row_pitch / pixel_size) as u32,
buffer_height: self.size.y as u32,
image_layers: SubresourceLayers {
aspects: Aspects::COLOR,
level: 0,
layers: 0..1,
},
image_offset: Offset { x: 0, y: 0, z: 0 },
image_extent: Extent {
width: self.size.x as u32,
height: self.size.y as u32,
depth: 1,
},
}],
);
// Set Image to read mode.
Texture::reformat_image(
&mut command_buffer,
(Access::TRANSFER_WRITE, Layout::TransferDstOptimal),
(Access::SHADER_READ, Layout::ShaderReadOnlyOptimal),
self.get_image()?,
PipelineStage::TRANSFER,
PipelineStage::FRAGMENT_SHADER,
);
// Synchronize and then perform the rendering.
command_buffer.finish();
let upload_fence = device.create_fence(false)?;
command_queue.submit_nosemaphores(Some(&command_buffer), Some(&upload_fence));
device.wait_for_fence(&upload_fence, core::u64::MAX)?;
device.destroy_fence(upload_fence);
command_pool.free(Some(command_buffer));
// Create the ImageView.
self.image_view = Some(device.create_image_view(
self.get_image()?,
gfx_hal::image::ViewKind::D2,
// Changing this to match the renderer's surface_color_format does funky things
// TODO: Investigate why this happens
Format::Rgba8Srgb,
gfx_hal::format::Swizzle::NO,
SubresourceRange {
aspects: Aspects::COLOR,
levels: 0..1,
layers: 0..1,
},
)?);
Ok(())
} | identifier_body |
write.py | """
Write tables to text.
"""
import numpy as np
from collections import OrderedDict
### String and table formatting #################################
def pad_delimiters(string1, number, delimiter=',', slen=None):
"""
Pad string1 to the requested number of delimiters.
Parameters
----------
string1 : str
String to be assessed
number : int
Number of total delimiters in final output string
delimiter : str
String type of delimiter to use
"""
current_num = string1.count(delimiter)
num_to_add = number - current_num
curlen = len(string1)
if num_to_add > 0:
if slen is not None:
len_to_add = slen - curlen
spacing = int(len_to_add / num_to_add)
xtra = len_to_add % num_to_add
s1 = ("%" + str(spacing) + "s") % delimiter
s1 = s1 * num_to_add + " " * xtra
string1 += s1
else:
string1 += delimiter * num_to_add
return string1
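# A minimal usage sketch (illustrative only): pad a row out to five
# comma-delimited fields.
def _pad_delimiters_example():
    padded = pad_delimiters('bob, joe, tim', 5)
    # 'bob, joe, tim' already contains 2 commas, so 3 more are appended.
    return padded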
def format_narray(narray, fmt = '%16s', delimiter = ","):
"""
    Format a 1D or 2D numpy array as a list of delimited row strings.
"""
narray = np.array(narray)
len1 = len(narray.shape)
if len1 == 1:
return [format_list(narray, fmt, delimiter)]
elif len1 == 2:
strlist = []
rownum = narray.shape[0]
for i in range(rownum):
stri = format_list(narray[i, :], fmt, delimiter)
strlist.append(stri)
return strlist
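# A minimal usage sketch (illustrative only): format_narray turns a 2x3 array
# into one comma-delimited string per row.
def _format_narray_example():
    rows = format_narray(np.arange(6).reshape(2, 3), fmt='%6d')
    # rows == ['     0,     1,     2\n', '     3,     4,     5\n']
    return rows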
def format_list(list1, fmt = '%16s', delimiter = ","):
"""
format list of numbers to string.
delimiter defaults = ','
"""
string1 = delimiter.join(fmt % h for h in list1) + '\n'
return string1
def format_list2(list1, sfmt = '%16s', nfmt = '%16.8e', delimiter = ','):
"""
format list of numbers or strings to a delimited string.
Parameters
----------
list1 : list of numbers and strings
List to convert to string
sfmt : str
string formatter, ie "%16s"
nfmt : str
number format, ie '%16.8e'
delimiter : str
list delimiter
"""
outlist = []
for h in list1:
try:
outlist.append(nfmt % h)
except TypeError:
outlist.append(sfmt % h)
string1 = delimiter.join(outlist) + '\n'
return string1
def wrap_list(list1, fmt = '%16s', delimiter = ",", maxcols = 8):
"""
format a list and wrap by max number of columns
"""
len1 = len(list1)
string = ""
for i in range(0, len1, maxcols):
li = list1[i : i + maxcols]
stri = format_list(li, fmt = fmt, delimiter = delimiter)
string += stri
return string
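# A minimal usage sketch (illustrative only): wrap_list breaks a long list into
# rows of at most `maxcols` formatted entries.
def _wrap_list_example():
    text = wrap_list(list(range(10)), fmt='%4d', maxcols=4)
    # Produces three lines holding 4, 4 and 2 entries respectively.
    return text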
def empty_table_str(rows, columns, fmt='%16s', delimiter=','):
"""Construct empty string list for entry numbers of colnum"""
a = [[''] * columns] * rows
return format_narray(a, fmt=fmt, delimiter=delimiter)
#def hstack_arrays0(arrays, sfmt='%16s', nfmt='%16.8e',delimiter=','):
# """
# Horizontally stack arrays of varying row number and convert to strings.
#
# """
# maxrows = max(len(table) for table in arrays)
#
# for table in arrays:
# table = np.array(table)
# if np.ndim(table) <= 1:
# table = np.reshape(table, (-1, 1))
#
#
# rows, cols = table.shape
#
# try:
# s1 = format_narray(table, nfmt, delimiter)
# except TypeError:
# s1 = format_narray(table, sfmt, delimiter)
#
# if rows < maxrows:
# s2 = empty_table_str(rows, cols, fmt=sfmt, delimiter=delimiter)
# s1.extend(s2)
#
def hstack_arrays(arrays, sfmt = '%16s', nfmt = '%16.8e',delimiter = ','):
"""
    Horizontally stack arrays of possibly different lengths and format them as delimited string lines.
Parameters
----------
arrays : list/iterable
Array-like objects
sfmt : str
String format (ex, '%16s')
nfmt : str
number format (ex, '%16.8e')
delimiter : str
Delimiter for columns
Returns
-------
slist : list of string lines
"""
s_tables = []
for table in arrays:
table = np.asarray(table)
if np.ndim(table) <= 1:
table = np.reshape(table, (-1, 1))
try:
s_table = format_narray(table, nfmt, delimiter)
except TypeError:
s_table = format_narray(table, sfmt, delimiter)
s_tables.append(s_table)
slist = hstack_str_list(s_tables, delimiter)
return slist
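# A minimal usage sketch (illustrative only): hstack_arrays lines up a numeric
# column of three rows next to a string column of two rows, padding the shorter one.
def _hstack_arrays_example():
    lines = hstack_arrays([np.arange(3), ['a', 'b']], sfmt='%8s', nfmt='%8.2f')
    return lines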
#def hstack_strings(str_lists):
# """
# Horizontally stack lists of strings
#
# arguments:
# ------
# str_lists : list of list of strings
# str_lists = [str_list1, str_list2, str_list3, ...]
#
# str_listi : list of strings
# List similar to output from file.readlines().
#
#
# """
#
# #First get maximum row count
# maxrows = max(len(slist) for slist in str_lists)
# listnum = len(str_lists)
# max_col_lengths = []
#
#
# #Obtain maximum line length
# for sl in str_lists:
# max_clen = max(len(line) for line in sl)
# max_col_lengths.append(max_clen)
#
#
# #Pad the bottoms of str lists so all lists are equal in row number
# for i, slist in enumerate(str_lists):
# rows = len(slist)
# max_clen = max_col_lengths[i]
#
# if rows < maxrows:
# newline = max_clen * ' '
# padding = [newline] * (maxrows - rows)
# slist.extend(padding)
#
#
#
#
#
#
# return
#
def hstack_str_list(str_lists, delimiter = ',', pad=False):
"""
Horizontally stack lists of string in the format of:
str_lists = [strlist1, strlist2, strlist3, ...]
strlisti = list of strings similar to output to file.readlines()
Prepare the strings for write to file, add new lines.
Parameters
----------
str_lists: list of list of str
Multiple lists of strings, each list to be horizontally stacked.
delimiter : str
Delimiter
Returns
-------
out : list of str
"""
listnum = len(str_lists)
max_col_lengths = []
max_col_nums = []
# Strip trailing delimiter from every line in string lists.
for j, sl in enumerate(str_lists[:]):
for i in range(len(sl)):
            str_lists[j][i] = str_lists[j][i].rstrip(delimiter)
# Obtain max line length and max number of columns defined by delimiter.
for sl in str_lists:
max_clen = max(len(line) for line in sl)
max_col_lengths.append(max_clen)
max_columns = max(line.count(delimiter) for line in sl) + 1
max_col_nums.append(max_columns)
max_row_length = max(len(sl) for sl in str_lists)
lines = []
for i in range(max_row_length):
line = ""
#Loop through each table to hstack
for j in range(listnum):
if j == listnum - 1:
dnum = max_col_nums[j] - 1
else:
dnum = max_col_nums[j]
line_len = max_col_lengths[j]
try:
linej = str_lists[j][i]
linej = linej.rstrip("\n") #Get rid of newlines
linej = pad_delimiters(linej, dnum, delimiter) #Pad out string with delimiters
if pad:
linej = linej.ljust(line_len) #Pad out the string with spaces
#Not every row in tables will have data. Pad these ones out
except IndexError:
linej = pad_delimiters("", dnum, delimiter, slen=line_len) #Pad out string with delimiters
if pad:
linej = linej.ljust(line_len) #Pad out the string with spaces
line += linej
lines.append(line + '\n')
return lines
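# A minimal usage sketch (illustrative only): stack two small string tables of
# unequal row count; the shorter one is padded with delimiters.
def _hstack_str_list_example():
    left = ['1,2\n', '3,4\n', '5,6\n']
    right = ['x\n']
    return hstack_str_list([left, right])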
def get_header_names(fname, row_num, delimiter = None):
"""
Return list of column header names as numpy array
"""
names = []
with open(fname) as f:
for i in range(row_num):
line = f.readline()
list_ = line.split(delimiter)
names.append(list_)
return np.array(names, dtype = str)
class StringTable(object):
"""
    Standardized string table for file output.
Parameters
----------
sfmt : str
formatting specifier for string formatting, ex. '%16s'.
nfmt : str
formatting specifier for numeric formatting, ex. '%16.8e'.
ifmt : str
formatting specifier for integer formatting, ex. '%16d'.
delimiter : str
specification for delimiter. Default is ','.
Usage
-----
    Add data using the `add` method:
    - data = [A x B] array of A rows of data and B columns
    - labels = [C x B] array of labels for B columns
    - A = # of data rows
    - B = # of data columns
    - C = # of column label rows
"""
def __init__(self,
sfmt = '%s',
nfmt = '%.8e',
ifmt = '%d',
delimiter = ',',
header = None):
self.header_info = OrderedDict()
self.columnlabels = []
self.columns = []
self.formats = []
self.sfmt = sfmt
self.nfmt = nfmt
self.ifmt = ifmt
self.delimiter = delimiter
self.header = header
return
def add(self, labels, data, fmt = None):
"""
Add data to table.
Parameters
----------
data : [A, B] array
A rows of data and B columns
labels : [C, B] array
Labels for B columns.
fmt : str
Formatting string. If None, use default number format self.nfmt
* A = # of data rows
* B = # of data columns
* C = # of column label rows
"""
data = np.asarray(data)
self.columns.append(data)
self.columnlabels.append(labels)
if fmt is None:
dtype = data.dtype.name
if 'int' in dtype:
fmt = self.ifmt
else:
fmt = self.nfmt
self.formats.append(fmt)
return
# def add_info(self, **kwargs):
# """Add information to header """
# self.header_info.update(**kwargs)
# return
#
def write(self, fname, headers=False, labels=True):
"""
Write data to file fname. Can be file, path, or filelike.
"""
strlist = self.string_lines(headers=headers, labels=labels)
if not hasattr(fname, 'writelines'):
with open(fname, 'w') as f:
f.writelines(strlist)
else:
fname.writelines(strlist)
return
def string_lines(self, headers = False, labels = True):
"""
Construct string data for table.
"""
if self.header is not None:
if self.header[-1] != '\n':
self.header += '\n'
strlist = [self.header]
else:
strlist = []
# if headers:
# s = self._build_header()
# strlist.extend(s)
if labels:
s = self._build_labels()
strlist.extend(s)
data = self._build_data()
strlist.extend(data)
return strlist
@property
def colnum(self):
"""Return number of columns"""
colnum = 0
for table in self.columnlabels:
table = np.asarray(table)
if np.ndim(table) <= 1:
table = np.reshape(table, (1, -1))
colnum += table.shape[1]
return colnum
def _build_data(self):
s_tables = []
for table, fmt in zip(self.columns, self.formats):
table = np.array(table)
if np.ndim(table) <= 1:
table = np.reshape(table, (-1, 1))
try:
s_table = format_narray(table, fmt, self.delimiter)
except TypeError:
s_table = format_narray(table, self.sfmt, self.delimiter)
s_tables.append(s_table)
slist = hstack_str_list(s_tables, self.delimiter)
return slist
def _build_labels(self):
s_tables = []
for table in self.columnlabels:
table = np.array(table)
if np.ndim(table) <= 1:
table = np.reshape(table, (1, -1))
s_table = format_narray(table, self.sfmt, self.delimiter)
s_tables.append(s_table)
slist = hstack_str_list(s_tables, self.delimiter)
return slist
#
# def _build_header(self):
#
# list1 = []
# for key, val in self.header_info.items():
# fkey = key + '='
# try:
# fval = self.sfmt % val
# except TypeError:
# fval = self.nfmt % val
# list1.append(fkey)
# list1.append(fval)
#
# slist = wrap_list(list1, self.sfmt, self.delimiter, maxcols = self.colnum)
# return slist
class ReadTable(object):
"""
Read file constructed using StringTable.
    All data is read as str into numpy arrays. Data can be indexed either
    positionally as a numpy array or by keys taken from the column labels.
Parameters
----------
f : file-like or iterable
file or iterable with text data
labelrows : list or None
List of column row numbers that have column label information.
By default, this is set to None to use only 0th row.
delimiter : str
Table delimiter
"""
def __init__(self, f, labelrows=None, delimiter=','):
self.lines = list(f)
if labelrows is None:
labelrows = [0]
self.labelrows = labelrows
self.delimiter = delimiter
self.labels = self._read_labels()
self.data = self._read_data()
return
def _read_labels(self):
"""Read columnn headers/labels"""
lines = self.lines
hlines = [lines[i] for i in self.labelrows]
        header = np.genfromtxt(hlines, delimiter=self.delimiter, dtype=str)
        header = np.atleast_2d(header)  # keep a 2D shape even for a single label row
        header = np.core.defchararray.strip(header)
add = np.core.defchararray.add
header1 = add(header, '\n')
newheader = header1[0]
for h in header1[1:]:
newheader = add(newheader, h)
newheader = [s.strip() for s in newheader]
#newheader = [s.replace('\n', '.') for s in newheader]
return newheader
def _read_data(self):
"""Read tabular data"""
lines = self.lines
start = max(self.labelrows) + 1
d = np.genfromtxt(lines[start:],
delimiter=self.delimiter,
dtype=str)
return d
def dict(self):
d = {}
for i, l in enumerate(self.labels):
d[l] = self.data[:, i]
return d
def find(self, key):
"""
Find keyword within labels. Return columns with keyword
| """
d = []
for k in self.labels:
if key in k:
d.append(self[k])
return np.column_stack(d)
def __getitem__(self, key):
        try:
            return self.dict()[key]
        except (TypeError, KeyError, IndexError):
            return self.data[key]
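# A minimal round-trip sketch (illustrative only; it assumes a single label row is
# handled by _read_labels as above): write a StringTable to an in-memory buffer
# and read it back with ReadTable.
def _roundtrip_example():
    import io
    table = StringTable()
    table.add(['x', 'y'], np.column_stack([np.arange(4), np.arange(4) ** 2]))
    buf = io.StringIO()
    table.write(buf)
    buf.seek(0)
    reader = ReadTable(buf)
    return reader['x'], reader['y']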
if __name__ == '__main__':
t = pad_delimiters('bob, joe, tim', 5, slen=50)
a = np.ones((10,3))
alabels = 'joe','bob','tim'
b= np.zeros((20,4))
blabels = 'p1','p2','p3','p4'
c = np.random.rand(40,5)
clabels = [['bob','john','cid','doe'],['b','j','c','d']]
s = StringTable()
s.add(alabels, c)
s.add(blabels, b)
s.add(clabels, a)
s.write('test-file.txt')
pass | returns:
------
array of shape (a, b) | random_line_split |
write.py | """
Write tables to text.
"""
import numpy as np
from collections import OrderedDict
### String and table formatting #################################
def pad_delimiters(string1, number, delimiter=',', slen=None):
"""
Pad string1 to the requested number of delimiters.
Parameters
----------
string1 : str
String to be assessed
number : int
Number of total delimiters in final output string
delimiter : str
String type of delimiter to use
"""
current_num = string1.count(delimiter)
num_to_add = number - current_num
curlen = len(string1)
if num_to_add > 0:
if slen is not None:
len_to_add = slen - curlen
spacing = int(len_to_add / num_to_add)
xtra = len_to_add % num_to_add
s1 = ("%" + str(spacing) + "s") % delimiter
s1 = s1 * num_to_add + " " * xtra
string1 += s1
else:
string1 += delimiter * num_to_add
return string1
def format_narray(narray, fmt = '%16s', delimiter = ","):
"""
format a 2D numpy array to a list of strings
"""
narray = np.array(narray)
len1 = len(narray.shape)
if len1 == 1:
return [format_list(narray, fmt, delimiter)]
elif len1 == 2:
strlist = []
rownum = narray.shape[0]
for i in range(rownum):
stri = format_list(narray[i, :], fmt, delimiter)
strlist.append(stri)
return strlist
def format_list(list1, fmt = '%16s', delimiter = ","):
"""
format list of numbers to string.
delimiter defaults = ','
"""
string1 = delimiter.join(fmt % h for h in list1) + '\n'
return string1
def format_list2(list1, sfmt = '%16s', nfmt = '%16.8e', delimiter = ','):
"""
format list of numbers or strings to a delimited string.
Parameters
----------
list1 : list of numbers and strings
List to convert to string
sfmt : str
string formatter, ie "%16s"
nfmt : str
number format, ie '%16.8e'
delimiter : str
list delimiter
"""
outlist = []
for h in list1:
try:
outlist.append(nfmt % h)
except TypeError:
outlist.append(sfmt % h)
string1 = delimiter.join(outlist) + '\n'
return string1
def wrap_list(list1, fmt = '%16s', delimiter = ",", maxcols = 8):
"""
format a list and wrap by max number of columns
"""
len1 = len(list1)
string = ""
for i in range(0, len1, maxcols):
li = list1[i : i + maxcols]
stri = format_list(li, fmt = fmt, delimiter = delimiter)
string += stri
return string
def empty_table_str(rows, columns, fmt='%16s', delimiter=','):
"""Construct empty string list for entry numbers of colnum"""
a = [[''] * columns] * rows
return format_narray(a, fmt=fmt, delimiter=delimiter)
#def hstack_arrays0(arrays, sfmt='%16s', nfmt='%16.8e',delimiter=','):
# """
# Horizontally stack arrays of varying row number and convert to strings.
#
# """
# maxrows = max(len(table) for table in arrays)
#
# for table in arrays:
# table = np.array(table)
# if np.ndim(table) <= 1:
# table = np.reshape(table, (-1, 1))
#
#
# rows, cols = table.shape
#
# try:
# s1 = format_narray(table, nfmt, delimiter)
# except TypeError:
# s1 = format_narray(table, sfmt, delimiter)
#
# if rows < maxrows:
# s2 = empty_table_str(rows, cols, fmt=sfmt, delimiter=delimiter)
# s1.extend(s2)
#
def hstack_arrays(arrays, sfmt = '%16s', nfmt = '%16.8e',delimiter = ','):
"""
    Horizontally stack arrays of possibly different row counts and convert
    them to delimited text lines.
Parameters
----------
arrays : list/iterable
Array-like objects
sfmt : str
String format (ex, '%16s')
nfmt : str
number format (ex, '%16.8e')
delimiter : str
Delimiter for columns
Returns
-------
slist : list of string lines
"""
s_tables = []
for table in arrays:
table = np.asarray(table)
if np.ndim(table) <= 1:
table = np.reshape(table, (-1, 1))
try:
s_table = format_narray(table, nfmt, delimiter)
except TypeError:
s_table = format_narray(table, sfmt, delimiter)
s_tables.append(s_table)
slist = hstack_str_list(s_tables, delimiter)
return slist
#def hstack_strings(str_lists):
# """
# Horizontally stack lists of strings
#
# arguments:
# ------
# str_lists : list of list of strings
# str_lists = [str_list1, str_list2, str_list3, ...]
#
# str_listi : list of strings
# List similar to output from file.readlines().
#
#
# """
#
# #First get maximum row count
# maxrows = max(len(slist) for slist in str_lists)
# listnum = len(str_lists)
# max_col_lengths = []
#
#
# #Obtain maximum line length
# for sl in str_lists:
# max_clen = max(len(line) for line in sl)
# max_col_lengths.append(max_clen)
#
#
# #Pad the bottoms of str lists so all lists are equal in row number
# for i, slist in enumerate(str_lists):
# rows = len(slist)
# max_clen = max_col_lengths[i]
#
# if rows < maxrows:
# newline = max_clen * ' '
# padding = [newline] * (maxrows - rows)
# slist.extend(padding)
#
#
#
#
#
#
# return
#
def hstack_str_list(str_lists, delimiter = ',', pad=False):
"""
    Horizontally stack lists of strings, given in the form:
    str_lists = [strlist1, strlist2, strlist3, ...]
    where each strlisti is a list of strings similar to the output of file.readlines().
    Prepares the strings for writing to file and appends newlines.
Parameters
----------
str_lists: list of list of str
Multiple lists of strings, each list to be horizontally stacked.
delimiter : str
Delimiter
Returns
-------
out : list of str
"""
listnum = len(str_lists)
max_col_lengths = []
max_col_nums = []
# Strip trailing delimiter from every line in string lists.
    for sl in str_lists:
        for i in range(len(sl)):
            sl[i] = sl[i].rstrip(delimiter)
# Obtain max line length and max number of columns defined by delimiter.
for sl in str_lists:
max_clen = max(len(line) for line in sl)
max_col_lengths.append(max_clen)
max_columns = max(line.count(delimiter) for line in sl) + 1
max_col_nums.append(max_columns)
max_row_length = max(len(sl) for sl in str_lists)
lines = []
for i in range(max_row_length):
line = ""
#Loop through each table to hstack
for j in range(listnum):
if j == listnum - 1:
dnum = max_col_nums[j] - 1
else:
dnum = max_col_nums[j]
line_len = max_col_lengths[j]
try:
linej = str_lists[j][i]
linej = linej.rstrip("\n") #Get rid of newlines
linej = pad_delimiters(linej, dnum, delimiter) #Pad out string with delimiters
if pad:
linej = linej.ljust(line_len) #Pad out the string with spaces
#Not every row in tables will have data. Pad these ones out
except IndexError:
linej = pad_delimiters("", dnum, delimiter, slen=line_len) #Pad out string with delimiters
if pad:
linej = linej.ljust(line_len) #Pad out the string with spaces
line += linej
lines.append(line + '\n')
return lines
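# Editor's note: illustrative sketch of hstack_str_list on two column blocks with
# unequal row counts; not part of the original module, helper name is hypothetical.
def _example_hstack_str_list():
    left = ["x,y\n", "1,2\n", "3,4\n"]
    right = ["z\n", "9\n"]
    # With the default pad=False this yields roughly ["x,y,z\n", "1,2,9\n", "3,4,\n"]:
    # the shorter block is padded out with delimiters.
    return hstack_str_list([left, right])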
def get_header_names(fname, row_num, delimiter = None):
"""
Return list of column header names as numpy array
"""
names = []
with open(fname) as f:
for i in range(row_num):
|
return np.array(names, dtype = str)
class StringTable(object):
"""
    Standardized string table for file output.
Parameters
----------
sfmt : str
formatting specifier for string formatting, ex. '%16s'.
nfmt : str
formatting specifier for numeric formatting, ex. '%16.8e'.
ifmt : str
formatting specifier for integer formatting, ex. '%16d'.
delimiter : str
specification for delimiter. Default is ','.
Usage
-----
Add data using self.add
- data = [A x B] array of A rows of data and B columns
- labels = [C x B] array-1d of labels for B columns.
- A = # of data rows
- B = # of data columns
- C = # of column label rows
Add data using "add" method.
"""
def __init__(self,
sfmt = '%s',
nfmt = '%.8e',
ifmt = '%d',
delimiter = ',',
header = None):
self.header_info = OrderedDict()
self.columnlabels = []
self.columns = []
self.formats = []
self.sfmt = sfmt
self.nfmt = nfmt
self.ifmt = ifmt
self.delimiter = delimiter
self.header = header
return
def add(self, labels, data, fmt = None):
"""
Add data to table.
Parameters
----------
data : [A, B] array
A rows of data and B columns
labels : [C, B] array
Labels for B columns.
fmt : str
Formatting string. If None, use default number format self.nfmt
* A = # of data rows
* B = # of data columns
* C = # of column label rows
"""
data = np.asarray(data)
self.columns.append(data)
self.columnlabels.append(labels)
if fmt is None:
dtype = data.dtype.name
if 'int' in dtype:
fmt = self.ifmt
else:
fmt = self.nfmt
self.formats.append(fmt)
return
# def add_info(self, **kwargs):
# """Add information to header """
# self.header_info.update(**kwargs)
# return
#
def write(self, fname, headers=False, labels=True):
"""
Write data to file fname. Can be file, path, or filelike.
"""
strlist = self.string_lines(headers=headers, labels=labels)
if not hasattr(fname, 'writelines'):
with open(fname, 'w') as f:
f.writelines(strlist)
else:
fname.writelines(strlist)
return
def string_lines(self, headers = False, labels = True):
"""
Construct string data for table.
"""
if self.header is not None:
if self.header[-1] != '\n':
self.header += '\n'
strlist = [self.header]
else:
strlist = []
# if headers:
# s = self._build_header()
# strlist.extend(s)
if labels:
s = self._build_labels()
strlist.extend(s)
data = self._build_data()
strlist.extend(data)
return strlist
@property
def colnum(self):
"""Return number of columns"""
colnum = 0
for table in self.columnlabels:
table = np.asarray(table)
if np.ndim(table) <= 1:
table = np.reshape(table, (1, -1))
colnum += table.shape[1]
return colnum
def _build_data(self):
s_tables = []
for table, fmt in zip(self.columns, self.formats):
table = np.array(table)
if np.ndim(table) <= 1:
table = np.reshape(table, (-1, 1))
try:
s_table = format_narray(table, fmt, self.delimiter)
except TypeError:
s_table = format_narray(table, self.sfmt, self.delimiter)
s_tables.append(s_table)
slist = hstack_str_list(s_tables, self.delimiter)
return slist
def _build_labels(self):
s_tables = []
for table in self.columnlabels:
table = np.array(table)
if np.ndim(table) <= 1:
table = np.reshape(table, (1, -1))
s_table = format_narray(table, self.sfmt, self.delimiter)
s_tables.append(s_table)
slist = hstack_str_list(s_tables, self.delimiter)
return slist
#
# def _build_header(self):
#
# list1 = []
# for key, val in self.header_info.items():
# fkey = key + '='
# try:
# fval = self.sfmt % val
# except TypeError:
# fval = self.nfmt % val
# list1.append(fkey)
# list1.append(fval)
#
# slist = wrap_list(list1, self.sfmt, self.delimiter, maxcols = self.colnum)
# return slist
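# Editor's note: end-to-end usage sketch for StringTable; illustrative only, the
# helper name and the in-memory StringIO buffer are this editor's additions.
def _example_stringtable_roundtrip():
    import io
    t = StringTable(header="# demo table")
    t.add(['a', 'b'], np.arange(6).reshape(3, 2))
    buf = io.StringIO()
    t.write(buf)  # write() accepts any object with a writelines() method
    return buf.getvalue()  # "# demo table\na,b\n0,1\n2,3\n4,5\n"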
class ReadTable(object):
"""
Read file constructed using StringTable.
    All data is read as str into numpy arrays. Data can be indexed either
    positionally as a numpy array or by the keys found in the column labels.
Parameters
----------
f : file-like or iterable
file or iterable with text data
labelrows : list or None
List of column row numbers that have column label information.
By default, this is set to None to use only 0th row.
delimiter : str
Table delimiter
"""
def __init__(self, f, labelrows=None, delimiter=','):
self.lines = list(f)
if labelrows is None:
labelrows = [0]
self.labelrows = labelrows
self.delimiter = delimiter
self.labels = self._read_labels()
self.data = self._read_data()
return
    def _read_labels(self):
        """Read column headers/labels"""
        lines = self.lines
        hlines = [lines[i] for i in self.labelrows]
        header = np.genfromtxt(hlines, delimiter=self.delimiter, dtype=str)
        header = np.atleast_2d(header)  # keep a 2-D view even when there is a single label row
        header = np.core.defchararray.strip(header)
        add = np.core.defchararray.add
        header1 = add(header, '\n')
        newheader = header1[0]
        for h in header1[1:]:
            newheader = add(newheader, h)
        newheader = [s.strip() for s in newheader]
        #newheader = [s.replace('\n', '.') for s in newheader]
        return newheader
def _read_data(self):
"""Read tabular data"""
lines = self.lines
start = max(self.labelrows) + 1
d = np.genfromtxt(lines[start:],
delimiter=self.delimiter,
dtype=str)
return d
def dict(self):
d = {}
for i, l in enumerate(self.labels):
d[l] = self.data[:, i]
return d
def find(self, key):
"""
        Find keyword within labels and return all columns whose label contains it.
        Returns
        -------
        array of shape (a, b)
"""
d = []
for k in self.labels:
if key in k:
d.append(self[k])
return np.column_stack(d)
    def __getitem__(self, key):
        try:
            return self.dict()[key]
        except (KeyError, IndexError, TypeError):
            return self.data[key]
if __name__ == '__main__':
t = pad_delimiters('bob, joe, tim', 5, slen=50)
a = np.ones((10,3))
alabels = 'joe','bob','tim'
b= np.zeros((20,4))
blabels = 'p1','p2','p3','p4'
c = np.random.rand(40,5)
clabels = [['bob','john','cid','doe'],['b','j','c','d']]
s = StringTable()
s.add(alabels, c)
s.add(blabels, b)
s.add(clabels, a)
s.write('test-file.txt')
pass
| line = f.readline()
list_ = line.split(delimiter)
names.append(list_) | conditional_block |
write.py | """
Write tables to text.
"""
import numpy as np
from collections import OrderedDict
### String and table formatting #################################
def pad_delimiters(string1, number, delimiter=',', slen=None, ):
"""
Pad string1 to the requested number of delimiters.
Parameters
----------
string1 : str
String to be assessed
number : int
Number of total delimiters in final output string
delimiter : str
String type of delimiter to use
"""
current_num = string1.count(delimiter)
num_to_add = number - current_num
curlen = len(string1)
if num_to_add > 0:
if slen is not None:
len_to_add = slen - curlen
spacing = int(len_to_add / num_to_add)
xtra = len_to_add % num_to_add
s1 = ("%" + str(spacing) + "s") % delimiter
s1 = s1 * num_to_add + " " * xtra
string1 += s1
else:
string1 += delimiter * num_to_add
return string1
def format_narray(narray, fmt = '%16s', delimiter = ","):
"""
format a 2D numpy array to a list of strings
"""
narray = np.array(narray)
len1 = len(narray.shape)
if len1 == 1:
return [format_list(narray, fmt, delimiter)]
elif len1 == 2:
strlist = []
rownum = narray.shape[0]
for i in range(rownum):
stri = format_list(narray[i, :], fmt, delimiter)
strlist.append(stri)
return strlist
def format_list(list1, fmt = '%16s', delimiter = ","):
"""
format list of numbers to string.
delimiter defaults = ','
"""
string1 = delimiter.join(fmt % h for h in list1) + '\n'
return string1
def format_list2(list1, sfmt = '%16s', nfmt = '%16.8e', delimiter = ','):
"""
format list of numbers or strings to a delimited string.
Parameters
----------
list1 : list of numbers and strings
List to convert to string
sfmt : str
string formatter, ie "%16s"
nfmt : str
number format, ie '%16.8e'
delimiter : str
list delimiter
"""
outlist = []
for h in list1:
try:
outlist.append(nfmt % h)
except TypeError:
outlist.append(sfmt % h)
string1 = delimiter.join(outlist) + '\n'
return string1
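# Editor's note: illustrative call showing how format_list2 falls back to the string
# format for non-numeric entries; not part of the original module.
def _example_format_list2():
    # -> "   label,   3.142,  42.000\n"
    return format_list2(["label", 3.14159, 42], sfmt='%8s', nfmt='%8.3f')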
def wrap_list(list1, fmt = '%16s', delimiter = ",", maxcols = 8):
"""
format a list and wrap by max number of columns
"""
len1 = len(list1)
string = ""
for i in range(0, len1, maxcols):
li = list1[i : i + maxcols]
stri = format_list(li, fmt = fmt, delimiter = delimiter)
string += stri
return string
def empty_table_str(rows, columns, fmt='%16s', delimiter=','):
"""Construct empty string list for entry numbers of colnum"""
a = [[''] * columns] * rows
return format_narray(a, fmt=fmt, delimiter=delimiter)
#def hstack_arrays0(arrays, sfmt='%16s', nfmt='%16.8e',delimiter=','):
# """
# Horizontally stack arrays of varying row number and convert to strings.
#
# """
# maxrows = max(len(table) for table in arrays)
#
# for table in arrays:
# table = np.array(table)
# if np.ndim(table) <= 1:
# table = np.reshape(table, (-1, 1))
#
#
# rows, cols = table.shape
#
# try:
# s1 = format_narray(table, nfmt, delimiter)
# except TypeError:
# s1 = format_narray(table, sfmt, delimiter)
#
# if rows < maxrows:
# s2 = empty_table_str(rows, cols, fmt=sfmt, delimiter=delimiter)
# s1.extend(s2)
#
def hstack_arrays(arrays, sfmt = '%16s', nfmt = '%16.8e',delimiter = ','):
"""
    Horizontally stack arrays of possibly different row counts and convert
    them to delimited text lines.
Parameters
----------
arrays : list/iterable
Array-like objects
sfmt : str
String format (ex, '%16s')
nfmt : str
number format (ex, '%16.8e')
delimiter : str
Delimiter for columns
Returns
-------
slist : list of string lines
"""
s_tables = []
for table in arrays:
table = np.asarray(table)
if np.ndim(table) <= 1:
table = np.reshape(table, (-1, 1))
try:
s_table = format_narray(table, nfmt, delimiter)
except TypeError:
s_table = format_narray(table, sfmt, delimiter)
s_tables.append(s_table)
slist = hstack_str_list(s_tables, delimiter)
return slist
#def hstack_strings(str_lists):
# """
# Horizontally stack lists of strings
#
# arguments:
# ------
# str_lists : list of list of strings
# str_lists = [str_list1, str_list2, str_list3, ...]
#
# str_listi : list of strings
# List similar to output from file.readlines().
#
#
# """
#
# #First get maximum row count
# maxrows = max(len(slist) for slist in str_lists)
# listnum = len(str_lists)
# max_col_lengths = []
#
#
# #Obtain maximum line length
# for sl in str_lists:
# max_clen = max(len(line) for line in sl)
# max_col_lengths.append(max_clen)
#
#
# #Pad the bottoms of str lists so all lists are equal in row number
# for i, slist in enumerate(str_lists):
# rows = len(slist)
# max_clen = max_col_lengths[i]
#
# if rows < maxrows:
# newline = max_clen * ' '
# padding = [newline] * (maxrows - rows)
# slist.extend(padding)
#
#
#
#
#
#
# return
#
def hstack_str_list(str_lists, delimiter = ',', pad=False):
"""
    Horizontally stack lists of strings, given in the form:
    str_lists = [strlist1, strlist2, strlist3, ...]
    where each strlisti is a list of strings similar to the output of file.readlines().
    Prepares the strings for writing to file and appends newlines.
Parameters
----------
str_lists: list of list of str
Multiple lists of strings, each list to be horizontally stacked.
delimiter : str
Delimiter
Returns
-------
out : list of str
"""
listnum = len(str_lists)
max_col_lengths = []
max_col_nums = []
# Strip trailing delimiter from every line in string lists.
    for sl in str_lists:
        for i in range(len(sl)):
            sl[i] = sl[i].rstrip(delimiter)
# Obtain max line length and max number of columns defined by delimiter.
for sl in str_lists:
max_clen = max(len(line) for line in sl)
max_col_lengths.append(max_clen)
max_columns = max(line.count(delimiter) for line in sl) + 1
max_col_nums.append(max_columns)
max_row_length = max(len(sl) for sl in str_lists)
lines = []
for i in range(max_row_length):
line = ""
#Loop through each table to hstack
for j in range(listnum):
if j == listnum - 1:
dnum = max_col_nums[j] - 1
else:
dnum = max_col_nums[j]
line_len = max_col_lengths[j]
try:
linej = str_lists[j][i]
linej = linej.rstrip("\n") #Get rid of newlines
linej = pad_delimiters(linej, dnum, delimiter) #Pad out string with delimiters
if pad:
linej = linej.ljust(line_len) #Pad out the string with spaces
#Not every row in tables will have data. Pad these ones out
except IndexError:
linej = pad_delimiters("", dnum, delimiter, slen=line_len) #Pad out string with delimiters
if pad:
linej = linej.ljust(line_len) #Pad out the string with spaces
line += linej
lines.append(line + '\n')
return lines
def get_header_names(fname, row_num, delimiter = None):
"""
Return list of column header names as numpy array
"""
names = []
with open(fname) as f:
for i in range(row_num):
line = f.readline()
list_ = line.split(delimiter)
names.append(list_)
return np.array(names, dtype = str)
class StringTable(object):
"""
    Standardized string table for file output.
Parameters
----------
sfmt : str
formatting specifier for string formatting, ex. '%16s'.
nfmt : str
formatting specifier for numeric formatting, ex. '%16.8e'.
ifmt : str
formatting specifier for integer formatting, ex. '%16d'.
delimiter : str
specification for delimiter. Default is ','.
Usage
-----
Add data using self.add
- data = [A x B] array of A rows of data and B columns
- labels = [C x B] array-1d of labels for B columns.
- A = # of data rows
- B = # of data columns
- C = # of column label rows
Add data using "add" method.
"""
def __init__(self,
sfmt = '%s',
nfmt = '%.8e',
ifmt = '%d',
delimiter = ',',
header = None):
self.header_info = OrderedDict()
self.columnlabels = []
self.columns = []
self.formats = []
self.sfmt = sfmt
self.nfmt = nfmt
self.ifmt = ifmt
self.delimiter = delimiter
self.header = header
return
def add(self, labels, data, fmt = None):
"""
Add data to table.
Parameters
----------
data : [A, B] array
A rows of data and B columns
labels : [C, B] array
Labels for B columns.
fmt : str
Formatting string. If None, use default number format self.nfmt
* A = # of data rows
* B = # of data columns
* C = # of column label rows
"""
data = np.asarray(data)
self.columns.append(data)
self.columnlabels.append(labels)
if fmt is None:
dtype = data.dtype.name
if 'int' in dtype:
fmt = self.ifmt
else:
fmt = self.nfmt
self.formats.append(fmt)
return
# def add_info(self, **kwargs):
# """Add information to header """
# self.header_info.update(**kwargs)
# return
#
def write(self, fname, headers=False, labels=True):
"""
Write data to file fname. Can be file, path, or filelike.
"""
strlist = self.string_lines(headers=headers, labels=labels)
if not hasattr(fname, 'writelines'):
with open(fname, 'w') as f:
f.writelines(strlist)
else:
fname.writelines(strlist)
return
def string_lines(self, headers = False, labels = True):
"""
Construct string data for table.
"""
if self.header is not None:
if self.header[-1] != '\n':
self.header += '\n'
strlist = [self.header]
else:
strlist = []
# if headers:
# s = self._build_header()
# strlist.extend(s)
if labels:
s = self._build_labels()
strlist.extend(s)
data = self._build_data()
strlist.extend(data)
return strlist
@property
def colnum(self):
"""Return number of columns"""
colnum = 0
for table in self.columnlabels:
table = np.asarray(table)
if np.ndim(table) <= 1:
table = np.reshape(table, (1, -1))
colnum += table.shape[1]
return colnum
def _build_data(self):
s_tables = []
for table, fmt in zip(self.columns, self.formats):
table = np.array(table)
if np.ndim(table) <= 1:
table = np.reshape(table, (-1, 1))
try:
s_table = format_narray(table, fmt, self.delimiter)
except TypeError:
s_table = format_narray(table, self.sfmt, self.delimiter)
s_tables.append(s_table)
slist = hstack_str_list(s_tables, self.delimiter)
return slist
def _build_labels(self):
|
#
# def _build_header(self):
#
# list1 = []
# for key, val in self.header_info.items():
# fkey = key + '='
# try:
# fval = self.sfmt % val
# except TypeError:
# fval = self.nfmt % val
# list1.append(fkey)
# list1.append(fval)
#
# slist = wrap_list(list1, self.sfmt, self.delimiter, maxcols = self.colnum)
# return slist
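# Editor's note: sketch showing how StringTable stacks blocks with different row
# counts side by side; illustrative only, the helper name is hypothetical.
def _example_stringtable_uneven():
    t = StringTable()
    t.add(['a', 'b'], np.zeros((3, 2)))
    t.add(['c'], np.ones((5, 1)))
    # One label line plus five data lines; the shorter block is padded out.
    return t.string_lines()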
class ReadTable(object):
"""
Read file constructed using StringTable.
    All data is read as str into numpy arrays. Data can be indexed either
    positionally as a numpy array or by the keys found in the column labels.
Parameters
----------
f : file-like or iterable
file or iterable with text data
labelrows : list or None
List of column row numbers that have column label information.
By default, this is set to None to use only 0th row.
delimiter : str
Table delimiter
"""
def __init__(self, f, labelrows=None, delimiter=','):
self.lines = list(f)
if labelrows is None:
labelrows = [0]
self.labelrows = labelrows
self.delimiter = delimiter
self.labels = self._read_labels()
self.data = self._read_data()
return
    def _read_labels(self):
        """Read column headers/labels"""
        lines = self.lines
        hlines = [lines[i] for i in self.labelrows]
        header = np.genfromtxt(hlines, delimiter=self.delimiter, dtype=str)
        header = np.atleast_2d(header)  # keep a 2-D view even when there is a single label row
        header = np.core.defchararray.strip(header)
        add = np.core.defchararray.add
        header1 = add(header, '\n')
        newheader = header1[0]
        for h in header1[1:]:
            newheader = add(newheader, h)
        newheader = [s.strip() for s in newheader]
        #newheader = [s.replace('\n', '.') for s in newheader]
        return newheader
def _read_data(self):
"""Read tabular data"""
lines = self.lines
start = max(self.labelrows) + 1
d = np.genfromtxt(lines[start:],
delimiter=self.delimiter,
dtype=str)
return d
def dict(self):
d = {}
for i, l in enumerate(self.labels):
d[l] = self.data[:, i]
return d
def find(self, key):
"""
        Find keyword within labels and return all columns whose label contains it.
        Returns
        -------
        array of shape (a, b)
"""
d = []
for k in self.labels:
if key in k:
d.append(self[k])
return np.column_stack(d)
    def __getitem__(self, key):
        try:
            return self.dict()[key]
        except (KeyError, IndexError, TypeError):
            return self.data[key]
if __name__ == '__main__':
t = pad_delimiters('bob, joe, tim', 5, slen=50)
a = np.ones((10,3))
alabels = 'joe','bob','tim'
b= np.zeros((20,4))
blabels = 'p1','p2','p3','p4'
c = np.random.rand(40,5)
clabels = [['bob','john','cid','doe'],['b','j','c','d']]
s = StringTable()
s.add(alabels, c)
s.add(blabels, b)
s.add(clabels, a)
s.write('test-file.txt')
pass
| s_tables = []
for table in self.columnlabels:
table = np.array(table)
if np.ndim(table) <= 1:
table = np.reshape(table, (1, -1))
s_table = format_narray(table, self.sfmt, self.delimiter)
s_tables.append(s_table)
slist = hstack_str_list(s_tables, self.delimiter)
return slist | identifier_body |
write.py | """
Write tables to text.
"""
import numpy as np
from collections import OrderedDict
### String and table formatting #################################
def pad_delimiters(string1, number, delimiter=',', slen=None, ):
"""
Pad string1 to the requested number of delimiters.
Parameters
----------
string1 : str
String to be assessed
number : int
Number of total delimiters in final output string
delimiter : str
String type of delimiter to use
"""
current_num = string1.count(delimiter)
num_to_add = number - current_num
curlen = len(string1)
if num_to_add > 0:
if slen is not None:
len_to_add = slen - curlen
spacing = int(len_to_add / num_to_add)
xtra = len_to_add % num_to_add
s1 = ("%" + str(spacing) + "s") % delimiter
s1 = s1 * num_to_add + " " * xtra
string1 += s1
else:
string1 += delimiter * num_to_add
return string1
def format_narray(narray, fmt = '%16s', delimiter = ","):
"""
format a 2D numpy array to a list of strings
"""
narray = np.array(narray)
len1 = len(narray.shape)
if len1 == 1:
return [format_list(narray, fmt, delimiter)]
elif len1 == 2:
strlist = []
rownum = narray.shape[0]
for i in range(rownum):
stri = format_list(narray[i, :], fmt, delimiter)
strlist.append(stri)
return strlist
def format_list(list1, fmt = '%16s', delimiter = ","):
"""
format list of numbers to string.
delimiter defaults = ','
"""
string1 = delimiter.join(fmt % h for h in list1) + '\n'
return string1
def format_list2(list1, sfmt = '%16s', nfmt = '%16.8e', delimiter = ','):
"""
format list of numbers or strings to a delimited string.
Parameters
----------
list1 : list of numbers and strings
List to convert to string
sfmt : str
string formatter, ie "%16s"
nfmt : str
number format, ie '%16.8e'
delimiter : str
list delimiter
"""
outlist = []
for h in list1:
try:
outlist.append(nfmt % h)
except TypeError:
outlist.append(sfmt % h)
string1 = delimiter.join(outlist) + '\n'
return string1
def wrap_list(list1, fmt = '%16s', delimiter = ",", maxcols = 8):
"""
format a list and wrap by max number of columns
"""
len1 = len(list1)
string = ""
for i in range(0, len1, maxcols):
li = list1[i : i + maxcols]
stri = format_list(li, fmt = fmt, delimiter = delimiter)
string += stri
return string
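# Editor's note: illustrative wrap_list call, not part of the original module:
# eight values wrapped at three columns produce three output rows.
def _example_wrap_list():
    # -> "   0,   1,   2\n   3,   4,   5\n   6,   7\n"
    return wrap_list(list(range(8)), fmt='%4d', maxcols=3)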
def empty_table_str(rows, columns, fmt='%16s', delimiter=','):
"""Construct empty string list for entry numbers of colnum"""
a = [[''] * columns] * rows
return format_narray(a, fmt=fmt, delimiter=delimiter)
#def hstack_arrays0(arrays, sfmt='%16s', nfmt='%16.8e',delimiter=','):
# """
# Horizontally stack arrays of varying row number and convert to strings.
#
# """
# maxrows = max(len(table) for table in arrays)
#
# for table in arrays:
# table = np.array(table)
# if np.ndim(table) <= 1:
# table = np.reshape(table, (-1, 1))
#
#
# rows, cols = table.shape
#
# try:
# s1 = format_narray(table, nfmt, delimiter)
# except TypeError:
# s1 = format_narray(table, sfmt, delimiter)
#
# if rows < maxrows:
# s2 = empty_table_str(rows, cols, fmt=sfmt, delimiter=delimiter)
# s1.extend(s2)
#
def hstack_arrays(arrays, sfmt = '%16s', nfmt = '%16.8e',delimiter = ','):
"""
    Horizontally stack arrays of possibly different row counts and convert
    them to delimited text lines.
Parameters
----------
arrays : list/iterable
Array-like objects
sfmt : str
String format (ex, '%16s')
nfmt : str
number format (ex, '%16.8e')
delimiter : str
Delimiter for columns
Returns
-------
slist : list of string lines
"""
s_tables = []
for table in arrays:
table = np.asarray(table)
if np.ndim(table) <= 1:
table = np.reshape(table, (-1, 1))
try:
s_table = format_narray(table, nfmt, delimiter)
except TypeError:
s_table = format_narray(table, sfmt, delimiter)
s_tables.append(s_table)
slist = hstack_str_list(s_tables, delimiter)
return slist
#def hstack_strings(str_lists):
# """
# Horizontally stack lists of strings
#
# arguments:
# ------
# str_lists : list of list of strings
# str_lists = [str_list1, str_list2, str_list3, ...]
#
# str_listi : list of strings
# List similar to output from file.readlines().
#
#
# """
#
# #First get maximum row count
# maxrows = max(len(slist) for slist in str_lists)
# listnum = len(str_lists)
# max_col_lengths = []
#
#
# #Obtain maximum line length
# for sl in str_lists:
# max_clen = max(len(line) for line in sl)
# max_col_lengths.append(max_clen)
#
#
# #Pad the bottoms of str lists so all lists are equal in row number
# for i, slist in enumerate(str_lists):
# rows = len(slist)
# max_clen = max_col_lengths[i]
#
# if rows < maxrows:
# newline = max_clen * ' '
# padding = [newline] * (maxrows - rows)
# slist.extend(padding)
#
#
#
#
#
#
# return
#
def hstack_str_list(str_lists, delimiter = ',', pad=False):
"""
    Horizontally stack lists of strings, given in the form:
    str_lists = [strlist1, strlist2, strlist3, ...]
    where each strlisti is a list of strings similar to the output of file.readlines().
    Prepares the strings for writing to file and appends newlines.
Parameters
----------
str_lists: list of list of str
Multiple lists of strings, each list to be horizontally stacked.
delimiter : str
Delimiter
Returns
-------
out : list of str
"""
listnum = len(str_lists)
max_col_lengths = []
max_col_nums = []
# Strip trailing delimiter from every line in string lists.
    for sl in str_lists:
        for i in range(len(sl)):
            sl[i] = sl[i].rstrip(delimiter)
# Obtain max line length and max number of columns defined by delimiter.
for sl in str_lists:
max_clen = max(len(line) for line in sl)
max_col_lengths.append(max_clen)
max_columns = max(line.count(delimiter) for line in sl) + 1
max_col_nums.append(max_columns)
max_row_length = max(len(sl) for sl in str_lists)
lines = []
for i in range(max_row_length):
line = ""
#Loop through each table to hstack
for j in range(listnum):
if j == listnum - 1:
dnum = max_col_nums[j] - 1
else:
dnum = max_col_nums[j]
line_len = max_col_lengths[j]
try:
linej = str_lists[j][i]
linej = linej.rstrip("\n") #Get rid of newlines
linej = pad_delimiters(linej, dnum, delimiter) #Pad out string with delimiters
if pad:
linej = linej.ljust(line_len) #Pad out the string with spaces
#Not every row in tables will have data. Pad these ones out
except IndexError:
linej = pad_delimiters("", dnum, delimiter, slen=line_len) #Pad out string with delimiters
if pad:
linej = linej.ljust(line_len) #Pad out the string with spaces
line += linej
lines.append(line + '\n')
return lines
def get_header_names(fname, row_num, delimiter = None):
"""
Return list of column header names as numpy array
"""
names = []
with open(fname) as f:
for i in range(row_num):
line = f.readline()
list_ = line.split(delimiter)
names.append(list_)
return np.array(names, dtype = str)
class StringTable(object):
"""
    Standardized string table for file output.
Parameters
----------
sfmt : str
formatting specifier for string formatting, ex. '%16s'.
nfmt : str
formatting specifier for numeric formatting, ex. '%16.8e'.
ifmt : str
formatting specifier for integer formatting, ex. '%16d'.
delimiter : str
specification for delimiter. Default is ','.
Usage
-----
Add data using self.add
- data = [A x B] array of A rows of data and B columns
- labels = [C x B] array-1d of labels for B columns.
- A = # of data rows
- B = # of data columns
- C = # of column label rows
Add data using "add" method.
"""
def __init__(self,
sfmt = '%s',
nfmt = '%.8e',
ifmt = '%d',
delimiter = ',',
header = None):
self.header_info = OrderedDict()
self.columnlabels = []
self.columns = []
self.formats = []
self.sfmt = sfmt
self.nfmt = nfmt
self.ifmt = ifmt
self.delimiter = delimiter
self.header = header
return
def add(self, labels, data, fmt = None):
"""
Add data to table.
Parameters
----------
data : [A, B] array
A rows of data and B columns
labels : [C, B] array
Labels for B columns.
fmt : str
Formatting string. If None, use default number format self.nfmt
* A = # of data rows
* B = # of data columns
* C = # of column label rows
"""
data = np.asarray(data)
self.columns.append(data)
self.columnlabels.append(labels)
if fmt is None:
dtype = data.dtype.name
if 'int' in dtype:
fmt = self.ifmt
else:
fmt = self.nfmt
self.formats.append(fmt)
return
# def add_info(self, **kwargs):
# """Add information to header """
# self.header_info.update(**kwargs)
# return
#
def write(self, fname, headers=False, labels=True):
"""
Write data to file fname. Can be file, path, or filelike.
"""
strlist = self.string_lines(headers=headers, labels=labels)
if not hasattr(fname, 'writelines'):
with open(fname, 'w') as f:
f.writelines(strlist)
else:
fname.writelines(strlist)
return
def string_lines(self, headers = False, labels = True):
"""
Construct string data for table.
"""
if self.header is not None:
if self.header[-1] != '\n':
self.header += '\n'
strlist = [self.header]
else:
strlist = []
# if headers:
# s = self._build_header()
# strlist.extend(s)
if labels:
s = self._build_labels()
strlist.extend(s)
data = self._build_data()
strlist.extend(data)
return strlist
@property
def colnum(self):
"""Return number of columns"""
colnum = 0
for table in self.columnlabels:
table = np.asarray(table)
if np.ndim(table) <= 1:
table = np.reshape(table, (1, -1))
colnum += table.shape[1]
return colnum
def _build_data(self):
s_tables = []
for table, fmt in zip(self.columns, self.formats):
table = np.array(table)
if np.ndim(table) <= 1:
table = np.reshape(table, (-1, 1))
try:
s_table = format_narray(table, fmt, self.delimiter)
except TypeError:
s_table = format_narray(table, self.sfmt, self.delimiter)
s_tables.append(s_table)
slist = hstack_str_list(s_tables, self.delimiter)
return slist
def _build_labels(self):
s_tables = []
for table in self.columnlabels:
table = np.array(table)
if np.ndim(table) <= 1:
table = np.reshape(table, (1, -1))
s_table = format_narray(table, self.sfmt, self.delimiter)
s_tables.append(s_table)
slist = hstack_str_list(s_tables, self.delimiter)
return slist
#
# def _build_header(self):
#
# list1 = []
# for key, val in self.header_info.items():
# fkey = key + '='
# try:
# fval = self.sfmt % val
# except TypeError:
# fval = self.nfmt % val
# list1.append(fkey)
# list1.append(fval)
#
# slist = wrap_list(list1, self.sfmt, self.delimiter, maxcols = self.colnum)
# return slist
class ReadTable(object):
"""
Read file constructed using StringTable.
    All data is read as str into numpy arrays. Data can be indexed either
    positionally as a numpy array or by the keys found in the column labels.
Parameters
----------
f : file-like or iterable
file or iterable with text data
labelrows : list or None
List of column row numbers that have column label information.
By default, this is set to None to use only 0th row.
delimiter : str
Table delimiter
"""
def __init__(self, f, labelrows=None, delimiter=','):
self.lines = list(f)
if labelrows is None:
labelrows = [0]
self.labelrows = labelrows
self.delimiter = delimiter
self.labels = self._read_labels()
self.data = self._read_data()
return
def | (self):
"""Read columnn headers/labels"""
lines = self.lines
hlines = [lines[i] for i in self.labelrows]
header = np.genfromtxt(hlines, delimiter=',', dtype=str)
header = np.core.defchararray.strip(header)
add = np.core.defchararray.add
header1 = add(header, '\n')
newheader = header1[0]
for h in header1[1:]:
newheader = add(newheader, h)
newheader = [s.strip() for s in newheader]
#newheader = [s.replace('\n', '.') for s in newheader]
return newheader
def _read_data(self):
"""Read tabular data"""
lines = self.lines
start = max(self.labelrows) + 1
d = np.genfromtxt(lines[start:],
delimiter=self.delimiter,
dtype=str)
return d
def dict(self):
d = {}
for i, l in enumerate(self.labels):
d[l] = self.data[:, i]
return d
def find(self, key):
"""
        Find keyword within labels and return all columns whose label contains it.
        Returns
        -------
        array of shape (a, b)
"""
d = []
for k in self.labels:
if key in k:
d.append(self[k])
return np.column_stack(d)
    def __getitem__(self, key):
        try:
            return self.dict()[key]
        except (KeyError, IndexError, TypeError):
            return self.data[key]
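# Editor's note: small ReadTable sketch (illustrative; assumes the single-label-row
# handling above). Any iterable of lines works, so a plain list stands in for a file.
def _example_readtable():
    lines = ["a,b\n", "1,2\n", "3,4\n"]
    table = ReadTable(lines)
    return table.labels, table['a']  # labels ['a', 'b'] and the first data column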
if __name__ == '__main__':
t = pad_delimiters('bob, joe, tim', 5, slen=50)
a = np.ones((10,3))
alabels = 'joe','bob','tim'
b= np.zeros((20,4))
blabels = 'p1','p2','p3','p4'
c = np.random.rand(40,5)
clabels = [['bob','john','cid','doe'],['b','j','c','d']]
s = StringTable()
s.add(alabels, c)
s.add(blabels, b)
s.add(clabels, a)
s.write('test-file.txt')
pass
| _read_labels | identifier_name |
final_cnn_mydata.py | from __future__ import print_function, division
import os
import time
import torch
import pandas as pd
#from skarr import io, transform
import numpy as np
import matplotlib.pyplot as plt
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms, utils
from torch.utils.tensorboard import SummaryWriter
import torch.nn as nn
import torchvision
from torch.utils.tensorboard import SummaryWriter
import torch.nn.functional as F | from torchsummary import summary
from pathlib import Path
from datetime import date
from datetime import datetime
import sys,re
from sklearn.metrics import confusion_matrix
# Ignore warnings
import warnings
warnings.filterwarnings("ignore")
writer = SummaryWriter()
class mydataset(Dataset):
def __init__(self, hdf_filepath,transform=None):
self.df = pd.read_hdf(str(hdf_filepath), key="df")
self.transform = transform
def __len__(self):
return self.df.shape[1]
def __getitem__(self, col_id):
if torch.is_tensor(col_id):
col_id = col_id.tolist()
arrpaths = self.df.iloc[:,col_id]
labels=[]
arrs=[]
for i,arrpath in enumerate(arrpaths):
arr = np.load(arrpath)
if self.transform:
arr = self.transform(arr)
label = col_id
labels.append(label)
arrs.append(arr)
return (arrs, labels)
class ToTensor(object):
def __call__(self, array):
array = np.expand_dims(array,axis=0)
return torch.from_numpy(array).float()
class CreateDataLoader:
def __init__(self,filepath,str_type="Train", batch_len=5):
self.filepath = filepath
self.str_type = str_type
self.batch_len = batch_len
def create_dataloader(self):
#transformed_ds = mydataset(hdf_filepath=str(filepath), transform = transforms.Compose([transforms.Grayscale(num_output_channels=1) , transforms.Resize((64,64)), transforms.ToTensor(), transforms.Normalize((0.5,), (0.5,) ) ]))
transformed_ds = mydataset(hdf_filepath=str(self.filepath), transform=transforms.Compose([ToTensor()]))
arrs=[]
labels=[]
data=[]
for column_id in range(len(transformed_ds)):
im, lab = transformed_ds[column_id]
for i in range(len(im)):
data.append([im[i], lab[i]])
arrs.append(im)
labels.append(lab)
labels = np.array(labels)
if self.str_type=="Train":
print(self.str_type +" data len ",len(data))
dataloader = torch.utils.data.DataLoader(data, shuffle=True, batch_size=self.batch_len)
elif self.str_type=="Test":
print("testdata len ",len(data))
dataloader = torch.utils.data.DataLoader(data, shuffle=False, batch_size=self.batch_len)
i1, l1 = next(iter(dataloader))
print(i1.shape)
return dataloader
class VGG16(torch.nn.Module):
def __init__(self, num_features, num_classes):
super(VGG16, self).__init__()
self.block_1 = nn.Sequential(nn.Conv2d(in_channels=1, out_channels=64, kernel_size=(3,3), stride=(1,1), padding=1),
nn.ReLU(),
nn.Conv2d(in_channels=64, out_channels=64, kernel_size=(3,3),stride=(1,1),padding=1),
nn.ReLU(),
nn.Conv2d(in_channels=64, out_channels=64, kernel_size=(3,3),stride=(1,1),padding=1),
nn.ReLU(),
nn.Conv2d(in_channels=64, out_channels=64, kernel_size=(3,3),stride=(1,1),padding=1),
nn.ReLU(),
nn.MaxPool2d(kernel_size=(2,2), stride=(2,2))
)
#self.block_2 = nn.Sequential(nn.Conv2d(in_channels=64, out_channels=64, kernel_size=(3,3), stride=(1,1), padding=1),
# nn.ReLU(),
# nn.Conv2d(in_channels=64, out_channels=64, kernel_size=(3,3),stride=(1,1),padding=1),
# nn.ReLU(),
# nn.MaxPool2d(kernel_size=(2,2), stride=(2,2))
# )
#self.block_3 = nn.Sequential(nn.Conv2d(in_channels=128, out_channels=256, kernel_size=(3,3), stride=(1,1), padding=1),
# nn.ReLU(),
# nn.Conv2d(in_channels=256, out_channels=256, kernel_size=(3,3),stride=(1,1),padding=1),
# nn.ReLU(),
# nn.MaxPool2d(kernel_size=(2,2), stride=(2,2))
# )
#self.block_4 = nn.Sequential(nn.Conv2d(in_channels=256, out_channels=512, kernel_size=(3,3), stride=(1,1), padding=1),
# nn.ReLU(),
# nn.Conv2d(in_channels=512, out_channels=512, kernel_size=(3,3),stride=(1,1),padding=1),
# nn.ReLU(),
# nn.MaxPool2d(kernel_size=(2,2), stride=(2,2))
# )
#self.block_5 = nn.Sequential(nn.Conv2d(in_channels=512, out_channels=512, kernel_size=(3,3), stride=(1,1), padding=1),
# nn.ReLU(),
# nn.Conv2d(in_channels=512, out_channels=512, kernel_size=(3,3),stride=(1,1),padding=1),
# nn.ReLU(),
# nn.MaxPool2d(kernel_size=(2,2), stride=(2,2))
# )
self.classifier = nn.Sequential(
nn.Linear(64*32*32, 1024),
nn.Dropout(0.5),
nn.ReLU(),
nn.Linear(1024,512),
nn.Dropout(0.5),
nn.ReLU(),
nn.Linear(512,num_classes)
)
#pytorch initializes automatically
#https://stackoverflow.com/questions/49433936/how-to-initialize-weights-in-pytorch
#for m in self.modules():
# if isinstance(m , torch.nn.Conv2d):
# m.weight.detach().normal_(0,0.05)
def forward(self,x):
x = self.block_1(x)
#x = self.block_2(x)
#x = self.block_3(x)
#x = self.block_4(x)
#x = self.block_5(x)
#print("x shape ", x.shape)
logits = self.classifier(x.view(-1,64*32*32))
probs = F.softmax(logits, dim=1)
return logits, probs
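# Editor's note (illustrative, not part of the original script): shape walk-through
# for the network above, assuming the 1x64x64 inputs produced by ToTensor().
# (N, 1, 64, 64) -> block_1 convs (padding=1) -> (N, 64, 64, 64)
#                -> MaxPool2d(2, 2)           -> (N, 64, 32, 32)
#                -> flatten                   -> (N, 64*32*32) -> logits (N, num_classes)
def _example_vgg16_shapes():
    net = VGG16(num_features=64 * 64, num_classes=25)
    logits, probs = net(torch.zeros(2, 1, 64, 64))
    return logits.shape, probs.shape  # both torch.Size([2, 25])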
def compute_accuracy(model, data_loader):
correct_pred, num_examples, data_loss = 0, 0, 0
for i, (features, targets) in enumerate(data_loader):
features = features.to(device)
targets = targets.to(device)
logits, probs = model(features)
cost = F.cross_entropy(logits, targets)
data_loss += cost.item()
_, predicted_labels = torch.max(probs, 1)
num_examples += targets.size(0)
correct_pred += (predicted_labels == targets).sum()
return correct_pred.float()/num_examples * 100, data_loss
def matplotlib_imshow(img, one_channel=False):
if one_channel:
img = img.mean(dim=0)
img = img / 2 + 0.5 # unnormalize
imgtemp = img.cpu()
npimg = imgtemp.numpy()
if one_channel:
plt.imshow(npimg, cmap="Greys")
else:
plt.imshow(np.transpose(npimg, (1, 2, 0)))
def arrs_to_probs(net, arrs):
'''
Generates predictions and corresponding probabilities from a trained
network and a list of arrs
'''
#net = net.to(dtype=torch.float, device = torch.device("cpu"))
net = net.cuda()
output, probs = net(arrs.to(device))
_, preds_tensor = torch.max(output, 1)
preds_tensortemp = preds_tensor.cpu()
preds = np.squeeze(preds_tensortemp.numpy())
return torch.from_numpy(preds), [F.softmax(el, dim=0)[i].item() for i, el in zip(preds, output)]
def plot_confusion(confusion_mat,fname="conf_mat"):
fig,ax = plt.subplots(figsize=(11,8))
    ax.set(xticks=np.arange(confusion_mat.shape[1]), yticks=np.arange(confusion_mat.shape[0]), xticklabels=classes, yticklabels=classes, title=None, ylabel="True label", xlabel="Predicted label")
im = ax.imshow(confusion_mat, interpolation="nearest", cmap= plt.cm.Blues)
ax.figure.colorbar(im,ax=ax)
plt.setp(ax.get_xticklabels(), rotation=45, ha="right",
rotation_mode="anchor")
fmt = 'd'
thresh = confusion_mat.max() / 2.
for i in range(confusion_mat.shape[0]):
for j in range(confusion_mat.shape[1]):
ax.text(j, i, format(confusion_mat[i, j], fmt),ha="center", va="center",color="white" if confusion_mat[i, j] > thresh else "black")
fig.tight_layout()
plt.savefig(fname+".png")
plt.close()
def plot_classes_preds(model, arrs, labels):
preds, probs = arrs_to_probs(model, arrs)
# plot the arrs in the batch, along with predicted and true labels
fig = plt.figure(figsize=(48,12))
arrs = arrs.cpu().numpy()
arrs = arrs.reshape(-1,64,64)
for idx in np.arange(9):
ax = fig.add_subplot(3, 3, idx+1, xticks=[], yticks=[])
ax.imshow(arrs[idx], interpolation="bicubic", cmap="nipy_spectral")
#matplotlib_imshow(arrs[idx])
ax.set_title("{0}, {1:.1f}%\n(label: {2})".format(
classes[preds[idx]],
probs[idx] * 100.0,
classes[labels[idx]]),
color=("green" if preds[idx]==labels[idx].item() else "red"))
plt.savefig("pred_vs_true_4classes_rasca_batch"+str(num_batch)+"_epoch"+str(num_epochs)+"_classes"+str(num_classes)+"_mydata"+".png")
return fig
def check_labels(dataloader):
print("Size ", len(dataloader))
input1, label1 = next(iter(dataloader))
print("label ", label1)
print("label size ", label1.size())
class Plotter:
    def __init__(self, epochs, train_nums, val_nums, x_label, y_label, str_type="Losses"):  # str_type => "Losses" or "Accuracy"
self.epochs = epochs
self.train_nums = train_nums #train accs or losses
self.val_nums = val_nums
self.x_label = x_label
self.y_label = y_label
self.str_type = str_type
def plotter(self):
plt.plot(self.epochs, self.train_nums, label="Train")
plt.plot(self.epochs, self.val_nums, label="Test")
plt.xlabel(self.x_label)
plt.ylabel(self.y_label)
plt.xticks(ticks=epochs)
plt.legend()
plotdir="cnn_plots_"+str(date.today())
if not os.path.exists(plotdir):
os.mkdir(plotdir)
plotname= plotdir+"/"+self.str_type+"_"+str(num_batch)+"_epoch"+str(num_epochs)+"_"+str(date.today())+".png"
plt.savefig(plotname)
print(plotname+" is created")
plt.close()
def compute_pred_per_batch(model, validloader):
predlist = torch.zeros(0, dtype= torch.long, device="cpu")
lablist = torch.zeros(0, dtype= torch.long, device="cpu")
for batch_idx, (features, targets) in enumerate(validloader):
preds, _ = arrs_to_probs(model, features)
predlist = torch.cat([predlist, preds.view(-1).cpu()])
lablist = torch.cat([lablist, targets.view(-1).cpu()])
return (predlist, lablist)
if __name__=="__main__":
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print('Device:', device)
# Hyperparameters
learning_rate = 0.001
num_epochs = 30
# GLOBAL VARIABLES
num_features = 64*64
#num_classes = 16
num_classes = 25
num_batch=32
model = VGG16(num_features=num_features, num_classes= num_classes)
model.to(device)
summary(model, (1,64,64))
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
start_time = time.time()
p = Path.home()
test_filepath =p/"numpy_cnn/hdfs_2019-12-26"/"hdfs_2019-12-26_all_test.h5"
train_filepath =p/"numpy_cnn/hdfs_2019-12-26"/"hdfs_2019-12-26_all_train.h5"
TrainLd = CreateDataLoader(train_filepath,"Train",num_batch)
trainloader = TrainLd.create_dataloader()
TestLd = CreateDataLoader(test_filepath,"Test",num_batch)
validloader = TestLd.create_dataloader()
torch.save(trainloader, "trainloader"+"_"+str(datetime.now().time()))
torch.save(validloader, "validloader"+"_"+str(datetime.now().time()))
#sys.exit()
print("check train labels:")
check_labels(trainloader)
print("_______________________________________")
print("check test labels:")
check_labels(validloader)
epochs = []
train_accs=[]
val_accs=[]
train_losses=[]
val_losses=[]
running_loss=0.0
df = pd.read_hdf(test_filepath, key="df")
print(type(df.columns))
print(df.columns[1])
classes=[]
for col in df.columns:
sigx = col.split('_')[0]
sigy = col.split('_')[1]
        x_val = next(iter(re.findall(r"\d+\.\d+", sigx)))
        y_val = next(iter(re.findall(r"\d+\.\d+", sigy)))
val = "x"+str(x_val)+"_"+"y"+str(y_val)
classes.append(val)
print("classes", classes)
#FOR Confusion matrix
predlist = torch.zeros(0, dtype= torch.long, device="cpu")
lablist = torch.zeros(0, dtype= torch.long, device="cpu")
for epoch in range(num_epochs):
model.train()
for batch_idx, (features, targets) in enumerate(trainloader):
features = features.to(device)
targets = targets.to(device)
### FORWARD AND BACK PROP
logits, probas = model(features)
cost = F.cross_entropy(logits, targets)
optimizer.zero_grad()
cost.backward()
### UPDATE MODEL PARAMETERS
optimizer.step()
running_loss += cost.item()
### LOGGING
if not batch_idx % 50:
print ('Epoch: %03d/%03d | Batch %04d/%04d | Cost: %.4f'
%(epoch+1, num_epochs, batch_idx,
len(trainloader), cost))
# ...log the running loss
writer.add_scalar('training loss',
running_loss / 100,
epoch * len(trainloader) + batch_idx)
# ...log a Matplotlib Figure showing the model's predictions on a
# random mini-batch
#writer.add_figure('predictions vs. actuals',
# plot_classes_preds(model.cpu(), features, targets),
# global_step=epoch * len(trainloader) + batch_idx)
running_loss = 0.0
with torch.no_grad():
if epoch==num_epochs-1:
#preds, _ = arrs_to_probs(model, features)
#print("type pred ", type(preds))
#print("preds shape ", preds.shape)
#predlist = torch.cat([predlist, preds.view(-1).cpu()])
#lablist = torch.cat([lablist, targets.view(-1).cpu()])
predlist, lablist = compute_pred_per_batch(model, validloader)
if batch_idx== len(trainloader)-1:
#plot_classes_preds(model.cpu(), features, targets)
print("inside plot_classes")
model.eval()
#with torch.set_grad_enabled(False): # save memory during inference
with torch.no_grad(): # save memory during inference
epochs.append(epoch)
train_acc, train_loss = compute_accuracy(model, trainloader)
val_acc, val_loss = compute_accuracy(model, validloader)
train_accs.append(train_acc)
val_accs.append(val_acc)
train_losses.append(train_loss)
val_losses.append(val_loss)
print('Epoch: %03d/%03d | Train: %.3f%% | Valid: %.3f%%' % (
epoch+1, num_epochs,
train_acc,
val_acc))
print('Time elapsed: %.2f min' % ((time.time() - start_time)/60))
print('Total Training Time: %.2f min' % ((time.time() - start_time)/60))
#WRITE ARCHITECTURE
arrs, labels = next(iter(trainloader))
#img_grid = torchvision.utils.make_grid(arrs)
model = model.to(dtype=torch.float, device = torch.device("cpu"))
writer.add_graph(model, arrs)
#SAVE MODEL
torch.save(model.state_dict(), "model"+"_"+str(datetime.now().time()))
#MAKE PLOTS
LossPlot= Plotter(epochs,train_losses, val_losses,"Epochs","Loss","Losses")
LossPlot.plotter()
    AccPlot = Plotter(epochs, train_accs, val_accs, "Epochs", "Accuracy", "Accuracies")
AccPlot.plotter()
testpreds, testlabs = compute_pred_per_batch(model, validloader)
conf_mat_train = confusion_matrix(lablist.numpy(), predlist.numpy())
conf_mat_test = confusion_matrix(testlabs.numpy(), testpreds.numpy())
plot_confusion(conf_mat_train, "conf_train_test5")
plot_confusion(conf_mat_test, "conf_test_test5") | from torchvision import datasets
from torchvision import transforms | random_line_split |
final_cnn_mydata.py | from __future__ import print_function, division
import os
import time
import torch
import pandas as pd
#from skarr import io, transform
import numpy as np
import matplotlib.pyplot as plt
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms, utils
from torch.utils.tensorboard import SummaryWriter
import torch.nn as nn
import torchvision
from torch.utils.tensorboard import SummaryWriter
import torch.nn.functional as F
from torchvision import datasets
from torchvision import transforms
from torchsummary import summary
from pathlib import Path
from datetime import date
from datetime import datetime
import sys,re
from sklearn.metrics import confusion_matrix
# Ignore warnings
import warnings
warnings.filterwarnings("ignore")
writer = SummaryWriter()
class mydataset(Dataset):
def __init__(self, hdf_filepath,transform=None):
self.df = pd.read_hdf(str(hdf_filepath), key="df")
self.transform = transform
def __len__(self):
return self.df.shape[1]
def __getitem__(self, col_id):
if torch.is_tensor(col_id):
col_id = col_id.tolist()
arrpaths = self.df.iloc[:,col_id]
labels=[]
arrs=[]
for i,arrpath in enumerate(arrpaths):
arr = np.load(arrpath)
if self.transform:
arr = self.transform(arr)
label = col_id
labels.append(label)
arrs.append(arr)
return (arrs, labels)
class ToTensor(object):
def __call__(self, array):
array = np.expand_dims(array,axis=0)
return torch.from_numpy(array).float()
class CreateDataLoader:
def __init__(self,filepath,str_type="Train", batch_len=5):
self.filepath = filepath
self.str_type = str_type
self.batch_len = batch_len
def create_dataloader(self):
#transformed_ds = mydataset(hdf_filepath=str(filepath), transform = transforms.Compose([transforms.Grayscale(num_output_channels=1) , transforms.Resize((64,64)), transforms.ToTensor(), transforms.Normalize((0.5,), (0.5,) ) ]))
transformed_ds = mydataset(hdf_filepath=str(self.filepath), transform=transforms.Compose([ToTensor()]))
arrs=[]
labels=[]
data=[]
for column_id in range(len(transformed_ds)):
im, lab = transformed_ds[column_id]
for i in range(len(im)):
data.append([im[i], lab[i]])
arrs.append(im)
labels.append(lab)
labels = np.array(labels)
if self.str_type=="Train":
print(self.str_type +" data len ",len(data))
dataloader = torch.utils.data.DataLoader(data, shuffle=True, batch_size=self.batch_len)
elif self.str_type=="Test":
print("testdata len ",len(data))
dataloader = torch.utils.data.DataLoader(data, shuffle=False, batch_size=self.batch_len)
i1, l1 = next(iter(dataloader))
print(i1.shape)
return dataloader
class VGG16(torch.nn.Module):
def __init__(self, num_features, num_classes):
super(VGG16, self).__init__()
self.block_1 = nn.Sequential(nn.Conv2d(in_channels=1, out_channels=64, kernel_size=(3,3), stride=(1,1), padding=1),
nn.ReLU(),
nn.Conv2d(in_channels=64, out_channels=64, kernel_size=(3,3),stride=(1,1),padding=1),
nn.ReLU(),
nn.Conv2d(in_channels=64, out_channels=64, kernel_size=(3,3),stride=(1,1),padding=1),
nn.ReLU(),
nn.Conv2d(in_channels=64, out_channels=64, kernel_size=(3,3),stride=(1,1),padding=1),
nn.ReLU(),
nn.MaxPool2d(kernel_size=(2,2), stride=(2,2))
)
#self.block_2 = nn.Sequential(nn.Conv2d(in_channels=64, out_channels=64, kernel_size=(3,3), stride=(1,1), padding=1),
# nn.ReLU(),
# nn.Conv2d(in_channels=64, out_channels=64, kernel_size=(3,3),stride=(1,1),padding=1),
# nn.ReLU(),
# nn.MaxPool2d(kernel_size=(2,2), stride=(2,2))
# )
#self.block_3 = nn.Sequential(nn.Conv2d(in_channels=128, out_channels=256, kernel_size=(3,3), stride=(1,1), padding=1),
# nn.ReLU(),
# nn.Conv2d(in_channels=256, out_channels=256, kernel_size=(3,3),stride=(1,1),padding=1),
# nn.ReLU(),
# nn.MaxPool2d(kernel_size=(2,2), stride=(2,2))
# )
#self.block_4 = nn.Sequential(nn.Conv2d(in_channels=256, out_channels=512, kernel_size=(3,3), stride=(1,1), padding=1),
# nn.ReLU(),
# nn.Conv2d(in_channels=512, out_channels=512, kernel_size=(3,3),stride=(1,1),padding=1),
# nn.ReLU(),
# nn.MaxPool2d(kernel_size=(2,2), stride=(2,2))
# )
#self.block_5 = nn.Sequential(nn.Conv2d(in_channels=512, out_channels=512, kernel_size=(3,3), stride=(1,1), padding=1),
# nn.ReLU(),
# nn.Conv2d(in_channels=512, out_channels=512, kernel_size=(3,3),stride=(1,1),padding=1),
# nn.ReLU(),
# nn.MaxPool2d(kernel_size=(2,2), stride=(2,2))
# )
self.classifier = nn.Sequential(
nn.Linear(64*32*32, 1024),
nn.Dropout(0.5),
nn.ReLU(),
nn.Linear(1024,512),
nn.Dropout(0.5),
nn.ReLU(),
nn.Linear(512,num_classes)
)
#pytorch initializes automatically
#https://stackoverflow.com/questions/49433936/how-to-initialize-weights-in-pytorch
#for m in self.modules():
# if isinstance(m , torch.nn.Conv2d):
# m.weight.detach().normal_(0,0.05)
def forward(self,x):
x = self.block_1(x)
#x = self.block_2(x)
#x = self.block_3(x)
#x = self.block_4(x)
#x = self.block_5(x)
#print("x shape ", x.shape)
logits = self.classifier(x.view(-1,64*32*32))
probs = F.softmax(logits, dim=1)
return logits, probs
def compute_accuracy(model, data_loader):
correct_pred, num_examples, data_loss = 0, 0, 0
for i, (features, targets) in enumerate(data_loader):
features = features.to(device)
targets = targets.to(device)
logits, probs = model(features)
cost = F.cross_entropy(logits, targets)
data_loss += cost.item()
_, predicted_labels = torch.max(probs, 1)
num_examples += targets.size(0)
correct_pred += (predicted_labels == targets).sum()
return correct_pred.float()/num_examples * 100, data_loss
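# Editor's note: compute_accuracy reads the module-level `device` set under
# __main__ below; a typical evaluation call wraps it in eval mode and no_grad,
# as sketched here (illustrative helper, not part of the original script).
def _example_evaluate(model, loader):
    model.eval()
    with torch.no_grad():
        acc, loss = compute_accuracy(model, loader)
    return acc, loss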
def matplotlib_imshow(img, one_channel=False):
if one_channel:
img = img.mean(dim=0)
img = img / 2 + 0.5 # unnormalize
imgtemp = img.cpu()
npimg = imgtemp.numpy()
if one_channel:
plt.imshow(npimg, cmap="Greys")
else:
plt.imshow(np.transpose(npimg, (1, 2, 0)))
def arrs_to_probs(net, arrs):
'''
Generates predictions and corresponding probabilities from a trained
network and a list of arrs
'''
#net = net.to(dtype=torch.float, device = torch.device("cpu"))
net = net.cuda()
output, probs = net(arrs.to(device))
_, preds_tensor = torch.max(output, 1)
preds_tensortemp = preds_tensor.cpu()
preds = np.squeeze(preds_tensortemp.numpy())
return torch.from_numpy(preds), [F.softmax(el, dim=0)[i].item() for i, el in zip(preds, output)]
def plot_confusion(confusion_mat,fname="conf_mat"):
fig,ax = plt.subplots(figsize=(11,8))
    ax.set(xticks=np.arange(confusion_mat.shape[1]), yticks=np.arange(confusion_mat.shape[0]), xticklabels=classes, yticklabels=classes, title=None, ylabel="True label", xlabel="Predicted label")
im = ax.imshow(confusion_mat, interpolation="nearest", cmap= plt.cm.Blues)
ax.figure.colorbar(im,ax=ax)
plt.setp(ax.get_xticklabels(), rotation=45, ha="right",
rotation_mode="anchor")
fmt = 'd'
thresh = confusion_mat.max() / 2.
for i in range(confusion_mat.shape[0]):
for j in range(confusion_mat.shape[1]):
ax.text(j, i, format(confusion_mat[i, j], fmt),ha="center", va="center",color="white" if confusion_mat[i, j] > thresh else "black")
fig.tight_layout()
plt.savefig(fname+".png")
plt.close()
def plot_classes_preds(model, arrs, labels):
preds, probs = arrs_to_probs(model, arrs)
# plot the arrs in the batch, along with predicted and true labels
fig = plt.figure(figsize=(48,12))
arrs = arrs.cpu().numpy()
arrs = arrs.reshape(-1,64,64)
for idx in np.arange(9):
ax = fig.add_subplot(3, 3, idx+1, xticks=[], yticks=[])
ax.imshow(arrs[idx], interpolation="bicubic", cmap="nipy_spectral")
#matplotlib_imshow(arrs[idx])
ax.set_title("{0}, {1:.1f}%\n(label: {2})".format(
classes[preds[idx]],
probs[idx] * 100.0,
classes[labels[idx]]),
color=("green" if preds[idx]==labels[idx].item() else "red"))
plt.savefig("pred_vs_true_4classes_rasca_batch"+str(num_batch)+"_epoch"+str(num_epochs)+"_classes"+str(num_classes)+"_mydata"+".png")
return fig
def check_labels(dataloader):
print("Size ", len(dataloader))
input1, label1 = next(iter(dataloader))
print("label ", label1)
print("label size ", label1.size())
class Plotter:
    def __init__(self, epochs, train_nums, val_nums, x_label, y_label, str_type="Losses"):  # str_type => "Losses" or "Accuracy"
self.epochs = epochs
self.train_nums = train_nums #train accs or losses
self.val_nums = val_nums
self.x_label = x_label
self.y_label = y_label
self.str_type = str_type
def plotter(self):
plt.plot(self.epochs, self.train_nums, label="Train")
plt.plot(self.epochs, self.val_nums, label="Test")
plt.xlabel(self.x_label)
plt.ylabel(self.y_label)
plt.xticks(ticks=epochs)
plt.legend()
plotdir="cnn_plots_"+str(date.today())
if not os.path.exists(plotdir):
os.mkdir(plotdir)
plotname= plotdir+"/"+self.str_type+"_"+str(num_batch)+"_epoch"+str(num_epochs)+"_"+str(date.today())+".png"
plt.savefig(plotname)
print(plotname+" is created")
plt.close()
def compute_pred_per_batch(model, validloader):
predlist = torch.zeros(0, dtype= torch.long, device="cpu")
lablist = torch.zeros(0, dtype= torch.long, device="cpu")
for batch_idx, (features, targets) in enumerate(validloader):
preds, _ = arrs_to_probs(model, features)
predlist = torch.cat([predlist, preds.view(-1).cpu()])
lablist = torch.cat([lablist, targets.view(-1).cpu()])
return (predlist, lablist)
if __name__=="__main__":
| device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print('Device:', device)
# Hyperparameters
learning_rate = 0.001
num_epochs = 30
# GLOBAL VARIABLES
num_features = 64*64
#num_classes = 16
num_classes = 25
num_batch=32
model = VGG16(num_features=num_features, num_classes= num_classes)
model.to(device)
summary(model, (1,64,64))
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
start_time = time.time()
p = Path.home()
test_filepath =p/"numpy_cnn/hdfs_2019-12-26"/"hdfs_2019-12-26_all_test.h5"
train_filepath =p/"numpy_cnn/hdfs_2019-12-26"/"hdfs_2019-12-26_all_train.h5"
TrainLd = CreateDataLoader(train_filepath,"Train",num_batch)
trainloader = TrainLd.create_dataloader()
TestLd = CreateDataLoader(test_filepath,"Test",num_batch)
validloader = TestLd.create_dataloader()
torch.save(trainloader, "trainloader"+"_"+str(datetime.now().time()))
torch.save(validloader, "validloader"+"_"+str(datetime.now().time()))
#sys.exit()
print("check train labels:")
check_labels(trainloader)
print("_______________________________________")
print("check test labels:")
check_labels(validloader)
epochs = []
train_accs=[]
val_accs=[]
train_losses=[]
val_losses=[]
running_loss=0.0
df = pd.read_hdf(test_filepath, key="df")
print(type(df.columns))
print(df.columns[1])
classes=[]
for col in df.columns:
sigx = col.split('_')[0]
sigy = col.split('_')[1]
x_val = next(iter(re.findall(r"\d+\.\d+", sigx)))
y_val = next(iter(re.findall(r"\d+\.\d+", sigy)))
val = "x"+str(x_val)+"_"+"y"+str(y_val)
classes.append(val)
print("classes", classes)
#FOR Confusion matrix
predlist = torch.zeros(0, dtype= torch.long, device="cpu")
lablist = torch.zeros(0, dtype= torch.long, device="cpu")
for epoch in range(num_epochs):
model.train()
for batch_idx, (features, targets) in enumerate(trainloader):
features = features.to(device)
targets = targets.to(device)
### FORWARD AND BACK PROP
logits, probas = model(features)
cost = F.cross_entropy(logits, targets)
optimizer.zero_grad()
cost.backward()
### UPDATE MODEL PARAMETERS
optimizer.step()
running_loss += cost.item()
### LOGGING
if not batch_idx % 50:
print ('Epoch: %03d/%03d | Batch %04d/%04d | Cost: %.4f'
%(epoch+1, num_epochs, batch_idx,
len(trainloader), cost))
# ...log the running loss
writer.add_scalar('training loss',
running_loss / 100,
epoch * len(trainloader) + batch_idx)
# ...log a Matplotlib Figure showing the model's predictions on a
# random mini-batch
#writer.add_figure('predictions vs. actuals',
# plot_classes_preds(model.cpu(), features, targets),
# global_step=epoch * len(trainloader) + batch_idx)
running_loss = 0.0
with torch.no_grad():
if epoch==num_epochs-1:
#preds, _ = arrs_to_probs(model, features)
#print("type pred ", type(preds))
#print("preds shape ", preds.shape)
#predlist = torch.cat([predlist, preds.view(-1).cpu()])
#lablist = torch.cat([lablist, targets.view(-1).cpu()])
predlist, lablist = compute_pred_per_batch(model, validloader)
if batch_idx== len(trainloader)-1:
#plot_classes_preds(model.cpu(), features, targets)
print("inside plot_classes")
model.eval()
#with torch.set_grad_enabled(False): # save memory during inference
with torch.no_grad(): # save memory during inference
epochs.append(epoch)
train_acc, train_loss = compute_accuracy(model, trainloader)
val_acc, val_loss = compute_accuracy(model, validloader)
train_accs.append(train_acc)
val_accs.append(val_acc)
train_losses.append(train_loss)
val_losses.append(val_loss)
print('Epoch: %03d/%03d | Train: %.3f%% | Valid: %.3f%%' % (
epoch+1, num_epochs,
train_acc,
val_acc))
print('Time elapsed: %.2f min' % ((time.time() - start_time)/60))
print('Total Training Time: %.2f min' % ((time.time() - start_time)/60))
#WRITE ARCHITECTURE
arrs, labels = next(iter(trainloader))
#img_grid = torchvision.utils.make_grid(arrs)
model = model.to(dtype=torch.float, device = torch.device("cpu"))
writer.add_graph(model, arrs)
#SAVE MODEL
torch.save(model.state_dict(), "model"+"_"+str(datetime.now().time()))
#MAKE PLOTS
LossPlot= Plotter(epochs,train_losses, val_losses,"Epochs","Loss","Losses")
LossPlot.plotter()
AccPlot = Plotter(epochs, train_accs, val_accs, "Epochs", "Accuracy", "Accuracies")
AccPlot.plotter()
testpreds, testlabs = compute_pred_per_batch(model, validloader)
conf_mat_train = confusion_matrix(lablist.numpy(), predlist.numpy())
conf_mat_test = confusion_matrix(testlabs.numpy(), testpreds.numpy())
plot_confusion(conf_mat_train, "conf_train_test5")
plot_confusion(conf_mat_test, "conf_test_test5") | conditional_block | |
final_cnn_mydata.py | from __future__ import print_function, division
import os
import time
import torch
import pandas as pd
#from skarr import io, transform
import numpy as np
import matplotlib.pyplot as plt
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms, utils
from torch.utils.tensorboard import SummaryWriter
import torch.nn as nn
import torchvision
from torch.utils.tensorboard import SummaryWriter
import torch.nn.functional as F
from torchvision import datasets
from torchvision import transforms
from torchsummary import summary
from pathlib import Path
from datetime import date
from datetime import datetime
import sys,re
from sklearn.metrics import confusion_matrix
# Ignore warnings
import warnings
warnings.filterwarnings("ignore")
writer = SummaryWriter()
class mydataset(Dataset):
def __init__(self, hdf_filepath,transform=None):
self.df = pd.read_hdf(str(hdf_filepath), key="df")
self.transform = transform
def __len__(self):
return self.df.shape[1]
def __getitem__(self, col_id):
if torch.is_tensor(col_id):
col_id = col_id.tolist()
arrpaths = self.df.iloc[:,col_id]
labels=[]
arrs=[]
for i,arrpath in enumerate(arrpaths):
arr = np.load(arrpath)
if self.transform:
arr = self.transform(arr)
label = col_id
labels.append(label)
arrs.append(arr)
return (arrs, labels)
class ToTensor(object):
def __call__(self, array):
array = np.expand_dims(array,axis=0)
return torch.from_numpy(array).float()
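# Note (illustrative addition, not part of the original script): the ToTensor
# transform above expands a 2-D (64, 64) numpy array into a single-channel float
# tensor of shape (1, 64, 64); the DataLoader later adds the batch dimension.
# The helper name _totensor_example is hypothetical and only sketches the shapes.
def _totensor_example():
    sample = np.zeros((64, 64), dtype=np.float32)  # stand-in for one loaded .npy array
    tensor = ToTensor()(sample)
    assert tensor.shape == (1, 64, 64) and tensor.dtype == torch.float32
    return tensor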
class CreateDataLoader:
def | (self,filepath,str_type="Train", batch_len=5):
self.filepath = filepath
self.str_type = str_type
self.batch_len = batch_len
def create_dataloader(self):
#transformed_ds = mydataset(hdf_filepath=str(filepath), transform = transforms.Compose([transforms.Grayscale(num_output_channels=1) , transforms.Resize((64,64)), transforms.ToTensor(), transforms.Normalize((0.5,), (0.5,) ) ]))
transformed_ds = mydataset(hdf_filepath=str(self.filepath), transform=transforms.Compose([ToTensor()]))
arrs=[]
labels=[]
data=[]
for column_id in range(len(transformed_ds)):
im, lab = transformed_ds[column_id]
for i in range(len(im)):
data.append([im[i], lab[i]])
arrs.append(im)
labels.append(lab)
labels = np.array(labels)
if self.str_type=="Train":
print(self.str_type +" data len ",len(data))
dataloader = torch.utils.data.DataLoader(data, shuffle=True, batch_size=self.batch_len)
elif self.str_type=="Test":
print("testdata len ",len(data))
dataloader = torch.utils.data.DataLoader(data, shuffle=False, batch_size=self.batch_len)
i1, l1 = next(iter(dataloader))
print(i1.shape)
return dataloader
class VGG16(torch.nn.Module):
def __init__(self, num_features, num_classes):
super(VGG16, self).__init__()
self.block_1 = nn.Sequential(nn.Conv2d(in_channels=1, out_channels=64, kernel_size=(3,3), stride=(1,1), padding=1),
nn.ReLU(),
nn.Conv2d(in_channels=64, out_channels=64, kernel_size=(3,3),stride=(1,1),padding=1),
nn.ReLU(),
nn.Conv2d(in_channels=64, out_channels=64, kernel_size=(3,3),stride=(1,1),padding=1),
nn.ReLU(),
nn.Conv2d(in_channels=64, out_channels=64, kernel_size=(3,3),stride=(1,1),padding=1),
nn.ReLU(),
nn.MaxPool2d(kernel_size=(2,2), stride=(2,2))
)
#self.block_2 = nn.Sequential(nn.Conv2d(in_channels=64, out_channels=64, kernel_size=(3,3), stride=(1,1), padding=1),
# nn.ReLU(),
# nn.Conv2d(in_channels=64, out_channels=64, kernel_size=(3,3),stride=(1,1),padding=1),
# nn.ReLU(),
# nn.MaxPool2d(kernel_size=(2,2), stride=(2,2))
# )
#self.block_3 = nn.Sequential(nn.Conv2d(in_channels=128, out_channels=256, kernel_size=(3,3), stride=(1,1), padding=1),
# nn.ReLU(),
# nn.Conv2d(in_channels=256, out_channels=256, kernel_size=(3,3),stride=(1,1),padding=1),
# nn.ReLU(),
# nn.MaxPool2d(kernel_size=(2,2), stride=(2,2))
# )
#self.block_4 = nn.Sequential(nn.Conv2d(in_channels=256, out_channels=512, kernel_size=(3,3), stride=(1,1), padding=1),
# nn.ReLU(),
# nn.Conv2d(in_channels=512, out_channels=512, kernel_size=(3,3),stride=(1,1),padding=1),
# nn.ReLU(),
# nn.MaxPool2d(kernel_size=(2,2), stride=(2,2))
# )
#self.block_5 = nn.Sequential(nn.Conv2d(in_channels=512, out_channels=512, kernel_size=(3,3), stride=(1,1), padding=1),
# nn.ReLU(),
# nn.Conv2d(in_channels=512, out_channels=512, kernel_size=(3,3),stride=(1,1),padding=1),
# nn.ReLU(),
# nn.MaxPool2d(kernel_size=(2,2), stride=(2,2))
# )
self.classifier = nn.Sequential(
nn.Linear(64*32*32, 1024),
nn.Dropout(0.5),
nn.ReLU(),
nn.Linear(1024,512),
nn.Dropout(0.5),
nn.ReLU(),
nn.Linear(512,num_classes)
)
#pytorch initializes automatically
#https://stackoverflow.com/questions/49433936/how-to-initialize-weights-in-pytorch
#for m in self.modules():
# if isinstance(m , torch.nn.Conv2d):
# m.weight.detach().normal_(0,0.05)
def forward(self,x):
x = self.block_1(x)
#x = self.block_2(x)
#x = self.block_3(x)
#x = self.block_4(x)
#x = self.block_5(x)
#print("x shape ", x.shape)
logits = self.classifier(x.view(-1,64*32*32))
probs = F.softmax(logits, dim=1)
return logits, probs
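# Note (illustrative addition, not part of the original script): a minimal shape
# check for the single-block VGG16 variant above, assuming a CPU run with a
# hypothetical batch of 4 and the 25 classes configured later in __main__.
# The helper name _vgg16_smoke_test is not part of the original code.
def _vgg16_smoke_test():
    net = VGG16(num_features=64 * 64, num_classes=25)
    x = torch.randn(4, 1, 64, 64)  # four single-channel 64x64 arrays
    logits, probs = net(x)
    assert logits.shape == (4, 25)  # block_1 pools 64x64 down to 32x32 with 64 channels
    assert torch.allclose(probs.sum(dim=1), torch.ones(4), atol=1e-5)
    return logits, probs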
def compute_accuracy(model, data_loader):
correct_pred, num_examples, data_loss = 0, 0, 0
for i, (features, targets) in enumerate(data_loader):
features = features.to(device)
targets = targets.to(device)
logits, probs = model(features)
cost = F.cross_entropy(logits, targets)
data_loss += cost.item()
_, predicted_labels = torch.max(probs, 1)
num_examples += targets.size(0)
correct_pred += (predicted_labels == targets).sum()
return correct_pred.float()/num_examples * 100, data_loss
def matplotlib_imshow(img, one_channel=False):
if one_channel:
img = img.mean(dim=0)
img = img / 2 + 0.5 # unnormalize
imgtemp = img.cpu()
npimg = imgtemp.numpy()
if one_channel:
plt.imshow(npimg, cmap="Greys")
else:
plt.imshow(np.transpose(npimg, (1, 2, 0)))
def arrs_to_probs(net, arrs):
'''
Generates predictions and corresponding probabilities from a trained
network and a list of arrs
'''
#net = net.to(dtype=torch.float, device = torch.device("cpu"))
net = net.to(device)  # use the globally selected device instead of assuming CUDA is available
output, probs = net(arrs.to(device))
_, preds_tensor = torch.max(output, 1)
preds_tensortemp = preds_tensor.cpu()
preds = np.squeeze(preds_tensortemp.numpy())
return torch.from_numpy(preds), [F.softmax(el, dim=0)[i].item() for i, el in zip(preds, output)]
def plot_confusion(confusion_mat,fname="conf_mat"):
fig,ax = plt.subplots(figsize=(11,8))
ax.set(xticks=np.arange(confusion_mat.shape[1]), yticks=np.arange(confusion_mat.shape[0]), xticklabels=classes, yticklabels=classes, title=None, ylabel="True label", xlabel="Predicted label")  # yticks must match the number of rows (not rows+1) so the class labels line up
im = ax.imshow(confusion_mat, interpolation="nearest", cmap= plt.cm.Blues)
ax.figure.colorbar(im,ax=ax)
plt.setp(ax.get_xticklabels(), rotation=45, ha="right",
rotation_mode="anchor")
fmt = 'd'
thresh = confusion_mat.max() / 2.
for i in range(confusion_mat.shape[0]):
for j in range(confusion_mat.shape[1]):
ax.text(j, i, format(confusion_mat[i, j], fmt),ha="center", va="center",color="white" if confusion_mat[i, j] > thresh else "black")
fig.tight_layout()
plt.savefig(fname+".png")
plt.close()
def plot_classes_preds(model, arrs, labels):
preds, probs = arrs_to_probs(model, arrs)
# plot the arrs in the batch, along with predicted and true labels
fig = plt.figure(figsize=(48,12))
arrs = arrs.cpu().numpy()
arrs = arrs.reshape(-1,64,64)
for idx in np.arange(9):
ax = fig.add_subplot(3, 3, idx+1, xticks=[], yticks=[])
ax.imshow(arrs[idx], interpolation="bicubic", cmap="nipy_spectral")
#matplotlib_imshow(arrs[idx])
ax.set_title("{0}, {1:.1f}%\n(label: {2})".format(
classes[preds[idx]],
probs[idx] * 100.0,
classes[labels[idx]]),
color=("green" if preds[idx]==labels[idx].item() else "red"))
plt.savefig("pred_vs_true_4classes_rasca_batch"+str(num_batch)+"_epoch"+str(num_epochs)+"_classes"+str(num_classes)+"_mydata"+".png")
return fig
def check_labels(dataloader):
print("Size ", len(dataloader))
input1, label1 = next(iter(dataloader))
print("label ", label1)
print("label size ", label1.size())
class Plotter:
def __init__(self, epochs, train_nums, val_nums, x_label, y_label, str_type="Losses"):  # str_type => "Losses" or "Accuracy"
self.epochs = epochs
self.train_nums = train_nums #train accs or losses
self.val_nums = val_nums
self.x_label = x_label
self.y_label = y_label
self.str_type = str_type
def plotter(self):
plt.plot(self.epochs, self.train_nums, label="Train")
plt.plot(self.epochs, self.val_nums, label="Test")
plt.xlabel(self.x_label)
plt.ylabel(self.y_label)
plt.xticks(ticks=self.epochs)  # use the instance's epoch list rather than the global
plt.legend()
plotdir="cnn_plots_"+str(date.today())
if not os.path.exists(plotdir):
os.mkdir(plotdir)
plotname= plotdir+"/"+self.str_type+"_"+str(num_batch)+"_epoch"+str(num_epochs)+"_"+str(date.today())+".png"
plt.savefig(plotname)
print(plotname+" is created")
plt.close()
def compute_pred_per_batch(model, validloader):
predlist = torch.zeros(0, dtype= torch.long, device="cpu")
lablist = torch.zeros(0, dtype= torch.long, device="cpu")
for batch_idx, (features, targets) in enumerate(validloader):
preds, _ = arrs_to_probs(model, features)
predlist = torch.cat([predlist, preds.view(-1).cpu()])
lablist = torch.cat([lablist, targets.view(-1).cpu()])
return (predlist, lablist)
if __name__=="__main__":
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print('Device:', device)
# Hyperparameters
learning_rate = 0.001
num_epochs = 30
# GLOBAL VARIABLES
num_features = 64*64
#num_classes = 16
num_classes = 25
num_batch=32
model = VGG16(num_features=num_features, num_classes= num_classes)
model.to(device)
summary(model, (1,64,64))
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
start_time = time.time()
p = Path.home()
test_filepath =p/"numpy_cnn/hdfs_2019-12-26"/"hdfs_2019-12-26_all_test.h5"
train_filepath =p/"numpy_cnn/hdfs_2019-12-26"/"hdfs_2019-12-26_all_train.h5"
TrainLd = CreateDataLoader(train_filepath,"Train",num_batch)
trainloader = TrainLd.create_dataloader()
TestLd = CreateDataLoader(test_filepath,"Test",num_batch)
validloader = TestLd.create_dataloader()
torch.save(trainloader, "trainloader"+"_"+str(datetime.now().time()))
torch.save(validloader, "validloader"+"_"+str(datetime.now().time()))
#sys.exit()
print("check train labels:")
check_labels(trainloader)
print("_______________________________________")
print("check test labels:")
check_labels(validloader)
epochs = []
train_accs=[]
val_accs=[]
train_losses=[]
val_losses=[]
running_loss=0.0
df = pd.read_hdf(test_filepath, key="df")
print(type(df.columns))
print(df.columns[1])
classes=[]
for col in df.columns:
sigx = col.split('_')[0]
sigy = col.split('_')[1]
x_val = next(iter(re.findall(r"\d+\.\d+", sigx)))
y_val = next(iter(re.findall(r"\d+\.\d+", sigy)))
val = "x"+str(x_val)+"_"+"y"+str(y_val)
classes.append(val)
print("classes", classes)
#FOR Confusion matrix
predlist = torch.zeros(0, dtype= torch.long, device="cpu")
lablist = torch.zeros(0, dtype= torch.long, device="cpu")
for epoch in range(num_epochs):
model.train()
for batch_idx, (features, targets) in enumerate(trainloader):
features = features.to(device)
targets = targets.to(device)
### FORWARD AND BACK PROP
logits, probas = model(features)
cost = F.cross_entropy(logits, targets)
optimizer.zero_grad()
cost.backward()
### UPDATE MODEL PARAMETERS
optimizer.step()
running_loss += cost.item()
### LOGGING
if not batch_idx % 50:
print ('Epoch: %03d/%03d | Batch %04d/%04d | Cost: %.4f'
%(epoch+1, num_epochs, batch_idx,
len(trainloader), cost))
# ...log the running loss
writer.add_scalar('training loss',
running_loss / 100,
epoch * len(trainloader) + batch_idx)
# ...log a Matplotlib Figure showing the model's predictions on a
# random mini-batch
#writer.add_figure('predictions vs. actuals',
# plot_classes_preds(model.cpu(), features, targets),
# global_step=epoch * len(trainloader) + batch_idx)
running_loss = 0.0
with torch.no_grad():
if epoch==num_epochs-1:
#preds, _ = arrs_to_probs(model, features)
#print("type pred ", type(preds))
#print("preds shape ", preds.shape)
#predlist = torch.cat([predlist, preds.view(-1).cpu()])
#lablist = torch.cat([lablist, targets.view(-1).cpu()])
predlist, lablist = compute_pred_per_batch(model, validloader)
if batch_idx== len(trainloader)-1:
#plot_classes_preds(model.cpu(), features, targets)
print("inside plot_classes")
model.eval()
#with torch.set_grad_enabled(False): # save memory during inference
with torch.no_grad(): # save memory during inference
epochs.append(epoch)
train_acc, train_loss = compute_accuracy(model, trainloader)
val_acc, val_loss = compute_accuracy(model, validloader)
train_accs.append(train_acc)
val_accs.append(val_acc)
train_losses.append(train_loss)
val_losses.append(val_loss)
print('Epoch: %03d/%03d | Train: %.3f%% | Valid: %.3f%%' % (
epoch+1, num_epochs,
train_acc,
val_acc))
print('Time elapsed: %.2f min' % ((time.time() - start_time)/60))
print('Total Training Time: %.2f min' % ((time.time() - start_time)/60))
#WRITE ARCHITECTURE
arrs, labels = next(iter(trainloader))
#img_grid = torchvision.utils.make_grid(arrs)
model = model.to(dtype=torch.float, device = torch.device("cpu"))
writer.add_graph(model, arrs)
#SAVE MODEL
torch.save(model.state_dict(), "model"+"_"+str(datetime.now().time()))
#MAKE PLOTS
LossPlot= Plotter(epochs,train_losses, val_losses,"Epochs","Loss","Losses")
LossPlot.plotter()
AccPlot = Plotter(epochs, train_accs, val_accs, "Epochs", "Accuracy", "Accuracies")
AccPlot.plotter()
testpreds, testlabs = compute_pred_per_batch(model, validloader)
conf_mat_train = confusion_matrix(lablist.numpy(), predlist.numpy())
conf_mat_test = confusion_matrix(testlabs.numpy(), testpreds.numpy())
plot_confusion(conf_mat_train, "conf_train_test5")
plot_confusion(conf_mat_test, "conf_test_test5")
| __init__ | identifier_name |
final_cnn_mydata.py | from __future__ import print_function, division
import os
import time
import torch
import pandas as pd
#from skarr import io, transform
import numpy as np
import matplotlib.pyplot as plt
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms, utils
from torch.utils.tensorboard import SummaryWriter
import torch.nn as nn
import torchvision
from torch.utils.tensorboard import SummaryWriter
import torch.nn.functional as F
from torchvision import datasets
from torchvision import transforms
from torchsummary import summary
from pathlib import Path
from datetime import date
from datetime import datetime
import sys,re
from sklearn.metrics import confusion_matrix
# Ignore warnings
import warnings
warnings.filterwarnings("ignore")
writer = SummaryWriter()
class mydataset(Dataset):
def __init__(self, hdf_filepath,transform=None):
self.df = pd.read_hdf(str(hdf_filepath), key="df")
self.transform = transform
def __len__(self):
return self.df.shape[1]
def __getitem__(self, col_id):
if torch.is_tensor(col_id):
col_id = col_id.tolist()
arrpaths = self.df.iloc[:,col_id]
labels=[]
arrs=[]
for i,arrpath in enumerate(arrpaths):
arr = np.load(arrpath)
if self.transform:
arr = self.transform(arr)
label = col_id
labels.append(label)
arrs.append(arr)
return (arrs, labels)
class ToTensor(object):
def __call__(self, array):
array = np.expand_dims(array,axis=0)
return torch.from_numpy(array).float()
class CreateDataLoader:
def __init__(self,filepath,str_type="Train", batch_len=5):
self.filepath = filepath
self.str_type = str_type
self.batch_len = batch_len
def create_dataloader(self):
#transformed_ds = mydataset(hdf_filepath=str(filepath), transform = transforms.Compose([transforms.Grayscale(num_output_channels=1) , transforms.Resize((64,64)), transforms.ToTensor(), transforms.Normalize((0.5,), (0.5,) ) ]))
transformed_ds = mydataset(hdf_filepath=str(self.filepath), transform=transforms.Compose([ToTensor()]))
arrs=[]
labels=[]
data=[]
for column_id in range(len(transformed_ds)):
im, lab = transformed_ds[column_id]
for i in range(len(im)):
data.append([im[i], lab[i]])
arrs.append(im)
labels.append(lab)
labels = np.array(labels)
if self.str_type=="Train":
print(self.str_type +" data len ",len(data))
dataloader = torch.utils.data.DataLoader(data, shuffle=True, batch_size=self.batch_len)
elif self.str_type=="Test":
print("testdata len ",len(data))
dataloader = torch.utils.data.DataLoader(data, shuffle=False, batch_size=self.batch_len)
i1, l1 = next(iter(dataloader))
print(i1.shape)
return dataloader
class VGG16(torch.nn.Module):
|
def compute_accuracy(model, data_loader):
correct_pred, num_examples, data_loss = 0, 0, 0
for i, (features, targets) in enumerate(data_loader):
features = features.to(device)
targets = targets.to(device)
logits, probs = model(features)
cost = F.cross_entropy(logits, targets)
data_loss += cost.item()
_, predicted_labels = torch.max(probs, 1)
num_examples += targets.size(0)
correct_pred += (predicted_labels == targets).sum()
return correct_pred.float()/num_examples * 100, data_loss
def matplotlib_imshow(img, one_channel=False):
if one_channel:
img = img.mean(dim=0)
img = img / 2 + 0.5 # unnormalize
imgtemp = img.cpu()
npimg = imgtemp.numpy()
if one_channel:
plt.imshow(npimg, cmap="Greys")
else:
plt.imshow(np.transpose(npimg, (1, 2, 0)))
def arrs_to_probs(net, arrs):
'''
Generates predictions and corresponding probabilities from a trained
network and a list of arrs
'''
#net = net.to(dtype=torch.float, device = torch.device("cpu"))
net = net.to(device)  # use the globally selected device instead of assuming CUDA is available
output, probs = net(arrs.to(device))
_, preds_tensor = torch.max(output, 1)
preds_tensortemp = preds_tensor.cpu()
preds = np.squeeze(preds_tensortemp.numpy())
return torch.from_numpy(preds), [F.softmax(el, dim=0)[i].item() for i, el in zip(preds, output)]
def plot_confusion(confusion_mat,fname="conf_mat"):
fig,ax = plt.subplots(figsize=(11,8))
ax.set(xticks=np.arange(confusion_mat.shape[1]), yticks=np.arange(confusion_mat.shape[0]), xticklabels=classes, yticklabels=classes, title=None, ylabel="True label", xlabel="Predicted label")  # yticks must match the number of rows (not rows+1) so the class labels line up
im = ax.imshow(confusion_mat, interpolation="nearest", cmap= plt.cm.Blues)
ax.figure.colorbar(im,ax=ax)
plt.setp(ax.get_xticklabels(), rotation=45, ha="right",
rotation_mode="anchor")
fmt = 'd'
thresh = confusion_mat.max() / 2.
for i in range(confusion_mat.shape[0]):
for j in range(confusion_mat.shape[1]):
ax.text(j, i, format(confusion_mat[i, j], fmt),ha="center", va="center",color="white" if confusion_mat[i, j] > thresh else "black")
fig.tight_layout()
plt.savefig(fname+".png")
plt.close()
def plot_classes_preds(model, arrs, labels):
preds, probs = arrs_to_probs(model, arrs)
# plot the arrs in the batch, along with predicted and true labels
fig = plt.figure(figsize=(48,12))
arrs = arrs.cpu().numpy()
arrs = arrs.reshape(-1,64,64)
for idx in np.arange(9):
ax = fig.add_subplot(3, 3, idx+1, xticks=[], yticks=[])
ax.imshow(arrs[idx], interpolation="bicubic", cmap="nipy_spectral")
#matplotlib_imshow(arrs[idx])
ax.set_title("{0}, {1:.1f}%\n(label: {2})".format(
classes[preds[idx]],
probs[idx] * 100.0,
classes[labels[idx]]),
color=("green" if preds[idx]==labels[idx].item() else "red"))
plt.savefig("pred_vs_true_4classes_rasca_batch"+str(num_batch)+"_epoch"+str(num_epochs)+"_classes"+str(num_classes)+"_mydata"+".png")
return fig
def check_labels(dataloader):
print("Size ", len(dataloader))
input1, label1 = next(iter(dataloader))
print("label ", label1)
print("label size ", label1.size())
class Plotter:
def __init__(self, epochs, train_nums, val_nums, x_label, y_label, str_type="Losses"):  # str_type => "Losses" or "Accuracy"
self.epochs = epochs
self.train_nums = train_nums #train accs or losses
self.val_nums = val_nums
self.x_label = x_label
self.y_label = y_label
self.str_type = str_type
def plotter(self):
plt.plot(self.epochs, self.train_nums, label="Train")
plt.plot(self.epochs, self.val_nums, label="Test")
plt.xlabel(self.x_label)
plt.ylabel(self.y_label)
plt.xticks(ticks=self.epochs)  # use the instance's epoch list rather than the global
plt.legend()
plotdir="cnn_plots_"+str(date.today())
if not os.path.exists(plotdir):
os.mkdir(plotdir)
plotname= plotdir+"/"+self.str_type+"_"+str(num_batch)+"_epoch"+str(num_epochs)+"_"+str(date.today())+".png"
plt.savefig(plotname)
print(plotname+" is created")
plt.close()
def compute_pred_per_batch(model, validloader):
predlist = torch.zeros(0, dtype= torch.long, device="cpu")
lablist = torch.zeros(0, dtype= torch.long, device="cpu")
for batch_idx, (features, targets) in enumerate(validloader):
preds, _ = arrs_to_probs(model, features)
predlist = torch.cat([predlist, preds.view(-1).cpu()])
lablist = torch.cat([lablist, targets.view(-1).cpu()])
return (predlist, lablist)
if __name__=="__main__":
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print('Device:', device)
# Hyperparameters
learning_rate = 0.001
num_epochs = 30
# GLOBAL VARIABLES
num_features = 64*64
#num_classes = 16
num_classes = 25
num_batch=32
model = VGG16(num_features=num_features, num_classes= num_classes)
model.to(device)
summary(model, (1,64,64))
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
start_time = time.time()
p = Path.home()
test_filepath =p/"numpy_cnn/hdfs_2019-12-26"/"hdfs_2019-12-26_all_test.h5"
train_filepath =p/"numpy_cnn/hdfs_2019-12-26"/"hdfs_2019-12-26_all_train.h5"
TrainLd = CreateDataLoader(train_filepath,"Train",num_batch)
trainloader = TrainLd.create_dataloader()
TestLd = CreateDataLoader(test_filepath,"Test",num_batch)
validloader = TestLd.create_dataloader()
torch.save(trainloader, "trainloader"+"_"+str(datetime.now().time()))
torch.save(validloader, "validloader"+"_"+str(datetime.now().time()))
#sys.exit()
print("check train labels:")
check_labels(trainloader)
print("_______________________________________")
print("check test labels:")
check_labels(validloader)
epochs = []
train_accs=[]
val_accs=[]
train_losses=[]
val_losses=[]
running_loss=0.0
df = pd.read_hdf(test_filepath, key="df")
print(type(df.columns))
print(df.columns[1])
classes=[]
for col in df.columns:
sigx = col.split('_')[0]
sigy = col.split('_')[1]
x_val = next(iter(re.findall(r"\d+\.\d+", sigx)))
y_val = next(iter(re.findall(r"\d+\.\d+", sigy)))
val = "x"+str(x_val)+"_"+"y"+str(y_val)
classes.append(val)
print("classes", classes)
#FOR Confusion matrix
predlist = torch.zeros(0, dtype= torch.long, device="cpu")
lablist = torch.zeros(0, dtype= torch.long, device="cpu")
for epoch in range(num_epochs):
model.train()
for batch_idx, (features, targets) in enumerate(trainloader):
features = features.to(device)
targets = targets.to(device)
### FORWARD AND BACK PROP
logits, probas = model(features)
cost = F.cross_entropy(logits, targets)
optimizer.zero_grad()
cost.backward()
### UPDATE MODEL PARAMETERS
optimizer.step()
running_loss += cost.item()
### LOGGING
if not batch_idx % 50:
print ('Epoch: %03d/%03d | Batch %04d/%04d | Cost: %.4f'
%(epoch+1, num_epochs, batch_idx,
len(trainloader), cost))
# ...log the running loss
writer.add_scalar('training loss',
running_loss / 100,
epoch * len(trainloader) + batch_idx)
# ...log a Matplotlib Figure showing the model's predictions on a
# random mini-batch
#writer.add_figure('predictions vs. actuals',
# plot_classes_preds(model.cpu(), features, targets),
# global_step=epoch * len(trainloader) + batch_idx)
running_loss = 0.0
with torch.no_grad():
if epoch==num_epochs-1:
#preds, _ = arrs_to_probs(model, features)
#print("type pred ", type(preds))
#print("preds shape ", preds.shape)
#predlist = torch.cat([predlist, preds.view(-1).cpu()])
#lablist = torch.cat([lablist, targets.view(-1).cpu()])
predlist, lablist = compute_pred_per_batch(model, validloader)
if batch_idx== len(trainloader)-1:
#plot_classes_preds(model.cpu(), features, targets)
print("inside plot_classes")
model.eval()
#with torch.set_grad_enabled(False): # save memory during inference
with torch.no_grad(): # save memory during inference
epochs.append(epoch)
train_acc, train_loss = compute_accuracy(model, trainloader)
val_acc, val_loss = compute_accuracy(model, validloader)
train_accs.append(train_acc)
val_accs.append(val_acc)
train_losses.append(train_loss)
val_losses.append(val_loss)
print('Epoch: %03d/%03d | Train: %.3f%% | Valid: %.3f%%' % (
epoch+1, num_epochs,
train_acc,
val_acc))
print('Time elapsed: %.2f min' % ((time.time() - start_time)/60))
print('Total Training Time: %.2f min' % ((time.time() - start_time)/60))
#WRITE ARCHITECTURE
arrs, labels = next(iter(trainloader))
#img_grid = torchvision.utils.make_grid(arrs)
model = model.to(dtype=torch.float, device = torch.device("cpu"))
writer.add_graph(model, arrs)
#SAVE MODEL
torch.save(model.state_dict(), "model"+"_"+str(datetime.now().time()))
#MAKE PLOTS
LossPlot= Plotter(epochs,train_losses, val_losses,"Epochs","Loss","Losses")
LossPlot.plotter()
AccPlot = Plotter(epochs, train_accs, val_accs, "Epochs", "Accuracy", "Accuracies")
AccPlot.plotter()
testpreds, testlabs = compute_pred_per_batch(model, validloader)
conf_mat_train = confusion_matrix(lablist.numpy(), predlist.numpy())
conf_mat_test = confusion_matrix(testlabs.numpy(), testpreds.numpy())
plot_confusion(conf_mat_train, "conf_train_test5")
plot_confusion(conf_mat_test, "conf_test_test5")
| def __init__(self, num_features, num_classes):
super(VGG16, self).__init__()
self.block_1 = nn.Sequential(nn.Conv2d(in_channels=1, out_channels=64, kernel_size=(3,3), stride=(1,1), padding=1),
nn.ReLU(),
nn.Conv2d(in_channels=64, out_channels=64, kernel_size=(3,3),stride=(1,1),padding=1),
nn.ReLU(),
nn.Conv2d(in_channels=64, out_channels=64, kernel_size=(3,3),stride=(1,1),padding=1),
nn.ReLU(),
nn.Conv2d(in_channels=64, out_channels=64, kernel_size=(3,3),stride=(1,1),padding=1),
nn.ReLU(),
nn.MaxPool2d(kernel_size=(2,2), stride=(2,2))
)
#self.block_2 = nn.Sequential(nn.Conv2d(in_channels=64, out_channels=64, kernel_size=(3,3), stride=(1,1), padding=1),
# nn.ReLU(),
# nn.Conv2d(in_channels=64, out_channels=64, kernel_size=(3,3),stride=(1,1),padding=1),
# nn.ReLU(),
# nn.MaxPool2d(kernel_size=(2,2), stride=(2,2))
# )
#self.block_3 = nn.Sequential(nn.Conv2d(in_channels=128, out_channels=256, kernel_size=(3,3), stride=(1,1), padding=1),
# nn.ReLU(),
# nn.Conv2d(in_channels=256, out_channels=256, kernel_size=(3,3),stride=(1,1),padding=1),
# nn.ReLU(),
# nn.MaxPool2d(kernel_size=(2,2), stride=(2,2))
# )
#self.block_4 = nn.Sequential(nn.Conv2d(in_channels=256, out_channels=512, kernel_size=(3,3), stride=(1,1), padding=1),
# nn.ReLU(),
# nn.Conv2d(in_channels=512, out_channels=512, kernel_size=(3,3),stride=(1,1),padding=1),
# nn.ReLU(),
# nn.MaxPool2d(kernel_size=(2,2), stride=(2,2))
# )
#self.block_5 = nn.Sequential(nn.Conv2d(in_channels=512, out_channels=512, kernel_size=(3,3), stride=(1,1), padding=1),
# nn.ReLU(),
# nn.Conv2d(in_channels=512, out_channels=512, kernel_size=(3,3),stride=(1,1),padding=1),
# nn.ReLU(),
# nn.MaxPool2d(kernel_size=(2,2), stride=(2,2))
# )
self.classifier = nn.Sequential(
nn.Linear(64*32*32, 1024),
nn.Dropout(0.5),
nn.ReLU(),
nn.Linear(1024,512),
nn.Dropout(0.5),
nn.ReLU(),
nn.Linear(512,num_classes)
)
#pytorch initializes automatically
#https://stackoverflow.com/questions/49433936/how-to-initialize-weights-in-pytorch
#for m in self.modules():
# if isinstance(m , torch.nn.Conv2d):
# m.weight.detach().normal_(0,0.05)
def forward(self,x):
x = self.block_1(x)
#x = self.block_2(x)
#x = self.block_3(x)
#x = self.block_4(x)
#x = self.block_5(x)
#print("x shape ", x.shape)
logits = self.classifier(x.view(-1,64*32*32))
probs = F.softmax(logits, dim=1)
return logits, probs | identifier_body |
scripts.js | 'use strict';
// fixed svg show
//-----------------------------------------------------------------------------
svg4everybody();
// checking if element for page
//-----------------------------------------------------------------------------------
function isOnPage(selector) {
return ($(selector).length) ? $(selector) : false;
}
// search page
function pageWidget(pages) |
$('.section-block').slick({
infinite: true,
dots: false,
arrows: true,
slidesToShow: 1,
slidesToScroll: 1,
prevArrow: '<button type="button" class="slick-prev slick-arrow"><svg class="icon icon-arrow mod-arrow"><use xlink:href="assets/img/symbol/sprite.svg#arrow"></use></svg></button>',
nextArrow: '<button type="button" class="slick-next slick-arrow"><svg class="icon icon-arrow"><use xlink:href="assets/img/symbol/sprite.svg#arrow"></use></svg></button>'
});
$('.section-news').slick({
infinite: true,
dots: true,
arrows: true,
slidesToShow: 3,
slidesToScroll: 3,
prevArrow: '<button type="button" class="slick-prev slick-arrow"><svg class="icon icon-arrow mod-arrow"><use xlink:href="assets/img/symbol/sprite.svg#arrow"></use></svg></button>',
nextArrow: '<button type="button" class="slick-next slick-arrow"><svg class="icon icon-arrow"><use xlink:href="assets/img/symbol/sprite.svg#arrow"></use></svg></button>',
responsive: [
{
breakpoint: 1164,
settings: {
slidesToShow: 2,
slidesToScroll: 2,
infinite: true,
dots: true
}
},
{
breakpoint: 1000,
settings: {
slidesToShow: 2,
slidesToScroll: 2,
infinite: true,
arrows:false,
dots: true
}
},
{
breakpoint: 754,
settings: {
slidesToShow: 1,
slidesToScroll: 1,
infinite: true,
dots: true
}
},
{
breakpoint: 425,
settings: {
slidesToShow: 1,
slidesToScroll: 1,
infinite: true,
arrows:false,
dots: true
}
}
]
});
$('.header-menu-account').slick({
infinite: true,
dots: false,
arrows: true,
slidesToShow: 4,
slidesToScroll: 4,
prevArrow: '<button type="button" class="slick-prev slick-arrow"><svg class="icon icon-arrow mod-arrow mod-color-arrow"><use xlink:href="assets/img/symbol/sprite.svg#arrow"></use></svg></button>',
nextArrow: '<button type="button" class="slick-next slick-arrow"><svg class="icon icon-arrow mod-color-arrow"><use xlink:href="assets/img/symbol/sprite.svg#arrow"></use></svg></button>',
responsive: [
{
breakpoint: 1025,
settings: {
slidesToShow: 4,
slidesToScroll: 4,
infinite: true,
dots: false,
arrows: false,
}
},
{
breakpoint: 768,
settings: {
slidesToShow: 2,
slidesToScroll: 2,
infinite: true,
dots: false,
arrows: false,
}
},
{
breakpoint: 321,
settings: {
slidesToShow: 1,
slidesToScroll: 1,
infinite: true,
dots: false,
arrows: false,
}
}
]
});
$('.trainer-block').slick({
infinite: true,
dots: true,
arrows: true,
slidesToShow: 4,
slidesToScroll: 4,
prevArrow: '<button type="button" class="slick-prev slick-arrow"><svg class="icon icon-arrow mod-arrow"><use xlink:href="assets/img/symbol/sprite.svg#arrow"></use></svg></button>',
nextArrow: '<button type="button" class="slick-next slick-arrow"><svg class="icon icon-arrow"><use xlink:href="assets/img/symbol/sprite.svg#arrow"></use></svg></button>',
responsive: [
{
breakpoint: 1164,
settings: {
slidesToShow: 3,
slidesToScroll: 3,
infinite: true,
dots: true
}
},
{
breakpoint: 951,
settings: {
slidesToShow: 2,
slidesToScroll: 2,
infinite: true,
dots: true
}
},
{
breakpoint: 542,
settings: {
slidesToShow: 1,
slidesToScroll: 1,
infinite: true,
dots: true
}
}
]
});
$('.archive-slider').slick({
infinite: true,
dots: false,
arrows: true,
slidesToShow: 1,
slidesToScroll: 1,
prevArrow: '<button type="button" class="slick-prev slick-arrow"><svg class="icon icon-arrow mod-arrow"><use xlink:href="assets/img/symbol/sprite.svg#arrow"></use></svg></button>',
nextArrow: '<button type="button" class="slick-next slick-arrow"><svg class="icon icon-arrow"><use xlink:href="assets/img/symbol/sprite.svg#arrow"></use></svg></button>',
responsive: [
{
breakpoint: 1164,
settings: {
slidesToShow: 1,
slidesToScroll: 1,
infinite: true,
arrows: false,
dots: true
}
}
]
});
function initMap() {
// Map centre coordinates (Khmelnytskyi, Ukraine); the variable name is kept from the Google Maps sample
var uluru = {lat: 49.421036, lng: 26.976296};
// The map, centered on the coordinates above
var map = new google.maps.Map(
document.getElementById('map'), {
zoom: 15,
center: uluru,
disableDefaultUI: true,
zoomControl: false
});
// The marker, positioned at the map centre
var marker = new google.maps.Marker({position: uluru, map: map});
}
if(isOnPage('#map')){
initMap();
}
$(document).ready(function($) {
pageWidget(['index']);
});
// custom jQuery validation
//-----------------------------------------------------------------------------------
var validator = {
init: function () {
$('form').each(function () {
var $form = $(this);
var name = $form.attr('name');
if (validator.valitatorRules.hasOwnProperty(name) || $form.hasClass('js-validate')) {
var rules = validator.valitatorRules[name];
$form.validate({
rules: rules,
errorElement: 'b',
errorClass: 'error',
focusInvalid: true,
focusCleanup: false,
errorPlacement: function (error, element) {
validator.setError($(element), error);
},
highlight: function (element, errorClass, validClass) {
var $el = validator.defineElement($(element));
if ($(element).attr('type') == 'file'){
setTimeout(function () {
$(element).parents('label').find('b.error').addClass('file-error');
$(element).parents('.img-load').after($(element).parents('label').find('b.error'));
}, 100)
} else {
if ($el){
$el.closest('.el-text-fel').removeClass(validClass).addClass(errorClass);
}
}
},
unhighlight: function (element, errorClass, validClass) {
var $el = validator.defineElement($(element));
if ($el){
$el.closest('.el-text-fel').removeClass(errorClass).addClass(validClass);
}
},
onfocusout: function(element) {
// var $el = validator.defineElement($(element));
// $el.valid();
},
messages: validator.messages
});
}
});
},
setError: function ($el, message) {
$el = this.defineElement($el);
if ($el) this.domWorker.error($el, message);
},
defineElement: function ($el) {
return $el;
},
domWorker: {
error: function ($el, message) {
$el.closest('.el-text-fel').addClass('error');
$el.after(message);
}
},
messages: {
'field_test': {
required: 'This field is required.'
}
},
valitatorRules: {
'form_test': {
'field_test': {
required: true
}
}
}
};
validator.init();
// validate by data attribute
//-----------------------------------------------------------------------------------
(function(){
// add to validate form class 'js-validate'
// add to validate field data-valid="test"
//-----------------------------------------------------------------------------------
var rules = {
'name': {
required: true,
minlength: 2,
maxlength: 255,
messages: {
required: "Это поле обезательное для заполнения",
minlength: 'Минимум 2 символа',
maxlength: 'Максимально 255 символов'
}
},
'phone': {
required: true,
digits: true,
minlength: 8,
maxlength: 255,
messages: {
required: "Это поле обезательное для заполнения",
minlength: 'Минимум 8 символа',
maxlength: 'Максимально 255 символов',
digits: 'Вводите только цифры'
}
},
'company': {
minlength: 2,
maxlength: 255,
messages: {
minlength: 'Must have at least 2 characters!',
maxlength: 'No more than 255 characters.'
}
},
'message': {
minlength: 10,
maxlength: 500,
messages: {
minlength: 'Must have at least 10 characters!',
maxlength: 'No more than 500 characters.'
}
},
'email': {
required: true,
email: true,
maxlength: 255,
messages: {
required: "Это поле обезательное для заполнения",
email: 'Неправильный e-mail!',
maxlength: 'Максимально 255 символов'
}
},
'file': {
extension: "jpeg|jpg|png|doc|docx|pdf",
filesize: 30720000,
messages: {
extension: 'Invalid extension jpeg|jpg|png|doc|docx|pdf',
filesize: 'File must be less than 30mb.'
}
}
};
for (var ruleName in rules) {
$('[data-valid=' + ruleName + ']').each(function(){
$(this).rules('add', rules[ruleName]);
});
};
}());
// custom rules
//-----------------------------------------------------------------------------------
$.validator.addMethod("email", function (value) {
if (value == '') return true;
var regexp = /[a-zA-Zа-яА-ЯёЁ0-9!#$%&'*+/=?^_`{|}~-]+(?:\.[a-zA-Zа-яА-ЯёЁ0-9!#$%&'*+/=?^_`{|}~-]+)*@(?:[a-zA-Zа-яА-ЯёЁ0-9](?:[a-zA-Zа-яА-ЯёЁ0-9-]*[a-zA-Zа-яА-ЯёЁ0-9])?\.)+[a-zA-Zа-яА-ЯёЁ0-9](?:[a-zA-Zа-яА-ЯёЁ0-9-]*[a-zA-Zа-яА-ЯёЁ0-9])?/;
return regexp.test(value);
});
$.validator.addMethod("extension", function (value, element, param) {
param = typeof param === "string" ? param.replace(/,/g, '|') : "png|jpe?g|gif";
return this.optional(element) || value.match(new RegExp(".(" + param + ")$", "i"));
});
$.validator.addMethod('filesize', function (value, element, param) {
return this.optional(element) || (element.files[0].size <= param)
});
$.validator.addMethod("letters", function(value, element) {
return this.optional(element) || /^[^1-9!@#\$%\^&\*\(\)\[\]:;,.?=+_<>`~\\\/"]+$/i.test(value);
});
$.validator.addMethod("digits", function(value, element) {
return this.optional(element) || /^(\+?\d+)?\s*(\(\d+\))?[\s-]*([\d-]*)$/i.test(value);
});
$.validator.addMethod("valueNotEquals", function(value, element, arg){
return arg != value;
}, "Value must not equal arg.");
$.validator.addMethod( "require_from_group", function( value, element, options ) {
var $fields = $( options[ 1 ], element.form ),
$fieldsFirst = $fields.eq( 0 ),
validator = $fieldsFirst.data( "valid_req_grp" ) ? $fieldsFirst.data( "valid_req_grp" ) : $.extend( {}, this ),
isValid = $fields.filter( function() {
return validator.elementValue( this );
} ).length >= options[ 0 ];
$fieldsFirst.data( "valid_req_grp", validator );
if ( !$( element ).data( "being_validated" ) ) {
$fields.data( "being_validated", true );
$fields.each( function() {
validator.element( this );
} );
$fields.data( "being_validated", false );
}
return isValid;
}, $.validator.format( "Please fill at least {0} of these fields." ) );
$(function() {
var $container = $('.js-index');
$container.find('');
}); | {
var widgetWrap = $('<div class="widget_wrap"><ul class="widget_list"></ul></div>');
widgetWrap.prependTo("body");
for (var i = 0; i < pages.length; i++) {
if (pages[i][0] === '#') {
$('<li class="widget_item"><a class="widget_link" href="' + pages[i] +'">' + pages[i] + '</a></li>').appendTo('.widget_list');
} else {
$('<li class="widget_item"><a class="widget_link" href="' + pages[i] + '.html' + '">' + pages[i] + '</a></li>').appendTo('.widget_list');
}
}
var widgetStilization = $('<style>body {position:relative} .widget_wrap{position:fixed;top:0;left:0;z-index:9999;padding:20px 20px;background:#222;border-bottom-right-radius:10px;-webkit-transition:all .3s ease;transition:all .3s ease;-webkit-transform:translate(-100%,0);-ms-transform:translate(-100%,0);transform:translate(-100%,0)}.widget_wrap:after{content:" ";position:absolute;top:0;left:100%;width:24px;height:24px;background:#222 url(data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQAgMAAABinRfyAAAABGdBTUEAALGPC/xhBQAAAAxQTFRF////////AAAA////BQBkwgAAAAN0Uk5TxMMAjAd+zwAAACNJREFUCNdjqP///y/DfyBg+LVq1Xoo8W8/CkFYAmwA0Kg/AFcANT5fe7l4AAAAAElFTkSuQmCC) no-repeat 50% 50%;cursor:pointer}.widget_wrap:hover{-webkit-transform:translate(0,0);-ms-transform:translate(0,0);transform:translate(0,0)}.widget_item{padding:0 0 10px}.widget_link{color:#fff;text-decoration:none;font-size:15px;}.widget_link:hover{text-decoration:underline} </style>');
widgetStilization.prependTo(".widget_wrap");
} | identifier_body |
scripts.js | 'use strict';
// fixed svg show
//-----------------------------------------------------------------------------
svg4everybody();
// checking if element for page
//-----------------------------------------------------------------------------------
function isOnPage(selector) {
return ($(selector).length) ? $(selector) : false;
}
// search page
function pageWidget(pages) {
var widgetWrap = $('<div class="widget_wrap"><ul class="widget_list"></ul></div>');
widgetWrap.prependTo("body");
for (var i = 0; i < pages.length; i++) {
if (pages[i][0] === '#') {
$('<li class="widget_item"><a class="widget_link" href="' + pages[i] +'">' + pages[i] + '</a></li>').appendTo('.widget_list');
} else {
$('<li class="widget_item"><a class="widget_link" href="' + pages[i] + '.html' + '">' + pages[i] + '</a></li>').appendTo('.widget_list');
}
}
var widgetStilization = $('<style>body {position:relative} .widget_wrap{position:fixed;top:0;left:0;z-index:9999;padding:20px 20px;background:#222;border-bottom-right-radius:10px;-webkit-transition:all .3s ease;transition:all .3s ease;-webkit-transform:translate(-100%,0);-ms-transform:translate(-100%,0);transform:translate(-100%,0)}.widget_wrap:after{content:" ";position:absolute;top:0;left:100%;width:24px;height:24px;background:#222 url(data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQAgMAAABinRfyAAAABGdBTUEAALGPC/xhBQAAAAxQTFRF////////AAAA////BQBkwgAAAAN0Uk5TxMMAjAd+zwAAACNJREFUCNdjqP///y/DfyBg+LVq1Xoo8W8/CkFYAmwA0Kg/AFcANT5fe7l4AAAAAElFTkSuQmCC) no-repeat 50% 50%;cursor:pointer}.widget_wrap:hover{-webkit-transform:translate(0,0);-ms-transform:translate(0,0);transform:translate(0,0)}.widget_item{padding:0 0 10px}.widget_link{color:#fff;text-decoration:none;font-size:15px;}.widget_link:hover{text-decoration:underline} </style>');
widgetStilization.prependTo(".widget_wrap");
}
$('.section-block').slick({
infinite: true,
dots: false,
arrows: true,
slidesToShow: 1,
slidesToScroll: 1,
prevArrow: '<button type="button" class="slick-prev slick-arrow"><svg class="icon icon-arrow mod-arrow"><use xlink:href="assets/img/symbol/sprite.svg#arrow"></use></svg></button>',
nextArrow: '<button type="button" class="slick-next slick-arrow"><svg class="icon icon-arrow"><use xlink:href="assets/img/symbol/sprite.svg#arrow"></use></svg></button>'
});
$('.section-news').slick({
infinite: true,
dots: true,
arrows: true,
slidesToShow: 3,
slidesToScroll: 3,
prevArrow: '<button type="button" class="slick-prev slick-arrow"><svg class="icon icon-arrow mod-arrow"><use xlink:href="assets/img/symbol/sprite.svg#arrow"></use></svg></button>',
nextArrow: '<button type="button" class="slick-next slick-arrow"><svg class="icon icon-arrow"><use xlink:href="assets/img/symbol/sprite.svg#arrow"></use></svg></button>',
responsive: [
{
breakpoint: 1164,
settings: {
slidesToShow: 2,
slidesToScroll: 2,
infinite: true,
dots: true
}
},
{
breakpoint: 1000,
settings: {
slidesToShow: 2,
slidesToScroll: 2,
infinite: true,
arrows:false,
dots: true
}
},
{
breakpoint: 754,
settings: {
slidesToShow: 1,
slidesToScroll: 1,
infinite: true,
dots: true
}
},
{
breakpoint: 425,
settings: {
slidesToShow: 1,
slidesToScroll: 1,
infinite: true,
arrows:false,
dots: true
}
}
]
});
$('.header-menu-account').slick({
infinite: true,
dots: false,
arrows: true,
slidesToShow: 4,
slidesToScroll: 4,
prevArrow: '<button type="button" class="slick-prev slick-arrow"><svg class="icon icon-arrow mod-arrow mod-color-arrow"><use xlink:href="assets/img/symbol/sprite.svg#arrow"></use></svg></button>',
nextArrow: '<button type="button" class="slick-next slick-arrow"><svg class="icon icon-arrow mod-color-arrow"><use xlink:href="assets/img/symbol/sprite.svg#arrow"></use></svg></button>',
responsive: [
{
breakpoint: 1025,
settings: {
slidesToShow: 4,
slidesToScroll: 4,
infinite: true,
dots: false,
arrows: false,
}
},
{
breakpoint: 768,
settings: {
slidesToShow: 2,
slidesToScroll: 2,
infinite: true,
dots: false,
arrows: false,
}
},
{
breakpoint: 321,
settings: { | }
}
]
});
$('.trainer-block').slick({
infinite: true,
dots: true,
arrows: true,
slidesToShow: 4,
slidesToScroll: 4,
prevArrow: '<button type="button" class="slick-prev slick-arrow"><svg class="icon icon-arrow mod-arrow"><use xlink:href="assets/img/symbol/sprite.svg#arrow"></use></svg></button>',
nextArrow: '<button type="button" class="slick-next slick-arrow"><svg class="icon icon-arrow"><use xlink:href="assets/img/symbol/sprite.svg#arrow"></use></svg></button>',
responsive: [
{
breakpoint: 1164,
settings: {
slidesToShow: 3,
slidesToScroll: 3,
infinite: true,
dots: true
}
},
{
breakpoint: 951,
settings: {
slidesToShow: 2,
slidesToScroll: 2,
infinite: true,
dots: true
}
},
{
breakpoint: 542,
settings: {
slidesToShow: 1,
slidesToScroll: 1,
infinite: true,
dots: true
}
}
]
});
$('.archive-slider').slick({
infinite: true,
dots: false,
arrows: true,
slidesToShow: 1,
slidesToScroll: 1,
prevArrow: '<button type="button" class="slick-prev slick-arrow"><svg class="icon icon-arrow mod-arrow"><use xlink:href="assets/img/symbol/sprite.svg#arrow"></use></svg></button>',
nextArrow: '<button type="button" class="slick-next slick-arrow"><svg class="icon icon-arrow"><use xlink:href="assets/img/symbol/sprite.svg#arrow"></use></svg></button>',
responsive: [
{
breakpoint: 1164,
settings: {
slidesToShow: 1,
slidesToScroll: 1,
infinite: true,
arrows: false,
dots: true
}
}
]
});
function initMap() {
// Map centre coordinates (Khmelnytskyi, Ukraine); the variable name is kept from the Google Maps sample
var uluru = {lat: 49.421036, lng: 26.976296};
// The map, centered on the coordinates above
var map = new google.maps.Map(
document.getElementById('map'), {
zoom: 15,
center: uluru,
disableDefaultUI: true,
zoomControl: false
});
// The marker, positioned at the map centre
var marker = new google.maps.Marker({position: uluru, map: map});
}
if(isOnPage('#map')){
initMap();
}
$(document).ready(function($) {
pageWidget(['index']);
});
// custom jQuery validation
//-----------------------------------------------------------------------------------
var validator = {
init: function () {
$('form').each(function () {
var $form = $(this);
var name = $form.attr('name');
if (validator.valitatorRules.hasOwnProperty(name) || $form.hasClass('js-validate')) {
var rules = validator.valitatorRules[name];
$form.validate({
rules: rules,
errorElement: 'b',
errorClass: 'error',
focusInvalid: true,
focusCleanup: false,
errorPlacement: function (error, element) {
validator.setError($(element), error);
},
highlight: function (element, errorClass, validClass) {
var $el = validator.defineElement($(element));
if ($(element).attr('type') == 'file'){
setTimeout(function () {
$(element).parents('label').find('b.error').addClass('file-error');
$(element).parents('.img-load').after($(element).parents('label').find('b.error'));
}, 100)
} else {
if ($el){
$el.closest('.el-text-fel').removeClass(validClass).addClass(errorClass);
}
}
},
unhighlight: function (element, errorClass, validClass) {
var $el = validator.defineElement($(element));
if ($el){
$el.closest('.el-text-fel').removeClass(errorClass).addClass(validClass);
}
},
onfocusout: function(element) {
// var $el = validator.defineElement($(element));
// $el.valid();
},
messages: validator.messages
});
}
});
},
setError: function ($el, message) {
$el = this.defineElement($el);
if ($el) this.domWorker.error($el, message);
},
defineElement: function ($el) {
return $el;
},
domWorker: {
error: function ($el, message) {
$el.closest('.el-text-fel').addClass('error');
$el.after(message);
}
},
messages: {
'field_test': {
required: 'This field is required.'
}
},
valitatorRules: {
'form_test': {
'field_test': {
required: true
}
}
}
};
validator.init();
// validate by data attribute
//-----------------------------------------------------------------------------------
(function(){
// add to validate form class 'js-validate'
// add to validate field data-valid="test"
//-----------------------------------------------------------------------------------
var rules = {
'name': {
required: true,
minlength: 2,
maxlength: 255,
messages: {
required: "Это поле обезательное для заполнения",
minlength: 'Минимум 2 символа',
maxlength: 'Максимально 255 символов'
}
},
'phone': {
required: true,
digits: true,
minlength: 8,
maxlength: 255,
messages: {
required: "Это поле обезательное для заполнения",
minlength: 'Минимум 8 символа',
maxlength: 'Максимально 255 символов',
digits: 'Вводите только цифры'
}
},
'company': {
minlength: 2,
maxlength: 255,
messages: {
minlength: 'Must have at least 2 characters!',
maxlength: 'No more than 255 characters.'
}
},
'message': {
minlength: 10,
maxlength: 500,
messages: {
minlength: 'Must have at least 10 characters!',
maxlength: 'No more than 500 characters.'
}
},
'email': {
required: true,
email: true,
maxlength: 255,
messages: {
required: "Это поле обезательное для заполнения",
email: 'Неправильный e-mail!',
maxlength: 'Максимально 255 символов'
}
},
'file': {
extension: "jpeg|jpg|png|doc|docx|pdf",
filesize: 30720000,
messages: {
extension: 'Invalid extension jpeg|jpg|png|doc|docx|pdf',
filesize: 'File must be less than 30mb.'
}
}
};
for (var ruleName in rules) {
$('[data-valid=' + ruleName + ']').each(function(){
$(this).rules('add', rules[ruleName]);
});
};
}());
// custom rules
//-----------------------------------------------------------------------------------
$.validator.addMethod("email", function (value) {
if (value == '') return true;
var regexp = /[a-zA-Zа-яА-ЯёЁ0-9!#$%&'*+/=?^_`{|}~-]+(?:\.[a-zA-Zа-яА-ЯёЁ0-9!#$%&'*+/=?^_`{|}~-]+)*@(?:[a-zA-Zа-яА-ЯёЁ0-9](?:[a-zA-Zа-яА-ЯёЁ0-9-]*[a-zA-Zа-яА-ЯёЁ0-9])?\.)+[a-zA-Zа-яА-ЯёЁ0-9](?:[a-zA-Zа-яА-ЯёЁ0-9-]*[a-zA-Zа-яА-ЯёЁ0-9])?/;
return regexp.test(value);
});
$.validator.addMethod("extension", function (value, element, param) {
param = typeof param === "string" ? param.replace(/,/g, '|') : "png|jpe?g|gif";
return this.optional(element) || value.match(new RegExp(".(" + param + ")$", "i"));
});
$.validator.addMethod('filesize', function (value, element, param) {
return this.optional(element) || (element.files[0].size <= param)
});
$.validator.addMethod("letters", function(value, element) {
return this.optional(element) || /^[^1-9!@#\$%\^&\*\(\)\[\]:;,.?=+_<>`~\\\/"]+$/i.test(value);
});
$.validator.addMethod("digits", function(value, element) {
return this.optional(element) || /^(\+?\d+)?\s*(\(\d+\))?[\s-]*([\d-]*)$/i.test(value);
});
$.validator.addMethod("valueNotEquals", function(value, element, arg){
return arg != value;
}, "Value must not equal arg.");
$.validator.addMethod( "require_from_group", function( value, element, options ) {
var $fields = $( options[ 1 ], element.form ),
$fieldsFirst = $fields.eq( 0 ),
validator = $fieldsFirst.data( "valid_req_grp" ) ? $fieldsFirst.data( "valid_req_grp" ) : $.extend( {}, this ),
isValid = $fields.filter( function() {
return validator.elementValue( this );
} ).length >= options[ 0 ];
$fieldsFirst.data( "valid_req_grp", validator );
if ( !$( element ).data( "being_validated" ) ) {
$fields.data( "being_validated", true );
$fields.each( function() {
validator.element( this );
} );
$fields.data( "being_validated", false );
}
return isValid;
}, $.validator.format( "Please fill at least {0} of these fields." ) );
$(function() {
var $container = $('.js-index');
$container.find('');
}); | slidesToShow: 1,
slidesToScroll: 1,
infinite: true,
dots: false,
arrows: false, | random_line_split |
scripts.js | 'use strict';
// fixed svg show
//-----------------------------------------------------------------------------
svg4everybody();
// checking if element for page
//-----------------------------------------------------------------------------------
function isOnPage(selector) {
return ($(selector).length) ? $(selector) : false;
}
// search page
function pageWidget(pages) {
var widgetWrap = $('<div class="widget_wrap"><ul class="widget_list"></ul></div>');
widgetWrap.prependTo("body");
for (var i = 0; i < pages.length; i++) {
if (pages[i][0] === '#') {
$('<li class="widget_item"><a class="widget_link" href="' + pages[i] +'">' + pages[i] + '</a></li>').appendTo('.widget_list');
} else {
$('<li class="widget_item"><a class="widget_link" href="' + pages[i] + '.html' + '">' + pages[i] + '</a></li>').appendTo('.widget_list');
}
}
var widgetStilization = $('<style>body {position:relative} .widget_wrap{position:fixed;top:0;left:0;z-index:9999;padding:20px 20px;background:#222;border-bottom-right-radius:10px;-webkit-transition:all .3s ease;transition:all .3s ease;-webkit-transform:translate(-100%,0);-ms-transform:translate(-100%,0);transform:translate(-100%,0)}.widget_wrap:after{content:" ";position:absolute;top:0;left:100%;width:24px;height:24px;background:#222 url(data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQAgMAAABinRfyAAAABGdBTUEAALGPC/xhBQAAAAxQTFRF////////AAAA////BQBkwgAAAAN0Uk5TxMMAjAd+zwAAACNJREFUCNdjqP///y/DfyBg+LVq1Xoo8W8/CkFYAmwA0Kg/AFcANT5fe7l4AAAAAElFTkSuQmCC) no-repeat 50% 50%;cursor:pointer}.widget_wrap:hover{-webkit-transform:translate(0,0);-ms-transform:translate(0,0);transform:translate(0,0)}.widget_item{padding:0 0 10px}.widget_link{color:#fff;text-decoration:none;font-size:15px;}.widget_link:hover{text-decoration:underline} </style>');
widgetStilization.prependTo(".widget_wrap");
}
$('.section-block').slick({
infinite: true,
dots: false,
arrows: true,
slidesToShow: 1,
slidesToScroll: 1,
prevArrow: '<button type="button" class="slick-prev slick-arrow"><svg class="icon icon-arrow mod-arrow"><use xlink:href="assets/img/symbol/sprite.svg#arrow"></use></svg></button>',
nextArrow: '<button type="button" class="slick-next slick-arrow"><svg class="icon icon-arrow"><use xlink:href="assets/img/symbol/sprite.svg#arrow"></use></svg></button>'
});
$('.section-news').slick({
infinite: true,
dots: true,
arrows: true,
slidesToShow: 3,
slidesToScroll: 3,
prevArrow: '<button type="button" class="slick-prev slick-arrow"><svg class="icon icon-arrow mod-arrow"><use xlink:href="assets/img/symbol/sprite.svg#arrow"></use></svg></button>',
nextArrow: '<button type="button" class="slick-next slick-arrow"><svg class="icon icon-arrow"><use xlink:href="assets/img/symbol/sprite.svg#arrow"></use></svg></button>',
responsive: [
{
breakpoint: 1164,
settings: {
slidesToShow: 2,
slidesToScroll: 2,
infinite: true,
dots: true
}
},
{
breakpoint: 1000,
settings: {
slidesToShow: 2,
slidesToScroll: 2,
infinite: true,
arrows:false,
dots: true
}
},
{
breakpoint: 754,
settings: {
slidesToShow: 1,
slidesToScroll: 1,
infinite: true,
dots: true
}
},
{
breakpoint: 425,
settings: {
slidesToShow: 1,
slidesToScroll: 1,
infinite: true,
arrows:false,
dots: true
}
}
]
});
$('.header-menu-account').slick({
infinite: true,
dots: false,
arrows: true,
slidesToShow: 4,
slidesToScroll: 4,
prevArrow: '<button type="button" class="slick-prev slick-arrow"><svg class="icon icon-arrow mod-arrow mod-color-arrow"><use xlink:href="assets/img/symbol/sprite.svg#arrow"></use></svg></button>',
nextArrow: '<button type="button" class="slick-next slick-arrow"><svg class="icon icon-arrow mod-color-arrow"><use xlink:href="assets/img/symbol/sprite.svg#arrow"></use></svg></button>',
responsive: [
{
breakpoint: 1025,
settings: {
slidesToShow: 4,
slidesToScroll: 4,
infinite: true,
dots: false,
arrows: false,
}
},
{
breakpoint: 768,
settings: {
slidesToShow: 2,
slidesToScroll: 2,
infinite: true,
dots: false,
arrows: false,
}
},
{
breakpoint: 321,
settings: {
slidesToShow: 1,
slidesToScroll: 1,
infinite: true,
dots: false,
arrows: false,
}
}
]
});
$('.trainer-block').slick({
infinite: true,
dots: true,
arrows: true,
slidesToShow: 4,
slidesToScroll: 4,
prevArrow: '<button type="button" class="slick-prev slick-arrow"><svg class="icon icon-arrow mod-arrow"><use xlink:href="assets/img/symbol/sprite.svg#arrow"></use></svg></button>',
nextArrow: '<button type="button" class="slick-next slick-arrow"><svg class="icon icon-arrow"><use xlink:href="assets/img/symbol/sprite.svg#arrow"></use></svg></button>',
responsive: [
{
breakpoint: 1164,
settings: {
slidesToShow: 3,
slidesToScroll: 3,
infinite: true,
dots: true
}
},
{
breakpoint: 951,
settings: {
slidesToShow: 2,
slidesToScroll: 2,
infinite: true,
dots: true
}
},
{
breakpoint: 542,
settings: {
slidesToShow: 1,
slidesToScroll: 1,
infinite: true,
dots: true
}
}
]
});
$('.archive-slider').slick({
infinite: true,
dots: false,
arrows: true,
slidesToShow: 1,
slidesToScroll: 1,
prevArrow: '<button type="button" class="slick-prev slick-arrow"><svg class="icon icon-arrow mod-arrow"><use xlink:href="assets/img/symbol/sprite.svg#arrow"></use></svg></button>',
nextArrow: '<button type="button" class="slick-next slick-arrow"><svg class="icon icon-arrow"><use xlink:href="assets/img/symbol/sprite.svg#arrow"></use></svg></button>',
responsive: [
{
breakpoint: 1164,
settings: {
slidesToShow: 1,
slidesToScroll: 1,
infinite: true,
arrows: false,
dots: true
}
}
]
});
function | () {
// The location of Uluru
var uluru = {lat: 49.421036, lng: 26.976296};
// The map, centered at Uluru
var map = new google.maps.Map(
document.getElementById('map'), {
zoom: 15,
center: uluru,
disableDefaultUI: true,
zoomControl: false
});
// The marker, positioned at Uluru
var marker = new google.maps.Marker({position: uluru, map: map});
}
if(isOnPage('#map')){
initMap();
}
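// Note: initMap assumes the Google Maps JavaScript API (the global google.maps object)
// has already been loaded by the page before this script runs.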
$(document).ready(function($) {
pageWidget(['index']);
});
// custom jQuery validation
//-----------------------------------------------------------------------------------
var validator = {
init: function () {
$('form').each(function () {
var $form = $(this);
var name = $form.attr('name');
if (validator.valitatorRules.hasOwnProperty(name) || $form.hasClass('js-validate')) {
var rules = validator.valitatorRules[name];
$form.validate({
rules: rules,
errorElement: 'b',
errorClass: 'error',
focusInvalid: true,
focusCleanup: false,
errorPlacement: function (error, element) {
validator.setError($(element), error);
},
highlight: function (element, errorClass, validClass) {
var $el = validator.defineElement($(element));
if ($(element).attr('type') == 'file'){
setTimeout(function () {
$(element).parents('label').find('b.error').addClass('file-error');
$(element).parents('.img-load').after($(element).parents('label').find('b.error'));
}, 100)
} else {
if ($el){
$el.closest('.el-text-fel').removeClass(validClass).addClass(errorClass);
}
}
},
unhighlight: function (element, errorClass, validClass) {
var $el = validator.defineElement($(element));
if ($el){
$el.closest('.el-text-fel').removeClass(errorClass).addClass(validClass);
}
},
onfocusout: function(element) {
// var $el = validator.defineElement($(element));
// $el.valid();
},
messages: validator.messages
});
}
});
},
setError: function ($el, message) {
$el = this.defineElement($el);
if ($el) this.domWorker.error($el, message);
},
defineElement: function ($el) {
return $el;
},
domWorker: {
error: function ($el, message) {
$el.closest('.el-text-fel').addClass('error');
$el.after(message);
}
},
messages: {
'field_test': {
required: 'This field is required.'
}
},
valitatorRules: {
'form_test': {
'field_test': {
required: true
}
}
}
};
validator.init();
// validate by data attribute
//-----------------------------------------------------------------------------------
(function(){
// add to validate form class 'js-validate'
// add to validate field data-valid="test"
//-----------------------------------------------------------------------------------
var rules = {
'name': {
required: true,
minlength: 2,
maxlength: 255,
messages: {
required: "Это поле обезательное для заполнения",
minlength: 'Минимум 2 символа',
maxlength: 'Максимально 255 символов'
}
},
'phone': {
required: true,
digits: true,
minlength: 8,
maxlength: 255,
messages: {
required: "Это поле обезательное для заполнения",
minlength: 'Минимум 8 символа',
maxlength: 'Максимально 255 символов',
digits: 'Вводите только цифры'
}
},
'company': {
minlength: 2,
maxlength: 255,
messages: {
minlength: 'Must have at least 2 characters!',
maxlength: 'No more than 255 characters.'
}
},
'message': {
minlength: 10,
maxlength: 500,
messages: {
minlength: 'Must have at least 10 characters!',
maxlength: 'No more than 500 characters.'
}
},
'email': {
required: true,
email: true,
maxlength: 255,
messages: {
required: "Это поле обезательное для заполнения",
email: 'Неправильный e-mail!',
maxlength: 'Максимально 255 символов'
}
},
'file': {
extension: "jpeg|jpg|png|doc|docx|pdf",
filesize: 30720000,
messages: {
extension: 'Invalid extension jpeg|jpg|png|doc|docx|pdf',
filesize: 'File must be less than 30mb.'
}
}
};
for (var ruleName in rules) {
$('[data-valid=' + ruleName + ']').each(function(){
$(this).rules('add', rules[ruleName]);
});
};
}());
// custom rules
//-----------------------------------------------------------------------------------
$.validator.addMethod("email", function (value) {
if (value == '') return true;
var regexp = /[a-zA-Zа-яА-ЯёЁ0-9!#$%&'*+/=?^_`{|}~-]+(?:\.[a-zA-Zа-яА-ЯёЁ0-9!#$%&'*+/=?^_`{|}~-]+)*@(?:[a-zA-Zа-яА-ЯёЁ0-9](?:[a-zA-Zа-яА-ЯёЁ0-9-]*[a-zA-Zа-яА-ЯёЁ0-9])?\.)+[a-zA-Zа-яА-ЯёЁ0-9](?:[a-zA-Zа-яА-ЯёЁ0-9-]*[a-zA-Zа-яА-ЯёЁ0-9])?/;
return regexp.test(value);
});
$.validator.addMethod("extension", function (value, element, param) {
param = typeof param === "string" ? param.replace(/,/g, '|') : "png|jpe?g|gif";
return this.optional(element) || value.match(new RegExp(".(" + param + ")$", "i"));
});
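// filesize: param below is the maximum allowed size in bytes (30720000 is roughly 30 MB)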
$.validator.addMethod('filesize', function (value, element, param) {
return this.optional(element) || (element.files[0].size <= param)
});
$.validator.addMethod("letters", function(value, element) {
return this.optional(element) || /^[^1-9!@#\$%\^&\*\(\)\[\]:;,.?=+_<>`~\\\/"]+$/i.test(value);
});
$.validator.addMethod("digits", function(value, element) {
return this.optional(element) || /^(\+?\d+)?\s*(\(\d+\))?[\s-]*([\d-]*)$/i.test(value);
});
$.validator.addMethod("valueNotEquals", function(value, element, arg){
return arg != value;
}, "Value must not equal arg.");
$.validator.addMethod( "require_from_group", function( value, element, options ) {
var $fields = $( options[ 1 ], element.form ),
$fieldsFirst = $fields.eq( 0 ),
validator = $fieldsFirst.data( "valid_req_grp" ) ? $fieldsFirst.data( "valid_req_grp" ) : $.extend( {}, this ),
isValid = $fields.filter( function() {
return validator.elementValue( this );
} ).length >= options[ 0 ];
$fieldsFirst.data( "valid_req_grp", validator );
if ( !$( element ).data( "being_validated" ) ) {
$fields.data( "being_validated", true );
$fields.each( function() {
validator.element( this );
} );
$fields.data( "being_validated", false );
}
return isValid;
}, $.validator.format( "Please fill at least {0} of these fields." ) );
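// Usage sketch (assumed markup): add { require_from_group: [1, ".js-req-group"] } to each
// field's rules, where 1 is how many of the fields matched by the selector must be filled in.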
$(function() {
var $container = $('.js-index');
$container.find('');
}); | initMap | identifier_name |
scripts.js | 'use strict';
// fixed svg show
//-----------------------------------------------------------------------------
svg4everybody();
// check whether an element exists on the page
//-----------------------------------------------------------------------------------
function isOnPage(selector) {
return ($(selector).length) ? $(selector) : false;
}
// dev widget listing the project pages
function pageWidget(pages) {
var widgetWrap = $('<div class="widget_wrap"><ul class="widget_list"></ul></div>');
widgetWrap.prependTo("body");
for (var i = 0; i < pages.length; i++) {
if (pages[i][0] === '#') {
$('<li class="widget_item"><a class="widget_link" href="' + pages[i] +'">' + pages[i] + '</a></li>').appendTo('.widget_list');
} else {
$('<li class="widget_item"><a class="widget_link" href="' + pages[i] + '.html' + '">' + pages[i] + '</a></li>').appendTo('.widget_list');
}
}
var widgetStilization = $('<style>body {position:relative} .widget_wrap{position:fixed;top:0;left:0;z-index:9999;padding:20px 20px;background:#222;border-bottom-right-radius:10px;-webkit-transition:all .3s ease;transition:all .3s ease;-webkit-transform:translate(-100%,0);-ms-transform:translate(-100%,0);transform:translate(-100%,0)}.widget_wrap:after{content:" ";position:absolute;top:0;left:100%;width:24px;height:24px;background:#222 url(data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQAgMAAABinRfyAAAABGdBTUEAALGPC/xhBQAAAAxQTFRF////////AAAA////BQBkwgAAAAN0Uk5TxMMAjAd+zwAAACNJREFUCNdjqP///y/DfyBg+LVq1Xoo8W8/CkFYAmwA0Kg/AFcANT5fe7l4AAAAAElFTkSuQmCC) no-repeat 50% 50%;cursor:pointer}.widget_wrap:hover{-webkit-transform:translate(0,0);-ms-transform:translate(0,0);transform:translate(0,0)}.widget_item{padding:0 0 10px}.widget_link{color:#fff;text-decoration:none;font-size:15px;}.widget_link:hover{text-decoration:underline} </style>');
widgetStilization.prependTo(".widget_wrap");
}
$('.section-block').slick({
infinite: true,
dots: false,
arrows: true,
slidesToShow: 1,
slidesToScroll: 1,
prevArrow: '<button type="button" class="slick-prev slick-arrow"><svg class="icon icon-arrow mod-arrow"><use xlink:href="assets/img/symbol/sprite.svg#arrow"></use></svg></button>',
nextArrow: '<button type="button" class="slick-next slick-arrow"><svg class="icon icon-arrow"><use xlink:href="assets/img/symbol/sprite.svg#arrow"></use></svg></button>'
});
$('.section-news').slick({
infinite: true,
dots: true,
arrows: true,
slidesToShow: 3,
slidesToScroll: 3,
prevArrow: '<button type="button" class="slick-prev slick-arrow"><svg class="icon icon-arrow mod-arrow"><use xlink:href="assets/img/symbol/sprite.svg#arrow"></use></svg></button>',
nextArrow: '<button type="button" class="slick-next slick-arrow"><svg class="icon icon-arrow"><use xlink:href="assets/img/symbol/sprite.svg#arrow"></use></svg></button>',
responsive: [
{
breakpoint: 1164,
settings: {
slidesToShow: 2,
slidesToScroll: 2,
infinite: true,
dots: true
}
},
{
breakpoint: 1000,
settings: {
slidesToShow: 2,
slidesToScroll: 2,
infinite: true,
arrows:false,
dots: true
}
},
{
breakpoint: 754,
settings: {
slidesToShow: 1,
slidesToScroll: 1,
infinite: true,
dots: true
}
},
{
breakpoint: 425,
settings: {
slidesToShow: 1,
slidesToScroll: 1,
infinite: true,
arrows:false,
dots: true
}
}
]
});
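// the responsive settings above drop slidesToShow from 3 to 2 below 1164px and to 1
// below 754px; the arrows are switched off at the 1000px and 425px breakpoints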
$('.header-menu-account').slick({
infinite: true,
dots: false,
arrows: true,
slidesToShow: 4,
slidesToScroll: 4,
prevArrow: '<button type="button" class="slick-prev slick-arrow"><svg class="icon icon-arrow mod-arrow mod-color-arrow"><use xlink:href="assets/img/symbol/sprite.svg#arrow"></use></svg></button>',
nextArrow: '<button type="button" class="slick-next slick-arrow"><svg class="icon icon-arrow mod-color-arrow"><use xlink:href="assets/img/symbol/sprite.svg#arrow"></use></svg></button>',
responsive: [
{
breakpoint: 1025,
settings: {
slidesToShow: 4,
slidesToScroll: 4,
infinite: true,
dots: false,
arrows: false,
}
},
{
breakpoint: 768,
settings: {
slidesToShow: 2,
slidesToScroll: 2,
infinite: true,
dots: false,
arrows: false,
}
},
{
breakpoint: 321,
settings: {
slidesToShow: 1,
slidesToScroll: 1,
infinite: true,
dots: false,
arrows: false,
}
}
]
});
$('.trainer-block').slick({
infinite: true,
dots: true,
arrows: true,
slidesToShow: 4,
slidesToScroll: 4,
prevArrow: '<button type="button" class="slick-prev slick-arrow"><svg class="icon icon-arrow mod-arrow"><use xlink:href="assets/img/symbol/sprite.svg#arrow"></use></svg></button>',
nextArrow: '<button type="button" class="slick-next slick-arrow"><svg class="icon icon-arrow"><use xlink:href="assets/img/symbol/sprite.svg#arrow"></use></svg></button>',
responsive: [
{
breakpoint: 1164,
settings: {
slidesToShow: 3,
slidesToScroll: 3,
infinite: true,
dots: true
}
},
{
breakpoint: 951,
settings: {
slidesToShow: 2,
slidesToScroll: 2,
infinite: true,
dots: true
}
},
{
breakpoint: 542,
settings: {
slidesToShow: 1,
slidesToScroll: 1,
infinite: true,
dots: true
}
}
]
});
$('.archive-slider').slick({
infinite: true,
dots: false,
arrows: true,
slidesToShow: 1,
slidesToScroll: 1,
prevArrow: '<button type="button" class="slick-prev slick-arrow"><svg class="icon icon-arrow mod-arrow"><use xlink:href="assets/img/symbol/sprite.svg#arrow"></use></svg></button>',
nextArrow: '<button type="button" class="slick-next slick-arrow"><svg class="icon icon-arrow"><use xlink:href="assets/img/symbol/sprite.svg#arrow"></use></svg></button>',
responsive: [
{
breakpoint: 1164,
settings: {
slidesToShow: 1,
slidesToScroll: 1,
infinite: true,
arrows: false,
dots: true
}
}
]
});
function initMap() {
// The location of Uluru
var uluru = {lat: 49.421036, lng: 26.976296};
// The map, centered at Uluru
var map = new google.maps.Map(
document.getElementById('map'), {
zoom: 15,
center: uluru,
disableDefaultUI: true,
zoomControl: false
});
// The marker, positioned at Uluru
var marker = new google.maps.Marker({position: uluru, map: map});
}
if(isOnPage('#map')){
initMap();
}
$(document).ready(function($) {
pageWidget(['index']);
});
// custom jQuery validation
//-----------------------------------------------------------------------------------
var validator = {
init: function () {
$('form').each(function () {
var $form = $(this);
var name = $form.attr('name');
if (validator.valitatorRules.hasOwnProperty(name) || $form.hasClass('js-validate')) |
});
},
setError: function ($el, message) {
$el = this.defineElement($el);
if ($el) this.domWorker.error($el, message);
},
defineElement: function ($el) {
return $el;
},
domWorker: {
error: function ($el, message) {
$el.closest('.el-text-fel').addClass('error');
$el.after(message);
}
},
messages: {
'field_test': {
required: 'This field is required.'
}
},
valitatorRules: {
'form_test': {
'field_test': {
required: true
}
}
}
};
validator.init();
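// validator.init() attaches jQuery Validation to every form whose name appears in
// valitatorRules or that has the js-validate class; errors are rendered as <b class="error">
// elements and the closest .el-text-fel wrapper gets the error/valid class toggled.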
// validate by data attribute
//-----------------------------------------------------------------------------------
(function(){
// add to validate form class 'js-validate'
// add to validate field data-valid="test"
//-----------------------------------------------------------------------------------
var rules = {
'name': {
required: true,
minlength: 2,
maxlength: 255,
messages: {
required: "Это поле обезательное для заполнения",
minlength: 'Минимум 2 символа',
maxlength: 'Максимально 255 символов'
}
},
'phone': {
required: true,
digits: true,
minlength: 8,
maxlength: 255,
messages: {
required: "Это поле обезательное для заполнения",
minlength: 'Минимум 8 символа',
maxlength: 'Максимально 255 символов',
digits: 'Вводите только цифры'
}
},
'company': {
minlength: 2,
maxlength: 255,
messages: {
minlength: 'Must have at least 2 characters!',
maxlength: 'No more than 255 characters.'
}
},
'message': {
minlength: 10,
maxlength: 500,
messages: {
minlength: 'Must have at least 10 characters!',
maxlength: 'No more than 500 characters.'
}
},
'email': {
required: true,
email: true,
maxlength: 255,
messages: {
required: "Это поле обезательное для заполнения",
email: 'Неправильный e-mail!',
maxlength: 'Максимально 255 символов'
}
},
'file': {
extension: "jpeg|jpg|png|doc|docx|pdf",
filesize: 30720000,
messages: {
extension: 'Invalid extension jpeg|jpg|png|doc|docx|pdf',
filesize: 'File must be less than 30mb.'
}
}
};
for (var ruleName in rules) {
$('[data-valid=' + ruleName + ']').each(function(){
$(this).rules('add', rules[ruleName]);
});
};
}());
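// note: $(...).rules('add', ...) only works on fields inside a form that has already been
// initialised with .validate(), so this block relies on validator.init() above having run.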
// custom rules
//-----------------------------------------------------------------------------------
$.validator.addMethod("email", function (value) {
if (value == '') return true;
var regexp = /[a-zA-Zа-яА-ЯёЁ0-9!#$%&'*+/=?^_`{|}~-]+(?:\.[a-zA-Zа-яА-ЯёЁ0-9!#$%&'*+/=?^_`{|}~-]+)*@(?:[a-zA-Zа-яА-ЯёЁ0-9](?:[a-zA-Zа-яА-ЯёЁ0-9-]*[a-zA-Zа-яА-ЯёЁ0-9])?\.)+[a-zA-Zа-яА-ЯёЁ0-9](?:[a-zA-Zа-яА-ЯёЁ0-9-]*[a-zA-Zа-яА-ЯёЁ0-9])?/;
return regexp.test(value);
});
$.validator.addMethod("extension", function (value, element, param) {
param = typeof param === "string" ? param.replace(/,/g, '|') : "png|jpe?g|gif";
return this.optional(element) || value.match(new RegExp(".(" + param + ")$", "i"));
});
$.validator.addMethod('filesize', function (value, element, param) {
return this.optional(element) || (element.files[0].size <= param)
});
$.validator.addMethod("letters", function(value, element) {
return this.optional(element) || /^[^1-9!@#\$%\^&\*\(\)\[\]:;,.?=+_<>`~\\\/"]+$/i.test(value);
});
$.validator.addMethod("digits", function(value, element) {
return this.optional(element) || /^(\+?\d+)?\s*(\(\d+\))?[\s-]*([\d-]*)$/i.test(value);
});
$.validator.addMethod("valueNotEquals", function(value, element, arg){
return arg != value;
}, "Value must not equal arg.");
$.validator.addMethod( "require_from_group", function( value, element, options ) {
var $fields = $( options[ 1 ], element.form ),
$fieldsFirst = $fields.eq( 0 ),
validator = $fieldsFirst.data( "valid_req_grp" ) ? $fieldsFirst.data( "valid_req_grp" ) : $.extend( {}, this ),
isValid = $fields.filter( function() {
return validator.elementValue( this );
} ).length >= options[ 0 ];
$fieldsFirst.data( "valid_req_grp", validator );
if ( !$( element ).data( "being_validated" ) ) {
$fields.data( "being_validated", true );
$fields.each( function() {
validator.element( this );
} );
$fields.data( "being_validated", false );
}
return isValid;
}, $.validator.format( "Please fill at least {0} of these fields." ) );
$(function() {
var $container = $('.js-index');
$container.find('');
}); | {
var rules = validator.valitatorRules[name];
$form.validate({
rules: rules,
errorElement: 'b',
errorClass: 'error',
focusInvalid: true,
focusCleanup: false,
errorPlacement: function (error, element) {
validator.setError($(element), error);
},
highlight: function (element, errorClass, validClass) {
var $el = validator.defineElement($(element));
if ($(element).attr('type') == 'file'){
setTimeout(function () {
$(element).parents('label').find('b.error').addClass('file-error');
$(element).parents('.img-load').after($(element).parents('label').find('b.error'));
}, 100)
} else {
if ($el){
$el.closest('.el-text-fel').removeClass(validClass).addClass(errorClass);
}
}
},
unhighlight: function (element, errorClass, validClass) {
var $el = validator.defineElement($(element));
if ($el){
$el.closest('.el-text-fel').removeClass(errorClass).addClass(validClass);
}
},
onfocusout: function(element) {
// var $el = validator.defineElement($(element));
// $el.valid();
},
messages: validator.messages
});
} | conditional_block |
objects.rs | use ggez::input::keyboard;
use ggez::{graphics, Context, GameResult};
use graphics::{Mesh, MeshBuilder, DrawParam};
use crate::game;
use game::movement::Movement;
use game::Draw;
//#region Ship
/// The Ship.\
/// Width and height are sort of switched here.\
/// This is because the mesh is made to face the right but then rotated upwards.\
/// I thought it would make more sense like this but it kind of didn't, but whatever, who cares.\
pub struct Ship {
pub width: f32,
pub height: f32,
pub x: f32,
pub y: f32,
pub mov: Movement,
/// Current rotation in radials
pub rotation: f32,
/// Force to add to the movement calculation when using rocket
pub movement_force: f32,
/// Speed of rotation in radials per tick
pub rotation_speed: f32,
/// How many times the ship can fire a laser per second
pub fire_rate: f32,
/// Time the ship fired for the last time
pub last_fire: std::time::Instant,
/// If the ship is currently using its rocket
pub moving: bool
}
impl Ship {
pub fn new(ctx: &Context) -> Ship |
/// Handle keyboard inputs and update the location of the Ship accordingly
pub fn update_movement(&mut self, ctx: &Context) {
/* The current implementation does not allow external forces
This could be easily achieved by having this call take additional params which set
some force before movement calculation like gravity. This is (currently) not needed for this game.*/
self.mov.force_x = 0.0;
self.mov.force_y = 0.0;
self.moving = false;
if keyboard::is_key_pressed(ctx, keyboard::KeyCode::A) {
self.rotation -= self.rotation_speed;
}
if keyboard::is_key_pressed(ctx, keyboard::KeyCode::D) {
self.rotation += self.rotation_speed;
}
if keyboard::is_key_pressed(ctx, keyboard::KeyCode::W) {
self.mov.force_x += self.rotation.cos() * self.movement_force;
self.mov.force_y += self.rotation.sin() * self.movement_force;
self.moving = true;
}
// Movement structs handles the physics
self.mov.update();
self.x += self.mov.speed_x;
self.y += self.mov.speed_y;
}
/// Add a laser to the gamestate appearing from the ship
pub fn shoot(&self, lasers: &mut Vec<game::Laser>) {
lasers.push(game::Laser::new(
self.x + self.height /2.0,
self.y - self.width / 2.0,
self.rotation)
);
}
pub fn debug_string(&self) -> String {
format!(
"Force x: {}\n\
Force y: {}\n\
Acceleration x: {}\n\
Acceleration y: {}\n\
Speed x: {}\n\
Speed y: {}\n\
Rotation speed: {}\n",
self.mov.force_x,
self.mov.force_y,
self.mov.acceleration_x,
self.mov.acceleration_y,
self.mov.speed_x,
self.mov.speed_y,
self.rotation_speed
)
}
}
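// Rough per-tick usage (assumed; the actual game loop lives elsewhere in the crate):
//     ship.update_movement(ctx);
//     if fire_pressed && ship.last_fire.elapsed().as_secs_f32() >= 1.0 / ship.fire_rate {
//         ship.shoot(&mut lasers);
//         ship.last_fire = std::time::Instant::now();
//     }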
impl game::Draw for Ship {
fn mesh(&self, ctx: &mut Context) -> GameResult<Mesh> {
let mut mesh = MeshBuilder::new();
/*
With these points you could make the center of the mesh be the actual center of the triangle
This would make writing hit detection easier, and would make the offset trivial.
But I did not immediately implement it like this
and I don't want to redo the rocket fire mesh right now, so I am leaving this comment instead
[-self.height/2.0, -self.width/2.0],
[ self.height/2.0, 0.0],
[-self.height/2.0, self.width/2.0],
[-self.height/2.0, -self.width/2.0]
*/
// Could be a polygon as well
mesh.line(
&[
[0.0, 0.0],
[self.height, -self.width / 2.0],
[0.0, -self.width],
[0.0,0.0]
],
1.3,
graphics::WHITE
)?;
// Draw fire behind rocket
if self.moving {
mesh.line(
&[
[ - 0.0, - 0.1 * self.width],
[ - 0.3 * self.height, - 0.233 * self.width],
[ - 0.2 * self.height, - 0.366 * self.width],
[ - 0.6 * self.height, - 0.5 * self.width],
[ - 0.2 * self.height, - 0.633 * self.width],
[ - 0.3 * self.height, - 0.766 * self.width],
[ - 0.0 * self.height, - 0.9 * self.width]
],
1.3,
graphics::WHITE
)?;
}
mesh.build(ctx)
}
fn draw_param(&self) -> DrawParam {
DrawParam::new()
.dest([self.x, self.y])
.offset([0.5 * self.height, 0.5 * -self.width])
.rotation(self.rotation)
}
}
//#endregion
//#region Laser
/// Laser that has been fired from Ship
pub struct Laser {
pub x: f32,
pub y: f32,
pub rotation: f32,
speed: f32,
width: f32
}
impl Laser {
pub fn new(x: f32, y: f32, rotation: f32) -> Laser {
Laser {
x,
y,
rotation,
speed: 17.0,
width: 15.0
}
}
pub fn update(&mut self) {
self.x += self.rotation.cos() * self.speed;
self.y += self.rotation.sin() * self.speed;
}
}
impl Draw for Laser {
fn mesh(&self, ctx: &mut Context) -> ggez::GameResult<graphics::Mesh> {
MeshBuilder::new()
.line(
&[
[0.0,0.0],
[15.0,0.0]
],
2.0,
graphics::WHITE
)?
.build(ctx)
}
fn draw_param(&self) -> DrawParam {
DrawParam::new()
.dest([self.x, self.y])
.offset([0.5 * self.width, 0.0])
.rotation(self.rotation)
}
}
//#endregion
//#region Asteroid
/// The 3 different asteroid sizes
#[derive(Copy, Clone)]
pub enum AsteroidSize {
Big,
Medium,
Small
}
/// Factor to multiply the mesh by
const ASTEROID_BIG: f32 = 40.0;
/// Factor to multiply the mesh by
const ASTEROID_MEDIUM: f32 = 30.0;
/// Factor to multiply the mesh by
const ASTEROID_SMALL: f32 = 20.0;
#[derive(Clone)]
pub struct Asteroid {
pub x: f32,
pub y: f32,
rotation: f32,
rotation_speed: f32,
speed_x: f32,
speed_y: f32,
size: AsteroidSize,
mirrored: bool,
/// Index for the asteroid_mashes var
mesh: usize
}
const ASTEROID_MAX_MOVEMENT_SPEED: f32 = 5.0;
const ASTEROID_MAX_ROTATION_SPEED: f32 = 0.1;
/// The width/height of the safezone of the ship.\
/// Asteroids do not spawn here
const SHIP_SAFEZONE: f32 = 300.0;
/// Array of different random meshes for the asteroids.\
/// The diameter before mulitplication with the asteroid size should be about 2.0
/* const ASTEROID_MESHES: [fn(f32) -> [[f32;2];13];1] = [
|size| [
[0.0 *size, 0.0 *size],
[1.0 *size, 0.0 *size],
[2.5 *size, 1.0 *size],
[2.5 *size, 1.3 *size],
[1.5 *size, 1.7 *size],
[2.4 *size, 1.9 *size],
[1.5 *size, 2.8 *size],
[0.9 *size, 2.6 *size],
[0.4 *size, 2.4 *size],
[-0.3*size, 1.2 *size],
[-0.1*size, 0.8 *size],
[0.3 *size, 1.0 *size],
[0.0 *size, 0.0 *size]
]
]; */
/// Array of different random meshes for the asteroids
const ASTEROID_MESHES: [fn(f32) -> [[f32;2];13];1] = [
|size| [
[-1.0 *size, -0.8 *size],
[0.0 *size, -1.0 *size],
[1.0 *size, -0.3 *size],
[1.1 *size, 0.3 *size],
[0.4 *size, 0.5 *size],
[1.0 *size, 0.8 *size],
[0.5 *size, 1.3 *size],
[-0.1 *size, 1.2 *size],
[-0.6 *size, 1.0 *size],
[-1.3*size, 0.2 *size],
[-1.1*size, -0.2 *size],
[-0.7 *size, 0.0 *size],
[-1.0 *size, -0.8 *size]
]
];
impl Asteroid {
pub fn new(ship_x: f32, ship_y: f32, sizeOption: Option<AsteroidSize>,ctx: &mut Context) -> Asteroid {
let (mut x, mut y);
loop {
// Can't shadow via pattern :(
let (x_, y_) = game::random_place(ctx);
x = x_;
y = y_;
// Break out when the coords are not in a safezone
if ship_x - x < SHIP_SAFEZONE / 2.0 || ship_x - x > -(SHIP_SAFEZONE / 2.0)
|| ship_y - y > SHIP_SAFEZONE / 2.0 || ship_y - y < -(SHIP_SAFEZONE / 2.0)
{
break;
}
}
let size;
if let None = sizeOption {
size = match (rand::random::<f32>() * 3.0 + 1.0).floor() as u8 {
1 => AsteroidSize::Small,
2 => AsteroidSize::Medium,
3 => AsteroidSize::Big,
_ => AsteroidSize::Small
}
} else {
size = sizeOption.unwrap();
}
/* let mirrored = match rand::random::<f32>().round() as u8 {
1 => false,
2 => true,
_ => true
}; */
let speed_x = rand::random::<f32>() * ASTEROID_MAX_MOVEMENT_SPEED * 2.0 - ASTEROID_MAX_MOVEMENT_SPEED;
let speed_y = rand::random::<f32>() * ASTEROID_MAX_MOVEMENT_SPEED * 2.0 - ASTEROID_MAX_MOVEMENT_SPEED;
let rotation_speed = rand::random::<f32>() * ASTEROID_MAX_ROTATION_SPEED * 2.0 - ASTEROID_MAX_ROTATION_SPEED;
//let rotation = rand::random::<f32>() * (2.0 * std::f32::consts::PI);
let rotation = 0.0;
let mirrored = false;
let mesh = (rand::random::<f32>() * ASTEROID_MESHES.len() as f32).floor() as usize;
// Asteroid go brrr
Asteroid {
x,
y,
size,
speed_x,
speed_y,
rotation_speed,
rotation,
mirrored,
mesh
}
}
pub fn update(&mut self) {
self.x += self.speed_x;
self.y += self.speed_y;
self.rotation += self.rotation_speed;
}
/// Returns a boolean that states if someone is within the hitbox of this asteroid
pub fn in_hitbox(&self, (x, y): (f32, f32)) -> bool {
let size;
match &self.size {
AsteroidSize::Big => size = ASTEROID_BIG,
AsteroidSize::Medium => size = ASTEROID_MEDIUM,
AsteroidSize::Small => size = ASTEROID_SMALL
}
// I am going to take 2.0 as the raw diameter of an asteroid
let radius = 2.0 * size / 2.0;
/* println!("hitboxcalc");
println!("{}", radius);
println!("{} {}", x, y);
println!("{} {}", self.x, self.y);
println!("{}", ((self.x - x).powf(2.0) + (self.y - y).powf(2.0)).sqrt() ); */
((self.x - x).powf(2.0) + (self.y - y).powf(2.0)).sqrt() < radius
}
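// (A plain point-in-circle test; the sqrt could be avoided by comparing squared
// distances, e.g. dx * dx + dy * dy < radius * radius, but it hardly matters here.)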
/// Split asteroid into 2 of smaller size
pub fn split(&self) -> Option<[Asteroid;2]> {
let size = match self.size {
AsteroidSize::Big => AsteroidSize::Medium,
AsteroidSize::Medium => AsteroidSize::Small,
AsteroidSize::Small => return None
};
let speed_x = rand::random::<f32>() * ASTEROID_MAX_MOVEMENT_SPEED;
let speed_y = rand::random::<f32>() * ASTEROID_MAX_MOVEMENT_SPEED;
let rotation_speed = rand::random::<f32>() * ASTEROID_MAX_ROTATION_SPEED;
let asteroid1 = Asteroid {
speed_x,
speed_y,
rotation_speed,
size,
..*self
};
let speed_x = rand::random::<f32>() * ASTEROID_MAX_MOVEMENT_SPEED;
let speed_y = rand::random::<f32>() * ASTEROID_MAX_MOVEMENT_SPEED;
let rotation_speed = rand::random::<f32>() * ASTEROID_MAX_ROTATION_SPEED;
let asteroid2 = Asteroid {
speed_x,
speed_y,
rotation_speed,
size,
..*self
};
Some([asteroid1, asteroid2])
}
}
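// Presumably the caller removes the asteroid that was hit and, when split() returns Some,
// pushes the two smaller asteroids back into the game state.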
/* enum SplitResult {
New([Asteroid;2]),
None
} */
impl Draw for Asteroid {
fn mesh(&self, ctx: &mut Context) -> GameResult<Mesh> {
let size;
match &self.size {
AsteroidSize::Big => size = ASTEROID_BIG,
AsteroidSize::Medium => size = ASTEROID_MEDIUM,
AsteroidSize::Small => size = ASTEROID_SMALL
}
let mut mesh = MeshBuilder::new();
mesh.line(
&ASTEROID_MESHES[self.mesh](size),
1.0,
graphics::WHITE
)?;
// I am going to take 2.0 as the raw diameter of an asteroid
/* let radius = 2.0 * size / 2.0;
//DEBUG
mesh.circle(
graphics::DrawMode::stroke(1.0),
[0.0, 0.0],
radius,
0.2,
graphics::WHITE
); */
mesh.build(ctx)
}
fn draw_param(&self) -> DrawParam {
let size;
match &self.size {
AsteroidSize::Big => size = ASTEROID_BIG,
AsteroidSize::Medium => size = ASTEROID_MEDIUM,
AsteroidSize::Small => size = ASTEROID_SMALL
}
let mut param = DrawParam::new()
.dest([self.x, self.y])
//.offset([0.5 * size, 0.5 * -size])
.rotation(self.rotation);
if self.mirrored {
param = param.scale([-1.0, 1.0]);
}
param
}
}
//#endregion Asteroid
| {
let (ctx_width, ctx_height) = graphics::drawable_size(ctx);
let ship_width = 18.0;
let ship_height = 20.0;
Ship {
width: ship_width,
height: ship_height,
x: (ctx_width - ship_width)/ 2.0,
y: (ctx_height- ship_height) / 2.0,
rotation: (3.0 / 2.0) * std::f32::consts::PI, // Start facing up
movement_force: 5.0,
rotation_speed: 0.1,
mov: Movement::new(0.3, 10.0),
fire_rate: 5.0,
last_fire: std::time::Instant::now(),
moving: false
}
} | identifier_body |
objects.rs | use ggez::input::keyboard;
use ggez::{graphics, Context, GameResult};
use graphics::{Mesh, MeshBuilder, DrawParam};
use crate::game;
use game::movement::Movement;
use game::Draw;
//#region Ship
/// The Ship.\
/// Width and height are sort of switched here.\
/// This is because the mesh is made to face the right but then rotated upwards.\
/// I thought it would make more sense like this but it kind of didn't, but whatever, who cares.\
pub struct Ship {
pub width: f32,
pub height: f32,
pub x: f32,
pub y: f32,
pub mov: Movement,
/// Current rotation in radials
pub rotation: f32,
/// Force to add to the movement calculation when using rocket
pub movement_force: f32,
/// Speed of rotation in radials per tick
pub rotation_speed: f32,
/// How many times the ship can fire a laser per second
pub fire_rate: f32,
/// Time the ship fired for the last time
pub last_fire: std::time::Instant,
/// If the ship is currently using its rocket
pub moving: bool
}
impl Ship {
pub fn new(ctx: &Context) -> Ship {
let (ctx_width, ctx_height) = graphics::drawable_size(ctx);
let ship_width = 18.0;
let ship_height = 20.0;
Ship {
width: ship_width,
height: ship_height,
x: (ctx_width - ship_width)/ 2.0,
y: (ctx_height- ship_height) / 2.0,
rotation: (3.0 / 2.0) * std::f32::consts::PI, // Start facing up
movement_force: 5.0,
rotation_speed: 0.1,
mov: Movement::new(0.3, 10.0),
fire_rate: 5.0,
last_fire: std::time::Instant::now(),
moving: false
}
}
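// Note: (3/2)π faces "up" on screen because the y axis grows downward,
// so increasing the rotation turns the ship clockwise.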
/// Handle keyboard inputs and update the location of the Ship accordingly
pub fn update_movement(&mut self, ctx: &Context) {
/* The current implementation does not allow external forces
This could be easily achieved by having this call take additional params which set
some force before movement calculation like gravity. This is (currently) not needed for this game.*/
self.mov.force_x = 0.0;
self.mov.force_y = 0.0;
self.moving = false;
if keyboard::is_key_pressed(ctx, keyboard::KeyCode::A) {
self.rotation -= self.rotation_speed;
}
if keyboard::is_key_pressed(ctx, keyboard::KeyCode::D) {
self.rotation += self.rotation_speed;
}
if keyboard::is_key_pressed(ctx, keyboard::KeyCode::W) {
self.mov.force_x += self.rotation.cos() * self.movement_force;
self.mov.force_y += self.rotation.sin() * self.movement_force;
self.moving = true;
}
// Movement structs handles the physics
self.mov.update();
self.x += self.mov.speed_x;
self.y += self.mov.speed_y;
}
/// Add a laser to the gamestate appearing from the ship
pub fn shoot(&self, lasers: &mut Vec<game::Laser>) {
lasers.push(game::Laser::new(
self.x + self.height /2.0,
self.y - self.width / 2.0,
self.rotation)
);
}
pub fn debug_string(&self) -> String {
format!(
"Force x: {}\n\
Force y: {}\n\
Acceleration x: {}\n\
Acceleration y: {}\n\
Speed x: {}\n\
Speed y: {}\n\
Rotation speed: {}\n",
self.mov.force_x,
self.mov.force_y,
self.mov.acceleration_x,
self.mov.acceleration_y,
self.mov.speed_x,
self.mov.speed_y,
self.rotation_speed
)
}
}
impl game::Draw for Ship {
fn mesh(&self, ctx: &mut Context) -> GameResult<Mesh> {
let mut mesh = MeshBuilder::new();
/*
With these points you could make the center of the mesh be the actual center of the triangle
This would make writing hit detection easier, and would make the offset trivial.
But I did not immediately implement it like this
and I don't want to redo the rocket fire mesh right now, so I am leaving this comment instead
[-self.height/2.0, -self.width/2.0],
[ self.height/2.0, 0.0],
[-self.height/2.0, self.width/2.0],
[-self.height/2.0, -self.width/2.0]
*/
// Could be a polygon as well
mesh.line(
&[
[0.0, 0.0],
[self.height, -self.width / 2.0],
[0.0, -self.width],
[0.0,0.0]
],
1.3,
graphics::WHITE
)?;
// Draw fire behind rocket
if self.moving {
mesh.line(
&[
[ - 0.0, - 0.1 * self.width],
[ - 0.3 * self.height, - 0.233 * self.width],
[ - 0.2 * self.height, - 0.366 * self.width],
[ - 0.6 * self.height, - 0.5 * self.width],
[ - 0.2 * self.height, - 0.633 * self.width],
[ - 0.3 * self.height, - 0.766 * self.width],
[ - 0.0 * self.height, - 0.9 * self.width]
],
1.3,
graphics::WHITE
)?;
}
mesh.build(ctx)
}
fn draw_param(&self) -> DrawParam {
DrawParam::new()
.dest([self.x, self.y])
.offset([0.5 * self.height, 0.5 * -self.width])
.rotation(self.rotation)
}
}
//#endregion
//#region Laser
/// Laser that has been fired from Ship
pub struct Laser {
pub x: f32,
pub y: f32,
pub rotation: f32,
speed: f32,
width: f32
}
impl Laser {
pub fn new(x: f32, y: f32, rotation: f32) -> Laser {
Laser {
x,
y,
rotation,
speed: 17.0,
width: 15.0
}
}
pub fn update(&mut self) {
self.x += self.rotation.cos() * self.speed;
self.y += self.rotation.sin() * self.speed;
}
}
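// Laser carries no lifetime or bounds check of its own, so lasers that leave the
// screen are presumably culled by the surrounding game state.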
impl Draw for Laser {
fn mesh(&self, ctx: &mut Context) -> ggez::GameResult<graphics::Mesh> {
MeshBuilder::new()
.line(
&[
[0.0,0.0],
[15.0,0.0]
],
2.0,
graphics::WHITE
)?
.build(ctx)
}
fn draw_param(&self) -> DrawParam {
DrawParam::new()
.dest([self.x, self.y])
.offset([0.5 * self.width, 0.0])
.rotation(self.rotation)
}
}
//#endregion
//#region Asteroid
/// The 3 different asteroid sizes
#[derive(Copy, Clone)]
pub enum AsteroidSize {
Big,
Medium,
Small
}
/// Factor to multiply the mesh by
const ASTEROID_BIG: f32 = 40.0;
/// Factor to multiply the mesh by
const ASTEROID_MEDIUM: f32 = 30.0;
/// Factor to multiply the mesh by
const ASTEROID_SMALL: f32 = 20.0;
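// With the roughly 2.0-unit diameter of the outlines below, a Big asteroid ends up
// about 80 px across, Medium about 60 px and Small about 40 px.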
#[derive(Clone)]
pub struct Asteroid {
pub x: f32,
pub y: f32,
rotation: f32,
rotation_speed: f32,
speed_x: f32,
speed_y: f32,
size: AsteroidSize,
mirrored: bool,
/// Index for the asteroid_mashes var
mesh: usize
}
const ASTEROID_MAX_MOVEMENT_SPEED: f32 = 5.0;
const ASTEROID_MAX_ROTATION_SPEED: f32 = 0.1;
/// The width/height of the safezone of the ship.\
/// Asteroids do not spawn here
const SHIP_SAFEZONE: f32 = 300.0;
/// Array of different random meshes for the asteroids.\
/// The diameter before multiplication with the asteroid size should be about 2.0
/* const ASTEROID_MESHES: [fn(f32) -> [[f32;2];13];1] = [
|size| [
[0.0 *size, 0.0 *size],
[1.0 *size, 0.0 *size],
[2.5 *size, 1.0 *size],
[2.5 *size, 1.3 *size],
[1.5 *size, 1.7 *size],
[2.4 *size, 1.9 *size],
[1.5 *size, 2.8 *size],
[0.9 *size, 2.6 *size],
[0.4 *size, 2.4 *size],
[-0.3*size, 1.2 *size],
[-0.1*size, 0.8 *size],
[0.3 *size, 1.0 *size],
[0.0 *size, 0.0 *size]
]
]; */
/// Array of different random meshes for the asteroids
const ASTEROID_MESHES: [fn(f32) -> [[f32;2];13];1] = [
|size| [
[-1.0 *size, -0.8 *size],
[0.0 *size, -1.0 *size],
[1.0 *size, -0.3 *size],
[1.1 *size, 0.3 *size],
[0.4 *size, 0.5 *size],
[1.0 *size, 0.8 *size],
[0.5 *size, 1.3 *size],
[-0.1 *size, 1.2 *size],
[-0.6 *size, 1.0 *size],
[-1.3*size, 0.2 *size],
[-1.1*size, -0.2 *size],
[-0.7 *size, 0.0 *size],
[-1.0 *size, -0.8 *size]
]
];
impl Asteroid {
pub fn new(ship_x: f32, ship_y: f32, sizeOption: Option<AsteroidSize>,ctx: &mut Context) -> Asteroid {
let (mut x, mut y);
loop {
// Can't shadow via pattern :(
let (x_, y_) = game::random_place(ctx);
x = x_;
y = y_;
// Break out when the coords are not in a safezone
if ship_x - x < SHIP_SAFEZONE / 2.0 || ship_x - x > -(SHIP_SAFEZONE / 2.0)
|| ship_y - y > SHIP_SAFEZONE / 2.0 || ship_y - y < -(SHIP_SAFEZONE / 2.0)
{
break;
}
}
let size;
if let None = sizeOption {
size = match (rand::random::<f32>() * 3.0 + 1.0).floor() as u8 {
1 => AsteroidSize::Small,
2 => AsteroidSize::Medium,
3 => AsteroidSize::Big,
_ => AsteroidSize::Small
}
} else {
size = sizeOption.unwrap();
}
/* let mirrored = match rand::random::<f32>().round() as u8 {
1 => false,
2 => true,
_ => true
}; */
let speed_x = rand::random::<f32>() * ASTEROID_MAX_MOVEMENT_SPEED * 2.0 - ASTEROID_MAX_MOVEMENT_SPEED;
let speed_y = rand::random::<f32>() * ASTEROID_MAX_MOVEMENT_SPEED * 2.0 - ASTEROID_MAX_MOVEMENT_SPEED;
let rotation_speed = rand::random::<f32>() * ASTEROID_MAX_ROTATION_SPEED * 2.0 - ASTEROID_MAX_ROTATION_SPEED;
//let rotation = rand::random::<f32>() * (2.0 * std::f32::consts::PI);
let rotation = 0.0;
let mirrored = false;
let mesh = (rand::random::<f32>() * ASTEROID_MESHES.len() as f32).floor() as usize;
// Asteroid go brrr
Asteroid {
x,
y,
size,
speed_x,
speed_y,
rotation_speed,
rotation,
mirrored,
mesh
}
}
pub fn | (&mut self) {
self.x += self.speed_x;
self.y += self.speed_y;
self.rotation += self.rotation_speed;
}
/// Returns a boolean that states if someone is within the hitbox of this asteroid
pub fn in_hitbox(&self, (x, y): (f32, f32)) -> bool {
let size;
match &self.size {
AsteroidSize::Big => size = ASTEROID_BIG,
AsteroidSize::Medium => size = ASTEROID_MEDIUM,
AsteroidSize::Small => size = ASTEROID_SMALL
}
// I am going to take 2.0 as the raw diameter of an asteroid
let radius = 2.0 * size / 2.0;
/* println!("hitboxcalc");
println!("{}", radius);
println!("{} {}", x, y);
println!("{} {}", self.x, self.y);
println!("{}", ((self.x - x).powf(2.0) + (self.y - y).powf(2.0)).sqrt() ); */
((self.x - x).powf(2.0) + (self.y - y).powf(2.0)).sqrt() < radius
}
/// Split asteroid into 2 of smaller size
pub fn split(&self) -> Option<[Asteroid;2]> {
let size = match self.size {
AsteroidSize::Big => AsteroidSize::Medium,
AsteroidSize::Medium => AsteroidSize::Small,
AsteroidSize::Small => return None
};
let speed_x = rand::random::<f32>() * ASTEROID_MAX_MOVEMENT_SPEED;
let speed_y = rand::random::<f32>() * ASTEROID_MAX_MOVEMENT_SPEED;
let rotation_speed = rand::random::<f32>() * ASTEROID_MAX_ROTATION_SPEED;
let asteroid1 = Asteroid {
speed_x,
speed_y,
rotation_speed,
size,
..*self
};
let speed_x = rand::random::<f32>() * ASTEROID_MAX_MOVEMENT_SPEED;
let speed_y = rand::random::<f32>() * ASTEROID_MAX_MOVEMENT_SPEED;
let rotation_speed = rand::random::<f32>() * ASTEROID_MAX_ROTATION_SPEED;
let asteroid2 = Asteroid {
speed_x,
speed_y,
rotation_speed,
size,
..*self
};
Some([asteroid1, asteroid2])
}
}
/* enum SplitResult {
New([Asteroid;2]),
None
} */
impl Draw for Asteroid {
fn mesh(&self, ctx: &mut Context) -> GameResult<Mesh> {
let size;
match &self.size {
AsteroidSize::Big => size = ASTEROID_BIG,
AsteroidSize::Medium => size = ASTEROID_MEDIUM,
AsteroidSize::Small => size = ASTEROID_SMALL
}
let mut mesh = MeshBuilder::new();
mesh.line(
&ASTEROID_MESHES[self.mesh](size),
1.0,
graphics::WHITE
)?;
// I am going to take 2.0 as the raw diameter of an asteroid
/* let radius = 2.0 * size / 2.0;
//DEBUG
mesh.circle(
graphics::DrawMode::stroke(1.0),
[0.0, 0.0],
radius,
0.2,
graphics::WHITE
); */
mesh.build(ctx)
}
fn draw_param(&self) -> DrawParam {
let size;
match &self.size {
AsteroidSize::Big => size = ASTEROID_BIG,
AsteroidSize::Medium => size = ASTEROID_MEDIUM,
AsteroidSize::Small => size = ASTEROID_SMALL
}
let mut param = DrawParam::new()
.dest([self.x, self.y])
//.offset([0.5 * size, 0.5 * -size])
.rotation(self.rotation);
if self.mirrored {
param = param.scale([-1.0, 1.0]);
}
param
}
}
//#endregion Asteroid
| update | identifier_name |
objects.rs | use ggez::input::keyboard;
use ggez::{graphics, Context, GameResult};
use graphics::{Mesh, MeshBuilder, DrawParam};
use crate::game;
use game::movement::Movement;
use game::Draw;
//#region Ship
/// The Ship.\
/// Width and height are sort of switched here.\
/// This is because the mesh is made to face the right but then rotated upwards.\
/// I thought it would make more sense like this but it kind of didn't, but whatever, who cares.\
pub struct Ship {
pub width: f32,
pub height: f32,
pub x: f32,
pub y: f32,
pub mov: Movement,
/// Current rotation in radials
pub rotation: f32,
/// Force to add to the movement calculation when using rocket
pub movement_force: f32,
/// Speed of rotation in radials per tick
pub rotation_speed: f32,
/// How many times the ship can fire a laser per second
pub fire_rate: f32,
/// Time the ship fired for the last time
pub last_fire: std::time::Instant,
/// If the ship is currently using its rocket
pub moving: bool
}
impl Ship {
pub fn new(ctx: &Context) -> Ship {
let (ctx_width, ctx_height) = graphics::drawable_size(ctx);
let ship_width = 18.0;
let ship_height = 20.0;
Ship {
width: ship_width,
height: ship_height,
x: (ctx_width - ship_width)/ 2.0,
y: (ctx_height- ship_height) / 2.0,
rotation: (3.0 / 2.0) * std::f32::consts::PI, // Start facing up
movement_force: 5.0,
rotation_speed: 0.1,
mov: Movement::new(0.3, 10.0),
fire_rate: 5.0,
last_fire: std::time::Instant::now(),
moving: false
}
}
/// Handle keyboard inputs and update the location of the Ship accordingly
pub fn update_movement(&mut self, ctx: &Context) {
/* The current implementation does not allow external forces
This could be easily achieved by having this call take additional params which set
some force before movement calculation like gravity. This is (currently) not needed for this game.*/
self.mov.force_x = 0.0;
self.mov.force_y = 0.0;
self.moving = false;
if keyboard::is_key_pressed(ctx, keyboard::KeyCode::A) {
self.rotation -= self.rotation_speed;
}
if keyboard::is_key_pressed(ctx, keyboard::KeyCode::D) {
self.rotation += self.rotation_speed;
}
if keyboard::is_key_pressed(ctx, keyboard::KeyCode::W) {
self.mov.force_x += self.rotation.cos() * self.movement_force;
self.mov.force_y += self.rotation.sin() * self.movement_force;
self.moving = true;
}
// Movement structs handles the physics
self.mov.update();
self.x += self.mov.speed_x;
self.y += self.mov.speed_y;
}
/// Add a laser to the gamestate appearing from the ship
pub fn shoot(&self, lasers: &mut Vec<game::Laser>) {
lasers.push(game::Laser::new(
self.x + self.height /2.0,
self.y - self.width / 2.0,
self.rotation)
);
}
pub fn debug_string(&self) -> String {
format!(
"Force x: {}\n\
Force y: {}\n\
Acceleration x: {}\n\
Acceleration y: {}\n\
Speed x: {}\n\
Speed y: {}\n\ | self.mov.force_y,
self.mov.acceleration_x,
self.mov.acceleration_y,
self.mov.speed_x,
self.mov.speed_y,
self.rotation_speed
)
}
}
impl game::Draw for Ship {
fn mesh(&self, ctx: &mut Context) -> GameResult<Mesh> {
let mut mesh = MeshBuilder::new();
/*
With these points you could make the center of the mesh be the actual center of the triangle
This would make writing hit detection easier, and would make the offset trivial.
But I did not immediately implement it like this
and I don't want to redo the rocket fire mesh right now, so I am leaving this comment instead
[-self.height/2.0, -self.width/2.0],
[ self.height/2.0, 0.0],
[-self.height/2.0, self.width/2.0],
[-self.height/2.0, -self.width/2.0]
*/
// Could be a polygon as well
mesh.line(
&[
[0.0, 0.0],
[self.height, -self.width / 2.0],
[0.0, -self.width],
[0.0,0.0]
],
1.3,
graphics::WHITE
)?;
// Draw fire behind rocket
if self.moving {
mesh.line(
&[
[ - 0.0, - 0.1 * self.width],
[ - 0.3 * self.height, - 0.233 * self.width],
[ - 0.2 * self.height, - 0.366 * self.width],
[ - 0.6 * self.height, - 0.5 * self.width],
[ - 0.2 * self.height, - 0.633 * self.width],
[ - 0.3 * self.height, - 0.766 * self.width],
[ - 0.0 * self.height, - 0.9 * self.width]
],
1.3,
graphics::WHITE
)?;
}
mesh.build(ctx)
}
fn draw_param(&self) -> DrawParam {
DrawParam::new()
.dest([self.x, self.y])
.offset([0.5 * self.height, 0.5 * -self.width])
.rotation(self.rotation)
}
}
//#endregion
//#region Laser
/// Laser that has been fired from Ship
pub struct Laser {
pub x: f32,
pub y: f32,
pub rotation: f32,
speed: f32,
width: f32
}
impl Laser {
pub fn new(x: f32, y: f32, rotation: f32) -> Laser {
Laser {
x,
y,
rotation,
speed: 17.0,
width: 15.0
}
}
pub fn update(&mut self) {
self.x += self.rotation.cos() * self.speed;
self.y += self.rotation.sin() * self.speed;
}
}
impl Draw for Laser {
fn mesh(&self, ctx: &mut Context) -> ggez::GameResult<graphics::Mesh> {
MeshBuilder::new()
.line(
&[
[0.0,0.0],
[15.0,0.0]
],
2.0,
graphics::WHITE
)?
.build(ctx)
}
fn draw_param(&self) -> DrawParam {
DrawParam::new()
.dest([self.x, self.y])
.offset([0.5 * self.width, 0.0])
.rotation(self.rotation)
}
}
//#endregion
//#region Asteroid
/// The 3 different asteroid sizes
#[derive(Copy, Clone)]
pub enum AsteroidSize {
Big,
Medium,
Small
}
/// Factor to multiply the mesh by
const ASTEROID_BIG: f32 = 40.0;
/// Factor to multiply the mesh by
const ASTEROID_MEDIUM: f32 = 30.0;
/// Factor to multiply the mesh by
const ASTEROID_SMALL: f32 = 20.0;
#[derive(Clone)]
pub struct Asteroid {
pub x: f32,
pub y: f32,
rotation: f32,
rotation_speed: f32,
speed_x: f32,
speed_y: f32,
size: AsteroidSize,
mirrored: bool,
/// Index for the asteroid_mashes var
mesh: usize
}
const ASTEROID_MAX_MOVEMENT_SPEED: f32 = 5.0;
const ASTEROID_MAX_ROTATION_SPEED: f32 = 0.1;
/// The width/height of the safezone of the ship.\
/// Asteroids do not spawn here
const SHIP_SAFEZONE: f32 = 300.0;
/// Array of different random meshes for the asteroids.\
/// The diameter before multiplication with the asteroid size should be about 2.0
/* const ASTEROID_MESHES: [fn(f32) -> [[f32;2];13];1] = [
|size| [
[0.0 *size, 0.0 *size],
[1.0 *size, 0.0 *size],
[2.5 *size, 1.0 *size],
[2.5 *size, 1.3 *size],
[1.5 *size, 1.7 *size],
[2.4 *size, 1.9 *size],
[1.5 *size, 2.8 *size],
[0.9 *size, 2.6 *size],
[0.4 *size, 2.4 *size],
[-0.3*size, 1.2 *size],
[-0.1*size, 0.8 *size],
[0.3 *size, 1.0 *size],
[0.0 *size, 0.0 *size]
]
]; */
/// Array of different random meshes for the asteroids
const ASTEROID_MESHES: [fn(f32) -> [[f32;2];13];1] = [
|size| [
[-1.0 *size, -0.8 *size],
[0.0 *size, -1.0 *size],
[1.0 *size, -0.3 *size],
[1.1 *size, 0.3 *size],
[0.4 *size, 0.5 *size],
[1.0 *size, 0.8 *size],
[0.5 *size, 1.3 *size],
[-0.1 *size, 1.2 *size],
[-0.6 *size, 1.0 *size],
[-1.3*size, 0.2 *size],
[-1.1*size, -0.2 *size],
[-0.7 *size, 0.0 *size],
[-1.0 *size, -0.8 *size]
]
];
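// Each entry above is a closure taking the size factor and returning the 13 outline points
// (first and last repeated to close the shape); Asteroid::new picks one at random by index.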
impl Asteroid {
pub fn new(ship_x: f32, ship_y: f32, sizeOption: Option<AsteroidSize>,ctx: &mut Context) -> Asteroid {
let (mut x, mut y);
loop {
// Can't shadow via pattern :(
let (x_, y_) = game::random_place(ctx);
x = x_;
y = y_;
// Break out when the coords are not in a safezone
if ship_x - x < SHIP_SAFEZONE / 2.0 || ship_x - x > -(SHIP_SAFEZONE / 2.0)
|| ship_y - y > SHIP_SAFEZONE / 2.0 || ship_y - y < -(SHIP_SAFEZONE / 2.0)
{
break;
}
}
let size;
if let None = sizeOption {
size = match (rand::random::<f32>() * 3.0 + 1.0).floor() as u8 {
1 => AsteroidSize::Small,
2 => AsteroidSize::Medium,
3 => AsteroidSize::Big,
_ => AsteroidSize::Small
}
} else {
size = sizeOption.unwrap();
}
/* let mirrored = match rand::random::<f32>().round() as u8 {
1 => false,
2 => true,
_ => true
}; */
let speed_x = rand::random::<f32>() * ASTEROID_MAX_MOVEMENT_SPEED * 2.0 - ASTEROID_MAX_MOVEMENT_SPEED;
let speed_y = rand::random::<f32>() * ASTEROID_MAX_MOVEMENT_SPEED * 2.0 - ASTEROID_MAX_MOVEMENT_SPEED;
let rotation_speed = rand::random::<f32>() * ASTEROID_MAX_ROTATION_SPEED * 2.0 - ASTEROID_MAX_ROTATION_SPEED;
//let rotation = rand::random::<f32>() * (2.0 * std::f32::consts::PI);
let rotation = 0.0;
let mirrored = false;
let mesh = (rand::random::<f32>() * ASTEROID_MESHES.len() as f32).floor() as usize;
// Asteroid go brrr
Asteroid {
x,
y,
size,
speed_x,
speed_y,
rotation_speed,
rotation,
mirrored,
mesh
}
}
pub fn update(&mut self) {
self.x += self.speed_x;
self.y += self.speed_y;
self.rotation += self.rotation_speed;
}
/// Returns a boolean that states if someone is within the hitbox of this asteroid
pub fn in_hitbox(&self, (x, y): (f32, f32)) -> bool {
let size;
match &self.size {
AsteroidSize::Big => size = ASTEROID_BIG,
AsteroidSize::Medium => size = ASTEROID_MEDIUM,
AsteroidSize::Small => size = ASTEROID_SMALL
}
// I am going to take 2.0 as the raw diameter of an asteroid
let radius = 2.0 * size / 2.0;
/* println!("hitboxcalc");
println!("{}", radius);
println!("{} {}", x, y);
println!("{} {}", self.x, self.y);
println!("{}", ((self.x - x).powf(2.0) + (self.y - y).powf(2.0)).sqrt() ); */
((self.x - x).powf(2.0) + (self.y - y).powf(2.0)).sqrt() < radius
}
/// Split asteroid into 2 of smaller size
pub fn split(&self) -> Option<[Asteroid;2]> {
let size = match self.size {
AsteroidSize::Big => AsteroidSize::Medium,
AsteroidSize::Medium => AsteroidSize::Small,
AsteroidSize::Small => return None
};
let speed_x = rand::random::<f32>() * ASTEROID_MAX_MOVEMENT_SPEED;
let speed_y = rand::random::<f32>() * ASTEROID_MAX_MOVEMENT_SPEED;
let rotation_speed = rand::random::<f32>() * ASTEROID_MAX_ROTATION_SPEED;
let asteroid1 = Asteroid {
speed_x,
speed_y,
rotation_speed,
size,
..*self
};
let speed_x = rand::random::<f32>() * ASTEROID_MAX_MOVEMENT_SPEED;
let speed_y = rand::random::<f32>() * ASTEROID_MAX_MOVEMENT_SPEED;
let rotation_speed = rand::random::<f32>() * ASTEROID_MAX_ROTATION_SPEED;
let asteroid2 = Asteroid {
speed_x,
speed_y,
rotation_speed,
size,
..*self
};
Some([asteroid1, asteroid2])
}
}
/* enum SplitResult {
New([Asteroid;2]),
None
} */
impl Draw for Asteroid {
fn mesh(&self, ctx: &mut Context) -> GameResult<Mesh> {
let size;
match &self.size {
AsteroidSize::Big => size = ASTEROID_BIG,
AsteroidSize::Medium => size = ASTEROID_MEDIUM,
AsteroidSize::Small => size = ASTEROID_SMALL
}
let mut mesh = MeshBuilder::new();
mesh.line(
&ASTEROID_MESHES[self.mesh](size),
1.0,
graphics::WHITE
)?;
// I am going to take 2.0 as the raw diameter of an asteroid
/* let radius = 2.0 * size / 2.0;
//DEBUG
mesh.circle(
graphics::DrawMode::stroke(1.0),
[0.0, 0.0],
radius,
0.2,
graphics::WHITE
); */
mesh.build(ctx)
}
fn draw_param(&self) -> DrawParam {
let size;
match &self.size {
AsteroidSize::Big => size = ASTEROID_BIG,
AsteroidSize::Medium => size = ASTEROID_MEDIUM,
AsteroidSize::Small => size = ASTEROID_SMALL
}
let mut param = DrawParam::new()
.dest([self.x, self.y])
//.offset([0.5 * size, 0.5 * -size])
.rotation(self.rotation);
if self.mirrored {
param = param.scale([-1.0, 1.0]);
}
param
}
}
//#endregion Asteroid | Rotation speed: {}\n",
self.mov.force_x, | random_line_split |
made.py | # Import the PyQt and QGIS libraries
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from qgis.core import *
# Initialize Qt resources from file resources.py
import resources
import locale
locale.setlocale(locale.LC_ALL, '')
# Import the code for the dialog
from dalacalcdialog import DalaCalcDialog
class DalaCalc:
def __init__(self, iface):
# Save reference to the QGIS interface
self.iface = iface
# instance variable holding the list of layer ids
self.layerids = []
# initialize plugin directory
self.plugin_dir = QFileInfo(QgsApplication.qgisUserDbFilePath()).path() + "/python/plugins/dalacalc"
# initialize locale
localePath = ""
locale = QSettings().value("locale/userLocale").toString()[0:2]
if QFileInfo(self.plugin_dir).exists():
localePath = self.plugin_dir + "/i18n/dalacalc_" + locale + ".qm"
if QFileInfo(localePath).exists():
self.translator = QTranslator()
self.translator.load(localePath)
if qVersion() > '4.3.3':
QCoreApplication.installTranslator(self.translator)
# Create the dialog (after translation) and keep reference
# self.dlg = DalaCalcDialog(self.iface.mainWindow(), flags)
def initGui(self):
# Create action that will start plugin configuration
self.action = QAction(QIcon(":/plugins/dalacalc/icon.png"),"Hitung Dala", self.iface.mainWindow())
self.action.setWhatsThis("Plugin untuk hitungan Kerusakan dan Kerugian")
self.action.setStatusTip("Damages And Losses Plugin")
# connect the action to the run method
QObject.connect(self.action, SIGNAL("triggered()"), self.run)
# Add toolbar button and menu item
self.iface.addToolBarIcon(self.action)
self.iface.addPluginToMenu(u"&Hitungan Kerusakan Kerugian", self.action)
def unload(self):
# Remove the plugin menu item and icon
self.iface.removePluginMenu(u"&Hitungan Kerusakan Kerugian", self.action)
self.iface.removeToolBarIcon(self.action)
# run method that performs all the real work
def run(self):
# create and show the dialog
flags = Qt.WindowTitleHint | Qt.WindowSystemMenuHint | Qt.WindowMaximizeButtonHint
self.dlg = DalaCalcDialog(self.iface.mainWindow(), flags)
# show the dialog
self.dlg.show()
# connect signals
QObject.connect(self.dlg.ui.KeterdampakanComboBox,SIGNAL('currentIndexChanged(int)'), self.bacaKeterdampakan)
QObject.connect(self.dlg.ui.BahayaComboBox,SIGNAL('currentIndexChanged(int)'), self.bacaBahaya)
#QObject.connect(self.dlg.ui.KerugianLineEdit,SIGNAL('currentIndexChanged(int)'), self.bacaKerugian)
QObject.connect(self.dlg.ui.helpPushButton,SIGNAL('clicked()'), self.bantuan)
QObject.connect(self.dlg.ui.hitungPushButton,SIGNAL('clicked()'), self.hitungDala)
quitbutton = self.dlg.ui.closePushButton
QObject.connect(quitbutton, SIGNAL('clicked()'), self.dlg, SLOT('close()'))
# build the list of vector layers currently loaded in QGIS
self.layermap=QgsMapLayerRegistry.instance().mapLayers()
for (name,layer) in self.layermap.iteritems():
if type(layer).__name__ == "QgsVectorLayer":
tempname = str(name).rstrip('01234567890')
self.layerids.append(name)
self.dlg.ui.KeterdampakanComboBox.addItem(tempname)
self.dlg.ui.BahayaComboBox.addItem(tempname)
def bacaKeterdampakan(self):
# read the layer to be used as the exposure (affected objects) layer
try:
comboindex = self.dlg.ui.KeterdampakanComboBox.currentIndex()
layerKeterdampakan = self.layermap[self.layerids[comboindex]]
except: #Crashes without valid shapefiles
return
def | (self):
# read the layer to be used as the hazard layer
try:
comboindex = self.dlg.ui.BahayaComboBox.currentIndex()
layerBahaya = self.layermap[self.layerids[comboindex]]
except: #Crashes without valid shapefiles
return
def bantuan(self):
# show the help dialog
QMessageBox.information(self.iface.mainWindow(),"Bantuan Dala","Hitungan kerugian disesuaikan dengan peraturan daerah yang berlaku, dan diasumsikan kerusakan sebesar 90 %", QMessageBox.Close)
def hitungDala(self):
# read the loss value per unit and check that the input really is a number
try:
nilaiKerugian = self.dlg.ui.KerugianLineEdit.text()
nilaiKerugian = float(nilaiKerugian)
except ValueError:
QMessageBox.warning(self.iface.mainWindow(),"Error","Nilai kerugian tidak boleh kosong dan harus berupa angka!", QMessageBox.Close)
return
# read the exposure layer
comboindex = self.dlg.ui.KeterdampakanComboBox.currentIndex()
layerKeterdampakan = self.layermap[self.layerids[comboindex]]
# read the hazard layer
comboindex = self.dlg.ui.BahayaComboBox.currentIndex()
layerBahaya = self.layermap[self.layerids[comboindex]]
# debug check that both layers were picked up correctly
#QMessageBox.information(self.iface.mainWindow(),"Error","terdampak = "+str(layerKeterdampakan)+"\nBahaya = "+str(layerBahaya), QMessageBox.Close)
# build spatial indexes to speed up the intersection tests
dampakIndex = QgsSpatialIndex() # empty index to hold the layer with many features
bahayaIndex = QgsSpatialIndex()
fbahaya = QgsFeature() # variable holding a feature from the hazard layer
fdampak = QgsFeature() # variable holding a feature from the exposure layer
# dampak - buat penyimpanan feature menggunakan spatial index
allAttrsDampak = layerKeterdampakan.pendingAllAttributesList()
layerKeterdampakan.select(allAttrsDampak)
allFeaturesDampak = {fdampak.id(): fdampak for fdampak in layerKeterdampakan}
# bahaya - buat penyimpanan feature menggunakan spatial index
allAttrsBahaya = layerBahaya.pendingAllAttributesList()
layerBahaya.select(allAttrsBahaya)
allFeaturesBahaya = {fbahaya.id(): fbahaya for fbahaya in layerBahaya}
#mengisi dictionary dengan data keterdampakan
for fd in allFeaturesDampak.values():
dampakIndex.insertFeature(fd)
#mengisi dictionary dengan data bahaya
for fb in allFeaturesBahaya.values():
bahayaIndex.insertFeature(fb)
# --- MAIN ITERATION ---
ids_D = {}
ids_B = {}
luasAkhirTerdampak = 0
# loop untuk mengisi feature di layer dampak dengan spatial indexnya
for fdampak in allFeaturesDampak.values():
varA = fdampak.id()
ids_D[varA] = dampakIndex.intersects(fdampak.geometry().boundingBox())
#QMessageBox.information(self.iface.mainWindow(),"test", str(varA),QMessageBox.Close)
# loop untuk mengisi feature di layer bahaya dengan spatial indexnya
for fbahaya in allFeaturesBahaya.values():
varB = fbahaya.id()
ids_B[varB] = bahayaIndex.intersects(fbahaya.geometry().boundingBox())
#QMessageBox.information(self.iface.mainWindow(),"test", str(varB),QMessageBox.Close)
selection=[]
# seleksi fitur yang terseleksi
for id_D in ids_D:
f_D = allFeaturesDampak[id_D]
for id_B in ids_B:
f_B = allFeaturesBahaya[id_B]
intersct = f_D.geometry().intersects(f_B.geometry())
#QMessageBox.information(self.iface.mainWindow(),"test1", "intersect pa gak?"+str(intersct),QMessageBox.Close)
if intersct == True:
luasTerdampak = f_D.geometry().area()
luasAkhirTerdampak += luasTerdampak
selection.append(id_D) # mendaftar feature yang terseleksi
else:
pass
layerKeterdampakan.setSelectedFeatures(selection)
if varA == 1:
self.zoomFeature()
else:
mc=self.iface.mapCanvas()
mc.zoomToSelected(layerKeterdampakan)
# menghitung perkalian antara nilai kerugian dengan luas area terdampak
persentase = 90.0*(0.01)
hasilKali = luasAkhirTerdampak * nilaiKerugian * persentase
# menampilkan hasil
stringHasil = ("Hasil analisis kerugian dan kerusakan: \n"
"\n- Total jumlah fasilitas terdampak = "+str(len(selection))+
"\n- Total luas semua fasilitas terdampak "
"\n = "+str(luasAkhirTerdampak)+ " m2"
"\n- Dengan nilai kerugian per unit sebesar "
"\n Rp. "+locale.format("%d",nilaiKerugian,grouping=True)+",- "
"\n dan dengan asumsi bahwa bangunan yang rusak "
"\n mengalami "+str(persentase*100)+"% kerusakan, diperoleh bahwa"
"\nNilai total kerugian = "
"\n Rp. "+locale.format("%d",hasilKali,grouping=True)+",-")
QMessageBox.information(self.iface.mainWindow(),"Hasil Hitungan", stringHasil, QMessageBox.Close)
def zoomFeature(self):
#Kalau hanya satu feature yang terpilih, zoom ke feature tersebut.
try:
comboindex = self.dlg.ui.KeterdampakanComboBox.currentIndex()
layerKeterdampakan = self.layermap[self.layerids[comboindex]]
rect = QgsRectangle(layerKeterdampakan.boundingBoxOfSelected())
rect.setXMaximum(rect.xMaximum() + .5)
rect.setXMinimum(rect.xMinimum() - .5)
rect.setYMaximum(rect.yMaximum() + .5)
rect.setYMinimum(rect.yMinimum() - .5)
number = float(self.dlg.ui.scaleSpin.value())
mc=self.iface.mapCanvas()
#If the screen is longer than it is tall, scale based on width
if mc.extent().height() > mc.extent().width():
scalefactor = (mc.extent().width() / mc.scale())
else: #otherwise use height
scalefactor = (mc.extent().height() / mc.scale())
rect.scale(number * scalefactor)
mc.setExtent(rect)
mc.refresh()
#scalefactor = number / mc.scale()
#mc.zoomByFactor(scalefactor)
except:
print "Tidak ada feature yang terseleksi"
| bacaBahaya | identifier_name |
made.py | # Import the PyQt and QGIS libraries
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from qgis.core import *
# Initialize Qt resources from file resources.py
import resources
import locale
locale.setlocale(locale.LC_ALL, '')
# Import the code for the dialog
from dalacalcdialog import DalaCalcDialog
class DalaCalc:
def __init__(self, iface):
# Save reference to the QGIS interface
self.iface = iface
# menambahkan variable global untuk daftar layer
self.layerids = []
# initialize plugin directory
self.plugin_dir = QFileInfo(QgsApplication.qgisUserDbFilePath()).path() + "/python/plugins/dalacalc"
# initialize locale
localePath = ""
locale = QSettings().value("locale/userLocale").toString()[0:2]
if QFileInfo(self.plugin_dir).exists():
localePath = self.plugin_dir + "/i18n/dalacalc_" + locale + ".qm"
if QFileInfo(localePath).exists():
self.translator = QTranslator()
self.translator.load(localePath)
if qVersion() > '4.3.3':
QCoreApplication.installTranslator(self.translator)
# Create the dialog (after translation) and keep reference
# self.dlg = DalaCalcDialog(self.iface.mainWindow(), flags)
def initGui(self):
# Create action that will start plugin configuration
self.action = QAction(QIcon(":/plugins/dalacalc/icon.png"),"Hitung Dala", self.iface.mainWindow())
self.action.setWhatsThis("Plugin untuk hitungan Kerusakan dan Kerugian")
self.action.setStatusTip("Damages And Losses Plugin")
# connect the action to the run method
QObject.connect(self.action, SIGNAL("triggered()"), self.run)
# Add toolbar button and menu item
self.iface.addToolBarIcon(self.action)
self.iface.addPluginToMenu(u"&Hitungan Kerusakan Kerugian", self.action)
def unload(self):
# Remove the plugin menu item and icon
self.iface.removePluginMenu(u"&Hitungan Kerusakan Kerugian", self.action)
self.iface.removeToolBarIcon(self.action)
# run method that performs all the real work
def run(self):
# create and show the dialog
flags = Qt.WindowTitleHint | Qt.WindowSystemMenuHint | Qt.WindowMaximizeButtonHint
self.dlg = DalaCalcDialog(self.iface.mainWindow(), flags)
# show the dialog
self.dlg.show()
# koneksi signal
QObject.connect(self.dlg.ui.KeterdampakanComboBox,SIGNAL('currentIndexChanged(int)'), self.bacaKeterdampakan)
QObject.connect(self.dlg.ui.BahayaComboBox,SIGNAL('currentIndexChanged(int)'), self.bacaBahaya)
#QObject.connect(self.dlg.ui.KerugianLineEdit,SIGNAL('currentIndexChanged(int)'), self.bacaKerugian)
QObject.connect(self.dlg.ui.helpPushButton,SIGNAL('clicked()'), self.bantuan)
QObject.connect(self.dlg.ui.hitungPushButton,SIGNAL('clicked()'), self.hitungDala)
quitbutton = self.dlg.ui.closePushButton
QObject.connect(quitbutton, SIGNAL('clicked()'), self.dlg, SLOT('close()'))
# membuat daftar layer yang ada di qgis
self.layermap=QgsMapLayerRegistry.instance().mapLayers()
for (name,layer) in self.layermap.iteritems():
if type(layer).__name__ == "QgsVectorLayer":
tempname = str(name).rstrip('01234567890')
self.layerids.append(name)
self.dlg.ui.KeterdampakanComboBox.addItem(tempname)
self.dlg.ui.BahayaComboBox.addItem(tempname)
def bacaKeterdampakan(self):
# membaca layer yg akan digunakan sebagai keterdampakan
try:
comboindex = self.dlg.ui.KeterdampakanComboBox.currentIndex()
layerKeterdampakan = self.layermap[self.layerids[comboindex]]
except: #Crashes without valid shapefiles
return
def bacaBahaya(self):
# membaca layer yg akan digunakan sebagai exposure
try:
comboindex = self.dlg.ui.BahayaComboBox.currentIndex()
layerBahaya = self.layermap[self.layerids[comboindex]]
except: #Crashes without valid shapefiles
return
def bantuan(self):
# membaca menu bantuan
QMessageBox.information(self.iface.mainWindow(),"Bantuan Dala","Hitungan kerugian disesuaikan dengan peraturan daerah yang berlaku, dan diasumsikan kerusakan sebesar 90 %", QMessageBox.Close)
def hitungDala(self):
# membaca isi nilai kerugian - menguji isinya apakah yang dimasukkan benar merupakan angka
try:
nilaiKerugian = self.dlg.ui.KerugianLineEdit.text()
nilaiKerugian = float(nilaiKerugian)
except ValueError:
QMessageBox.warning(self.iface.mainWindow(),"Error","Nilai kerugian tidak boleh kosong dan harus berupa angka!", QMessageBox.Close)
return
# membaca layer exposure
comboindex = self.dlg.ui.KeterdampakanComboBox.currentIndex()
layerKeterdampakan = self.layermap[self.layerids[comboindex]]
# membaca layer hazard
comboindex = self.dlg.ui.BahayaComboBox.currentIndex()
layerBahaya = self.layermap[self.layerids[comboindex]]
# check apakah layer sudah bener masuk
#QMessageBox.information(self.iface.mainWindow(),"Error","terdampak = "+str(layerKeterdampakan)+"\nBahaya = "+str(layerBahaya), QMessageBox.Close)
# membuat spatial index untuk mempercepat proses
dampakIndex = QgsSpatialIndex() #index kosong untuk menampung layer dengan jumlah feature banyak
bahayaIndex = QgsSpatialIndex()
fbahaya = QgsFeature() #variabel untuk menyimpan feature pada layer bahaya
fdampak = QgsFeature() #variabel untuk menyimpan feature pada layer dampak
# dampak - buat penyimpanan feature menggunakan spatial index
allAttrsDampak = layerKeterdampakan.pendingAllAttributesList()
layerKeterdampakan.select(allAttrsDampak)
allFeaturesDampak = {fdampak.id(): fdampak for fdampak in layerKeterdampakan}
# bahaya - buat penyimpanan feature menggunakan spatial index
allAttrsBahaya = layerBahaya.pendingAllAttributesList()
layerBahaya.select(allAttrsBahaya)
allFeaturesBahaya = {fbahaya.id(): fbahaya for fbahaya in layerBahaya}
#mengisi dictionary dengan data keterdampakan
for fd in allFeaturesDampak.values():
dampakIndex.insertFeature(fd)
#mengisi dictionary dengan data bahaya
for fb in allFeaturesBahaya.values():
bahayaIndex.insertFeature(fb)
# --- MAIN ITERATION ---
ids_D = {}
ids_B = {}
luasAkhirTerdampak = 0
# loop untuk mengisi feature di layer dampak dengan spatial indexnya
for fdampak in allFeaturesDampak.values():
varA = fdampak.id()
ids_D[varA] = dampakIndex.intersects(fdampak.geometry().boundingBox())
#QMessageBox.information(self.iface.mainWindow(),"test", str(varA),QMessageBox.Close)
# loop untuk mengisi feature di layer bahaya dengan spatial indexnya
for fbahaya in allFeaturesBahaya.values():
varB = fbahaya.id()
ids_B[varB] = bahayaIndex.intersects(fbahaya.geometry().boundingBox())
#QMessageBox.information(self.iface.mainWindow(),"test", str(varB),QMessageBox.Close)
selection=[]
# seleksi fitur yang terseleksi
for id_D in ids_D:
f_D = allFeaturesDampak[id_D]
for id_B in ids_B:
f_B = allFeaturesBahaya[id_B]
intersct = f_D.geometry().intersects(f_B.geometry())
#QMessageBox.information(self.iface.mainWindow(),"test1", "intersect pa gak?"+str(intersct),QMessageBox.Close)
if intersct == True:
luasTerdampak = f_D.geometry().area()
luasAkhirTerdampak += luasTerdampak
selection.append(id_D) # mendaftar feature yang terseleksi
else:
pass
layerKeterdampakan.setSelectedFeatures(selection)
if varA == 1:
self.zoomFeature()
else:
mc=self.iface.mapCanvas()
mc.zoomToSelected(layerKeterdampakan)
# menghitung perkalian antara nilai kerugian dengan luas area terdampak | # menampilkan hasil
stringHasil = ("Hasil analisis kerugian dan kerusakan: \n"
"\n- Total jumlah fasilitas terdampak = "+str(len(selection))+
"\n- Total luas semua fasilitas terdampak "
"\n = "+str(luasAkhirTerdampak)+ " m2"
"\n- Dengan nilai kerugian per unit sebesar "
"\n Rp. "+locale.format("%d",nilaiKerugian,grouping=True)+",- "
"\n dan dengan asumsi bahwa bangunan yang rusak "
"\n mengalami "+str(persentase*100)+"% kerusakan, diperoleh bahwa"
"\nNilai total kerugian = "
"\n Rp. "+locale.format("%d",hasilKali,grouping=True)+",-")
QMessageBox.information(self.iface.mainWindow(),"Hasil Hitungan", stringHasil, QMessageBox.Close)
def zoomFeature(self):
#Kalau hanya satu feature yang terpilih, zoom ke feature tersebut.
try:
comboindex = self.dlg.ui.KeterdampakanComboBox.currentIndex()
layerKeterdampakan = self.layermap[self.layerids[comboindex]]
rect = QgsRectangle(layerKeterdampakan.boundingBoxOfSelected())
rect.setXMaximum(rect.xMaximum() + .5)
rect.setXMinimum(rect.xMinimum() - .5)
rect.setYMaximum(rect.yMaximum() + .5)
rect.setYMinimum(rect.yMinimum() - .5)
number = float(self.dlg.ui.scaleSpin.value())
mc=self.iface.mapCanvas()
#If the screen is longer than it is tall, scale based on width
if mc.extent().height() > mc.extent().width():
scalefactor = (mc.extent().width() / mc.scale())
else: #otherwise use height
scalefactor = (mc.extent().height() / mc.scale())
rect.scale(number * scalefactor)
mc.setExtent(rect)
mc.refresh()
#scalefactor = number / mc.scale()
#mc.zoomByFactor(scalefactor)
except:
print "Tidak ada feature yang terseleksi" | persentase = 90.0*(0.01)
hasilKali = luasAkhirTerdampak * nilaiKerugian * persentase | random_line_split |
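Each row above splits one source file into three string fields, prefix, suffix and middle, with fim_type recording how the hole was cut: the first row masks a single identifier (bacaBahaya, identifier_name), while the second row, taken from made.py, is cut at a random line (random_line_split) so that its two-line middle is the percentage and multiplication snippet shown last. The short sketch below shows one way such a row could be reassembled and turned into an infilling-style prompt; it is a minimal illustration only, and the row dict, the assumed field names, and the <fim_*> sentinel strings are hypothetical, not something this file defines.

# Minimal sketch (assumptions noted above): recover the original file from
# one row and build a fill-in-the-middle style prompt from it.

def reassemble(row):
    # Concatenating the pieces in document order restores the source text.
    return row["prefix"] + row["middle"] + row["suffix"]

def to_fim_prompt(row):
    # A common prefix-suffix-middle ordering used for infilling training;
    # the sentinel strings here are placeholders, not tokens from this dataset.
    return ("<fim_prefix>" + row["prefix"]
            + "<fim_suffix>" + row["suffix"]
            + "<fim_middle>" + row["middle"])

# Hypothetical row mirroring the second example above (contents truncated).
row = {
    "file_name": "made.py",
    "prefix": "# Import the PyQt and QGIS libraries\n# ...",
    "middle": "persentase = 90.0*(0.01)\n# ...",
    "suffix": "# menampilkan hasil\n# ...",
    "fim_type": "random_line_split",
}
print(reassemble(row).splitlines()[0])  # first line of the restored file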